From 2022ace7b42af3e8e474943d62a5bf368b8d4e7b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jan=20K=C3=BChle?=
Date: Fri, 27 Oct 2023 07:11:30 +0200
Subject: [PATCH 01/68] Move RuntimeChannel type arg T to associated types
 (#1314)

RuntimeChannel::batch_message_channel needs to be generic over the
message type. The type used to be declared on the RuntimeChannel trait.
This means a RuntimeChannel can only be used with one particular
message type, which feels unfortunate.

    fn install<R: RuntimeChannel<trace::BatchMessage>>(runtime: R) {
        // Can't use the same runtime here. :-(
        TracerProvider::builder().with_batch_exporter(e, runtime);
        LoggerProvider::builder().with_batch_exporter(e, runtime);
    }

This change moves the type argument to the batch_message_channel
function and the associated types Receiver and Sender. Channels are
still specific to a message type, but a RuntimeChannel can be used with
any number of message types.

    fn install<R: RuntimeChannel>(runtime: R) {
        // It works. :-)
        TracerProvider::builder().with_batch_exporter(e, runtime);
        LoggerProvider::builder().with_batch_exporter(e, runtime);
    }

This also means the BatchMessage types no longer need to be public.
---
 .../src/trace/exporter/jaeger_json.rs         |  4 +-
 opentelemetry-datadog/src/exporter/mod.rs     |  7 +--
 opentelemetry-jaeger/src/exporter/runtime.rs  |  4 +-
 opentelemetry-otlp/src/logs.rs                |  6 +--
 opentelemetry-otlp/src/span.rs                |  5 +-
 opentelemetry-sdk/CHANGELOG.md                |  2 +
 opentelemetry-sdk/src/logs/log_emitter.rs     |  4 +-
 opentelemetry-sdk/src/logs/log_processor.rs   | 16 +++---
 opentelemetry-sdk/src/logs/mod.rs             |  3 +-
 opentelemetry-sdk/src/runtime.rs              | 52 ++++++++++++-------
 opentelemetry-sdk/src/trace/mod.rs            |  3 +-
 opentelemetry-sdk/src/trace/provider.rs       |  4 +-
 opentelemetry-sdk/src/trace/runtime_tests.rs  |  8 +--
 opentelemetry-sdk/src/trace/sampler.rs        |  2 +-
 .../trace/sampler/jaeger_remote/sampler.rs    | 10 ++--
 opentelemetry-sdk/src/trace/span_processor.rs | 16 +++---
 opentelemetry-zipkin/src/exporter/mod.rs      |  7 +--
 17 files changed, 77 insertions(+), 76 deletions(-)

diff --git a/opentelemetry-contrib/src/trace/exporter/jaeger_json.rs b/opentelemetry-contrib/src/trace/exporter/jaeger_json.rs
index 085e51b121..38b25aa0a3 100644
--- a/opentelemetry-contrib/src/trace/exporter/jaeger_json.rs
+++ b/opentelemetry-contrib/src/trace/exporter/jaeger_json.rs
@@ -8,7 +8,7 @@ use opentelemetry::trace::{SpanId, TraceError};
 use opentelemetry_sdk::{
     export::trace::{ExportResult, SpanData, SpanExporter},
     runtime::RuntimeChannel,
-    trace::{BatchMessage, Tracer, TracerProvider},
+    trace::{Tracer, TracerProvider},
 };
 use opentelemetry_semantic_conventions::SCHEMA_URL;
 use std::collections::HashMap;
@@ -213,7 +213,7 @@ fn opentelemetry_value_to_json(value: &opentelemetry::Value) -> (&str, serde_jso
 ///
 /// [`RuntimeChannel`]: opentelemetry_sdk::runtime::RuntimeChannel
 #[async_trait]
-pub trait JaegerJsonRuntime: RuntimeChannel<BatchMessage> + std::fmt::Debug {
+pub trait JaegerJsonRuntime: RuntimeChannel + std::fmt::Debug {
     /// Create a new directory if the given path does not exist yet
     async fn create_dir(&self, path: &Path) -> ExportResult;
     /// Write the provided content to a new file at the given path
diff --git a/opentelemetry-datadog/src/exporter/mod.rs b/opentelemetry-datadog/src/exporter/mod.rs
index 1ae28e6cd6..9b07183390 100644
--- a/opentelemetry-datadog/src/exporter/mod.rs
+++ b/opentelemetry-datadog/src/exporter/mod.rs
@@ -15,7 +15,7 @@ use opentelemetry_sdk::{
     export::trace::{ExportResult, SpanData, SpanExporter},
     resource::{ResourceDetector, SdkProvidedResourceDetector},
     runtime::RuntimeChannel,
-    trace::{BatchMessage, 
Config, Tracer, TracerProvider}, + trace::{Config, Tracer, TracerProvider}, Resource, }; use opentelemetry_semantic_conventions as semcov; @@ -300,10 +300,7 @@ impl DatadogPipelineBuilder { /// Install the Datadog trace exporter pipeline using a batch span processor with the specified /// runtime. - pub fn install_batch>( - mut self, - runtime: R, - ) -> Result { + pub fn install_batch(mut self, runtime: R) -> Result { let (config, service_name) = self.build_config_and_service_name(); let exporter = self.build_exporter_with_service_name(service_name)?; let mut provider_builder = TracerProvider::builder().with_batch_exporter(exporter, runtime); diff --git a/opentelemetry-jaeger/src/exporter/runtime.rs b/opentelemetry-jaeger/src/exporter/runtime.rs index 7b52dddd1c..5348eefbcf 100644 --- a/opentelemetry-jaeger/src/exporter/runtime.rs +++ b/opentelemetry-jaeger/src/exporter/runtime.rs @@ -5,14 +5,14 @@ ))] use crate::exporter::addrs_and_family; use async_trait::async_trait; -use opentelemetry_sdk::{runtime::RuntimeChannel, trace::BatchMessage}; +use opentelemetry_sdk::runtime::RuntimeChannel; use std::net::ToSocketAddrs; /// Jaeger Trace Runtime is an extension to [`RuntimeChannel`]. /// /// [`RuntimeChannel`]: opentelemetry_sdk::runtime::RuntimeChannel #[async_trait] -pub trait JaegerTraceRuntime: RuntimeChannel + std::fmt::Debug { +pub trait JaegerTraceRuntime: RuntimeChannel + std::fmt::Debug { /// A communication socket between Jaeger client and agent. type Socket: std::fmt::Debug + Send + Sync; diff --git a/opentelemetry-otlp/src/logs.rs b/opentelemetry-otlp/src/logs.rs index 2372b21248..21f8fbb0ed 100644 --- a/opentelemetry-otlp/src/logs.rs +++ b/opentelemetry-otlp/src/logs.rs @@ -19,7 +19,7 @@ use opentelemetry::{ global, logs::{LogError, LoggerProvider}, }; -use opentelemetry_sdk::{self, export::logs::LogData, logs::BatchMessage, runtime::RuntimeChannel}; +use opentelemetry_sdk::{self, export::logs::LogData, runtime::RuntimeChannel}; /// Compression algorithm to use, defaults to none. pub const OTEL_EXPORTER_OTLP_LOGS_COMPRESSION: &str = "OTEL_EXPORTER_OTLP_LOGS_COMPRESSION"; @@ -166,7 +166,7 @@ impl OtlpLogPipeline { /// Returns a [`Logger`] with the name `opentelemetry-otlp` and the current crate version. 
/// /// [`Logger`]: opentelemetry_sdk::logs::Logger - pub fn install_batch>( + pub fn install_batch( self, runtime: R, ) -> Result { @@ -198,7 +198,7 @@ fn build_simple_with_exporter( logger } -fn build_batch_with_exporter>( +fn build_batch_with_exporter( exporter: LogExporter, log_config: Option, runtime: R, diff --git a/opentelemetry-otlp/src/span.rs b/opentelemetry-otlp/src/span.rs index 475ee1fc5c..a8503c87a4 100644 --- a/opentelemetry-otlp/src/span.rs +++ b/opentelemetry-otlp/src/span.rs @@ -12,7 +12,6 @@ use opentelemetry::{ use opentelemetry_sdk::{ self as sdk, export::trace::{ExportResult, SpanData}, - trace::BatchMessage, }; use opentelemetry_semantic_conventions::SCHEMA_URL; use sdk::runtime::RuntimeChannel; @@ -122,7 +121,7 @@ impl OtlpTracePipeline { /// `install_batch` will panic if not called within a tokio runtime /// /// [`Tracer`]: opentelemetry::trace::Tracer - pub fn install_batch>( + pub fn install_batch( self, runtime: R, ) -> Result { @@ -154,7 +153,7 @@ fn build_simple_with_exporter( tracer } -fn build_batch_with_exporter>( +fn build_batch_with_exporter( exporter: SpanExporter, trace_config: Option, runtime: R, diff --git a/opentelemetry-sdk/CHANGELOG.md b/opentelemetry-sdk/CHANGELOG.md index aaa4d14ad6..fb5712ecd7 100644 --- a/opentelemetry-sdk/CHANGELOG.md +++ b/opentelemetry-sdk/CHANGELOG.md @@ -52,10 +52,12 @@ `should_sample` changes `attributes` from `OrderMap` to `Vec`. +- **Breaking** Move type argument from `RuntimeChannel` to associated types [#1314](https://github.com/open-telemetry/opentelemetry-rust/pull/1314) ### Removed - Remove context from Metric force_flush [#1245](https://github.com/open-telemetry/opentelemetry-rust/pull/1245) +- Remove `logs::BatchMessage` and `trace::BatchMessage` types [#1314](https://github.com/open-telemetry/opentelemetry-rust/pull/1314) ### Fixed diff --git a/opentelemetry-sdk/src/logs/log_emitter.rs b/opentelemetry-sdk/src/logs/log_emitter.rs index ee15ae47c2..557ed552c5 100644 --- a/opentelemetry-sdk/src/logs/log_emitter.rs +++ b/opentelemetry-sdk/src/logs/log_emitter.rs @@ -1,4 +1,4 @@ -use super::{BatchLogProcessor, BatchMessage, Config, LogProcessor, SimpleLogProcessor}; +use super::{BatchLogProcessor, Config, LogProcessor, SimpleLogProcessor}; use crate::{ export::logs::{LogData, LogExporter}, runtime::RuntimeChannel, @@ -140,7 +140,7 @@ impl Builder { } /// The `LogExporter` setup using a default `BatchLogProcessor` that this provider should use. - pub fn with_batch_exporter>( + pub fn with_batch_exporter( self, exporter: T, runtime: R, diff --git a/opentelemetry-sdk/src/logs/log_processor.rs b/opentelemetry-sdk/src/logs/log_processor.rs index 6bac467a5b..342be3395f 100644 --- a/opentelemetry-sdk/src/logs/log_processor.rs +++ b/opentelemetry-sdk/src/logs/log_processor.rs @@ -109,11 +109,11 @@ impl LogProcessor for SimpleLogProcessor { /// A [`LogProcessor`] that asynchronously buffers log records and reports /// them at a preconfigured interval. 
-pub struct BatchLogProcessor<R: RuntimeChannel<BatchMessage>> {
-    message_sender: R::Sender,
+pub struct BatchLogProcessor<R: RuntimeChannel> {
+    message_sender: R::Sender<BatchMessage>,
 }
 
-impl<R: RuntimeChannel<BatchMessage>> Debug for BatchLogProcessor<R> {
+impl<R: RuntimeChannel> Debug for BatchLogProcessor<R> {
     fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
         f.debug_struct("BatchLogProcessor")
             .field("message_sender", &self.message_sender)
@@ -121,7 +121,7 @@ impl<R: RuntimeChannel<BatchMessage>> Debug for BatchLogProcessor<R> {
     }
 }
 
-impl<R: RuntimeChannel<BatchMessage>> LogProcessor for BatchLogProcessor<R> {
+impl<R: RuntimeChannel> LogProcessor for BatchLogProcessor<R> {
     fn emit(&self, data: LogData) {
         let result = self.message_sender.try_send(BatchMessage::ExportLog(data));
 
@@ -158,7 +158,7 @@ impl<R: RuntimeChannel<BatchMessage>> LogProcessor for BatchLogProcessor<R> {
     }
 }
 
-impl<R: RuntimeChannel<BatchMessage>> BatchLogProcessor<R> {
+impl<R: RuntimeChannel> BatchLogProcessor<R> {
     pub(crate) fn new(mut exporter: Box<dyn LogExporter>, config: BatchConfig, runtime: R) -> Self {
         let (message_sender, message_receiver) =
             runtime.batch_message_channel(config.max_queue_size);
@@ -262,7 +262,7 @@ async fn export_with_timeout<R, E>(
     batch: Vec<LogData>,
 ) -> ExportResult
 where
-    R: RuntimeChannel<BatchMessage>,
+    R: RuntimeChannel,
     E: LogExporter + ?Sized,
 {
     if batch.is_empty() {
@@ -323,7 +323,7 @@ pub struct BatchLogProcessorBuilder<E, R> {
 impl<E, R> BatchLogProcessorBuilder<E, R>
 where
     E: LogExporter + 'static,
-    R: RuntimeChannel<BatchMessage>,
+    R: RuntimeChannel,
 {
     /// Set max queue size for batches
     pub fn with_max_queue_size(self, size: usize) -> Self {
@@ -372,7 +372,7 @@ where
 /// Messages sent between application thread and batch log processor's work thread.
 #[allow(clippy::large_enum_variant)]
 #[derive(Debug)]
-pub enum BatchMessage {
+enum BatchMessage {
     /// Export logs, usually called when the log is emitted.
     ExportLog(LogData),
     /// Flush the current buffer to the backend, it can be triggered by
diff --git a/opentelemetry-sdk/src/logs/mod.rs b/opentelemetry-sdk/src/logs/mod.rs
index a2ed90a7c6..45d16d5467 100644
--- a/opentelemetry-sdk/src/logs/mod.rs
+++ b/opentelemetry-sdk/src/logs/mod.rs
@@ -7,6 +7,5 @@ mod log_processor;
 pub use config::{config, Config};
 pub use log_emitter::{Builder, Logger, LoggerProvider};
 pub use log_processor::{
-    BatchConfig, BatchLogProcessor, BatchLogProcessorBuilder, BatchMessage, LogProcessor,
-    SimpleLogProcessor,
+    BatchConfig, BatchLogProcessor, BatchLogProcessorBuilder, LogProcessor, SimpleLogProcessor,
 };
diff --git a/opentelemetry-sdk/src/runtime.rs b/opentelemetry-sdk/src/runtime.rs
index 565ebb8a48..7705c10e91 100644
--- a/opentelemetry-sdk/src/runtime.rs
+++ b/opentelemetry-sdk/src/runtime.rs
@@ -133,19 +133,22 @@ impl Runtime for AsyncStd {
     }
 }
 
-/// `MessageRuntime` is an extension to [`Runtime`]. Currently, it provides a
+/// `RuntimeChannel` is an extension to [`Runtime`]. Currently, it provides a
 /// channel that is used by the [log] and [span] batch processors.
 ///
 /// [log]: crate::logs::BatchLogProcessor
 /// [span]: crate::trace::BatchSpanProcessor
-pub trait RuntimeChannel<T: Debug + Send>: Runtime {
+pub trait RuntimeChannel: Runtime {
     /// A future stream to receive batch messages from channels.
-    type Receiver: Stream<Item = T> + Send;
+    type Receiver<T: Debug + Send>: Stream<Item = T> + Send;
     /// A batch messages sender that can be sent across threads safely.
-    type Sender: TrySend<Message = T> + Debug;
+    type Sender<T: Debug + Send>: TrySend<Message = T> + Debug;
 
     /// Return the sender and receiver used to send batch messages.
-    fn batch_message_channel(&self, capacity: usize) -> (Self::Sender, Self::Receiver);
+    fn batch_message_channel<T: Debug + Send>(
+        &self,
+        capacity: usize,
+    ) -> (Self::Sender<T>, Self::Receiver<T>);
 }
 
 /// Error returned by a [`TrySend`] implementation.
@@ -187,11 +190,14 @@ impl TrySend for tokio::sync::mpsc::Sender { #[cfg(feature = "rt-tokio")] #[cfg_attr(docsrs, doc(cfg(feature = "rt-tokio")))] -impl RuntimeChannel for Tokio { - type Receiver = tokio_stream::wrappers::ReceiverStream; - type Sender = tokio::sync::mpsc::Sender; - - fn batch_message_channel(&self, capacity: usize) -> (Self::Sender, Self::Receiver) { +impl RuntimeChannel for Tokio { + type Receiver = tokio_stream::wrappers::ReceiverStream; + type Sender = tokio::sync::mpsc::Sender; + + fn batch_message_channel( + &self, + capacity: usize, + ) -> (Self::Sender, Self::Receiver) { let (sender, receiver) = tokio::sync::mpsc::channel(capacity); ( sender, @@ -202,11 +208,14 @@ impl RuntimeChannel for Tokio { #[cfg(feature = "rt-tokio-current-thread")] #[cfg_attr(docsrs, doc(cfg(feature = "rt-tokio-current-thread")))] -impl RuntimeChannel for TokioCurrentThread { - type Receiver = tokio_stream::wrappers::ReceiverStream; - type Sender = tokio::sync::mpsc::Sender; - - fn batch_message_channel(&self, capacity: usize) -> (Self::Sender, Self::Receiver) { +impl RuntimeChannel for TokioCurrentThread { + type Receiver = tokio_stream::wrappers::ReceiverStream; + type Sender = tokio::sync::mpsc::Sender; + + fn batch_message_channel( + &self, + capacity: usize, + ) -> (Self::Sender, Self::Receiver) { let (sender, receiver) = tokio::sync::mpsc::channel(capacity); ( sender, @@ -229,11 +238,14 @@ impl TrySend for async_std::channel::Sender { #[cfg(feature = "rt-async-std")] #[cfg_attr(docsrs, doc(cfg(feature = "rt-async-std")))] -impl RuntimeChannel for AsyncStd { - type Receiver = async_std::channel::Receiver; - type Sender = async_std::channel::Sender; - - fn batch_message_channel(&self, capacity: usize) -> (Self::Sender, Self::Receiver) { +impl RuntimeChannel for AsyncStd { + type Receiver = async_std::channel::Receiver; + type Sender = async_std::channel::Sender; + + fn batch_message_channel( + &self, + capacity: usize, + ) -> (Self::Sender, Self::Receiver) { async_std::channel::bounded(capacity) } } diff --git a/opentelemetry-sdk/src/trace/mod.rs b/opentelemetry-sdk/src/trace/mod.rs index bb731f2132..3ef46c4925 100644 --- a/opentelemetry-sdk/src/trace/mod.rs +++ b/opentelemetry-sdk/src/trace/mod.rs @@ -26,8 +26,7 @@ pub use sampler::{Sampler, ShouldSample}; pub use span::Span; pub use span_limit::SpanLimits; pub use span_processor::{ - BatchConfig, BatchMessage, BatchSpanProcessor, BatchSpanProcessorBuilder, SimpleSpanProcessor, - SpanProcessor, + BatchConfig, BatchSpanProcessor, BatchSpanProcessorBuilder, SimpleSpanProcessor, SpanProcessor, }; pub use tracer::Tracer; diff --git a/opentelemetry-sdk/src/trace/provider.rs b/opentelemetry-sdk/src/trace/provider.rs index 30c05c2003..93572cc8ba 100644 --- a/opentelemetry-sdk/src/trace/provider.rs +++ b/opentelemetry-sdk/src/trace/provider.rs @@ -9,7 +9,7 @@ //! not duplicate this data to avoid that different [`Tracer`] instances //! of the [`TracerProvider`] have different versions of these data. use crate::runtime::RuntimeChannel; -use crate::trace::{BatchMessage, BatchSpanProcessor, SimpleSpanProcessor, Tracer}; +use crate::trace::{BatchSpanProcessor, SimpleSpanProcessor, Tracer}; use crate::{export::trace::SpanExporter, trace::SpanProcessor}; use crate::{InstrumentationLibrary, Resource}; use once_cell::sync::OnceCell; @@ -166,7 +166,7 @@ impl Builder { } /// The [`SpanExporter`] setup using a default [`BatchSpanProcessor`] that this provider should use. 
- pub fn with_batch_exporter>( + pub fn with_batch_exporter( self, exporter: T, runtime: R, diff --git a/opentelemetry-sdk/src/trace/runtime_tests.rs b/opentelemetry-sdk/src/trace/runtime_tests.rs index e88ce596af..610d140b7a 100644 --- a/opentelemetry-sdk/src/trace/runtime_tests.rs +++ b/opentelemetry-sdk/src/trace/runtime_tests.rs @@ -6,8 +6,6 @@ use crate::export::trace::{ExportResult, SpanExporter}; use crate::runtime; #[cfg(any(feature = "rt-tokio", feature = "rt-tokio-current-thread"))] use crate::runtime::RuntimeChannel; -#[cfg(any(feature = "rt-tokio", feature = "rt-tokio-current-thread"))] -use crate::trace::BatchMessage; use futures_util::future::BoxFuture; #[cfg(any(feature = "rt-tokio", feature = "rt-tokio-current-thread"))] use opentelemetry::global::*; @@ -42,7 +40,7 @@ impl SpanCountExporter { } #[cfg(any(feature = "rt-tokio", feature = "rt-tokio-current-thread"))] -fn build_batch_tracer_provider>( +fn build_batch_tracer_provider( exporter: SpanCountExporter, runtime: R, ) -> crate::trace::TracerProvider { @@ -61,9 +59,7 @@ fn build_simple_tracer_provider(exporter: SpanCountExporter) -> crate::trace::Tr } #[cfg(any(feature = "rt-tokio", feature = "rt-tokio-current-thread"))] -async fn test_set_provider_in_tokio>( - runtime: R, -) -> Arc { +async fn test_set_provider_in_tokio(runtime: R) -> Arc { let exporter = SpanCountExporter::new(); let span_count = exporter.span_count.clone(); let _ = set_tracer_provider(build_batch_tracer_provider(exporter, runtime)); diff --git a/opentelemetry-sdk/src/trace/sampler.rs b/opentelemetry-sdk/src/trace/sampler.rs index dc8bde293f..02ba4f3f8c 100644 --- a/opentelemetry-sdk/src/trace/sampler.rs +++ b/opentelemetry-sdk/src/trace/sampler.rs @@ -156,7 +156,7 @@ impl Sampler { where C: HttpClient + 'static, Sampler: ShouldSample, - R: crate::runtime::RuntimeChannel, + R: crate::runtime::RuntimeChannel, Svc: Into, { JaegerRemoteSamplerBuilder::new(runtime, http_client, default_sampler, service_name) diff --git a/opentelemetry-sdk/src/trace/sampler/jaeger_remote/sampler.rs b/opentelemetry-sdk/src/trace/sampler/jaeger_remote/sampler.rs index 07c05b94b1..6f942cbd7f 100644 --- a/opentelemetry-sdk/src/trace/sampler/jaeger_remote/sampler.rs +++ b/opentelemetry-sdk/src/trace/sampler/jaeger_remote/sampler.rs @@ -1,7 +1,7 @@ use crate::runtime::RuntimeChannel; use crate::trace::sampler::jaeger_remote::remote::SamplingStrategyResponse; use crate::trace::sampler::jaeger_remote::sampling_strategy::Inner; -use crate::trace::{BatchMessage, Sampler, ShouldSample}; +use crate::trace::{Sampler, ShouldSample}; use futures_util::{stream, StreamExt as _}; use http::Uri; use opentelemetry::trace::{Link, SamplingResult, SpanKind, TraceError, TraceId}; @@ -18,7 +18,7 @@ const DEFAULT_REMOTE_SAMPLER_ENDPOINT: &str = "http://localhost:5778/sampling"; #[derive(Debug)] pub struct JaegerRemoteSamplerBuilder where - R: RuntimeChannel, + R: RuntimeChannel, C: HttpClient + 'static, S: ShouldSample + 'static, { @@ -35,7 +35,7 @@ impl JaegerRemoteSamplerBuilder where C: HttpClient + 'static, S: ShouldSample + 'static, - R: RuntimeChannel, + R: RuntimeChannel, { pub(crate) fn new( runtime: R, @@ -155,7 +155,7 @@ impl JaegerRemoteSampler { leaky_bucket_size: f64, ) -> Self where - R: RuntimeChannel, + R: RuntimeChannel, C: HttpClient + 'static, S: ShouldSample + 'static, { @@ -185,7 +185,7 @@ impl JaegerRemoteSampler { shutdown: futures_channel::mpsc::Receiver<()>, endpoint: Uri, ) where - R: RuntimeChannel, + R: RuntimeChannel, C: HttpClient + 'static, { // todo: review if we need 
'static here diff --git a/opentelemetry-sdk/src/trace/span_processor.rs b/opentelemetry-sdk/src/trace/span_processor.rs index 9dd60941d6..73c92f6903 100644 --- a/opentelemetry-sdk/src/trace/span_processor.rs +++ b/opentelemetry-sdk/src/trace/span_processor.rs @@ -252,11 +252,11 @@ enum Message { /// [`executor`]: https://docs.rs/futures/0.3/futures/executor/index.html /// [`tokio`]: https://tokio.rs /// [`async-std`]: https://async.rs -pub struct BatchSpanProcessor> { - message_sender: R::Sender, +pub struct BatchSpanProcessor { + message_sender: R::Sender, } -impl> fmt::Debug for BatchSpanProcessor { +impl fmt::Debug for BatchSpanProcessor { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("BatchSpanProcessor") .field("message_sender", &self.message_sender) @@ -264,7 +264,7 @@ impl> fmt::Debug for BatchSpanProcessor { } } -impl> SpanProcessor for BatchSpanProcessor { +impl SpanProcessor for BatchSpanProcessor { fn on_start(&self, _span: &mut Span, _cx: &Context) { // Ignored } @@ -310,7 +310,7 @@ impl> SpanProcessor for BatchSpanProcessor { // 2. Most of the messages will be ExportSpan. #[allow(clippy::large_enum_variant)] #[derive(Debug)] -pub enum BatchMessage { +enum BatchMessage { /// Export spans, usually called when span ends ExportSpan(SpanData), /// Flush the current buffer to the backend, it can be triggered by @@ -328,7 +328,7 @@ struct BatchSpanProcessorInternal { config: BatchConfig, } -impl> BatchSpanProcessorInternal { +impl BatchSpanProcessorInternal { async fn flush(&mut self, res_channel: Option>) { let export_task = self.export(); let task = Box::pin(async move { @@ -465,7 +465,7 @@ impl> BatchSpanProcessorInternal { } } -impl> BatchSpanProcessor { +impl BatchSpanProcessor { pub(crate) fn new(exporter: Box, config: BatchConfig, runtime: R) -> Self { let (message_sender, message_receiver) = runtime.batch_message_channel(config.max_queue_size); @@ -648,7 +648,7 @@ pub struct BatchSpanProcessorBuilder { impl BatchSpanProcessorBuilder where E: SpanExporter + 'static, - R: RuntimeChannel, + R: RuntimeChannel, { /// Set max queue size for batches pub fn with_max_queue_size(self, size: usize) -> Self { diff --git a/opentelemetry-zipkin/src/exporter/mod.rs b/opentelemetry-zipkin/src/exporter/mod.rs index 54eea89008..bc124593a5 100644 --- a/opentelemetry-zipkin/src/exporter/mod.rs +++ b/opentelemetry-zipkin/src/exporter/mod.rs @@ -12,7 +12,7 @@ use opentelemetry_sdk::{ export::{trace, ExportError}, resource::{ResourceDetector, SdkProvidedResourceDetector}, runtime::RuntimeChannel, - trace::{BatchMessage, Config, Tracer, TracerProvider}, + trace::{Config, Tracer, TracerProvider}, Resource, }; use opentelemetry_semantic_conventions as semcov; @@ -184,10 +184,7 @@ impl ZipkinPipelineBuilder { /// Install the Zipkin trace exporter pipeline with a batch span processor using the specified /// runtime. - pub fn install_batch>( - mut self, - runtime: R, - ) -> Result { + pub fn install_batch(mut self, runtime: R) -> Result { let (config, endpoint) = self.init_config_and_endpoint(); let exporter = self.init_exporter_with_endpoint(endpoint)?; let mut provider_builder = TracerProvider::builder().with_batch_exporter(exporter, runtime); From ed97a1bd9efbbde2f080bdad7331b91f277b9f6b Mon Sep 17 00:00:00 2001 From: Harold Dost Date: Fri, 27 Oct 2023 07:11:57 +0200 Subject: [PATCH 02/68] Remove opentelemetry-dynatrace source. (#1321) We will no longer build/support it as Dynatrace is migrating towards OTLP ingestion. 
Relates #1099 --- Cargo.toml | 1 - opentelemetry-dynatrace/Cargo.toml | 85 -- opentelemetry-dynatrace/README.md | 99 +- opentelemetry-dynatrace/src/exporter/mod.rs | 52 - opentelemetry-dynatrace/src/lib.rs | 309 ------ opentelemetry-dynatrace/src/metric.rs | 448 -------- .../src/transform/common.rs | 24 - .../src/transform/metrics.rs | 987 ------------------ opentelemetry-dynatrace/src/transform/mod.rs | 11 - opentelemetry-dynatrace/tests/http_test.rs | 185 ---- opentelemetry/README.md | 2 +- scripts/lint.sh | 8 - scripts/test.sh | 1 - 13 files changed, 7 insertions(+), 2205 deletions(-) delete mode 100644 opentelemetry-dynatrace/Cargo.toml delete mode 100644 opentelemetry-dynatrace/src/exporter/mod.rs delete mode 100644 opentelemetry-dynatrace/src/lib.rs delete mode 100644 opentelemetry-dynatrace/src/metric.rs delete mode 100644 opentelemetry-dynatrace/src/transform/common.rs delete mode 100644 opentelemetry-dynatrace/src/transform/metrics.rs delete mode 100644 opentelemetry-dynatrace/src/transform/mod.rs delete mode 100644 opentelemetry-dynatrace/tests/http_test.rs diff --git a/Cargo.toml b/Cargo.toml index 5c567f94ca..39ee5c00b0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,7 +4,6 @@ members = [ "opentelemetry-aws", "opentelemetry-contrib", "opentelemetry-datadog", - "opentelemetry-dynatrace", "opentelemetry-http", "opentelemetry-jaeger", "opentelemetry-jaeger/examples/actix-udp", diff --git a/opentelemetry-dynatrace/Cargo.toml b/opentelemetry-dynatrace/Cargo.toml deleted file mode 100644 index 9e87a22f3c..0000000000 --- a/opentelemetry-dynatrace/Cargo.toml +++ /dev/null @@ -1,85 +0,0 @@ -[package] -name = "opentelemetry-dynatrace" -version = "0.4.0" -description = "Dynatrace exporters and propagators for OpenTelemetry" -homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-dynatrace" -repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-dynatrace" -readme = "README.md" -categories = [ - "development-tools::debugging", - "development-tools::profiling", - "asynchronous", -] -keywords = ["opentelemetry", "metrics", "dynatrace"] -license = "Apache-2.0" -edition = "2021" -rust-version = "1.65" - -[package.metadata.docs.rs] -all-features = true -rustdoc-args = ["--cfg", "docsrs"] - -[[test]] -name = "integration_tokio" -path = "tests/http_test.rs" -required-features = ["metrics", "rt-tokio"] - -[features] -default = ["metrics", "rt-tokio", "reqwest-client", "reqwest-rustls"] - -metrics = ["opentelemetry/metrics"] - -reqwest-client = ["reqwest", "opentelemetry-http/reqwest"] -reqwest-rustls = ["reqwest/rustls-tls-native-roots"] -reqwest-blocking-client = ["reqwest/blocking", "opentelemetry-http/reqwest"] -surf-client = ["surf", "opentelemetry-http/surf"] -isahc-client = ["isahc", "opentelemetry-http/isahc"] - -rt-tokio = ["tokio", "opentelemetry/rt-tokio"] -rt-async-std = ["async-std", "futures-channel"] - -wasm = [ - "base64", - "getrandom/js", - "js-sys", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", -] - -[dependencies] -async-std = { version = "= 1.10.0", features = ["unstable"], optional = true } -base64 = { version = "0.21", optional = true } -futures-channel = { version = "0.3", optional = true } -getrandom = { version = "0.2", optional = true } -http = "0.2" -isahc = { version = "1.4", default-features = false, optional = true } -js-sys = { version = "0.3.5", optional = true } -opentelemetry = { version = "0.19", default-features = false } -opentelemetry_sdk = { version = "0.19", features = 
["metrics"] } -opentelemetry-http = { version = "0.8", default-features = false } -reqwest = { version = "0.11", default-features = false, optional = true } -surf = { version = "2.0", default-features = false, optional = true } -thiserror = "1.0" -tokio = { version = "1.0", default-features = false, features = ["rt", "sync"], optional = true } -wasm-bindgen = { version = "0.2", default-features = false, features = ["std"], optional = true } -wasm-bindgen-futures = { version = "0.4", default-features = false, optional = true } - -[dependencies.web-sys] -version = "0.3" -default-features = false -features = [ - "Headers", - "Request", - "RequestInit", - "RequestMode", - "Response", - "Window", -] -optional = true - -[dev-dependencies] -opentelemetry_sdk = { version = "0.19.0", features = ["rt-tokio"] } -futures-util = { version = "0.3", default-features = false } -tokio = { version = "1.0", default-features = false, features = ["macros", "rt-multi-thread", "sync", "test-util"] } -hyper = { version = "0.14", default-features = false, features = ["server", "tcp", "http1"] } diff --git a/opentelemetry-dynatrace/README.md b/opentelemetry-dynatrace/README.md index 256f2dfadb..0f978d024e 100644 --- a/opentelemetry-dynatrace/README.md +++ b/opentelemetry-dynatrace/README.md @@ -4,100 +4,13 @@ # Dynatrace -*This is the last release of the crate!* +The final release of this crate was 0.4.0. Dynatrace now recommends using the OTLP exporter. They also provide a [migration guide] +For an example on how to configure the OTLP exporter in a Rust application, check out the [Rust integration walk-through] page in the Dynatrace documentation. -[Dynatrace](https://www.dynatrace.com/integrations/opentelemetry) supports native -OpenTelemetry protocol (OTLP) ingest for traces, metrics and logs. -All signals can be sent directly to Dynatrace via **OTLP protobuf over HTTP** -using the built-in OTLP/HTTP Exporter available in the OpenTelemetry Rust SDK. -More information on configuring your Rust applications to use the OTLP exporter can be found in the -[Dynatrace documentation](https://www.dynatrace.com/support/help/shortlink/otel-wt-rust). +[migration guide]: https://www.dynatrace.com/support/help/shortlink/migrating-dynatrace-metrics-exporter-otlp-exporter#migrate-applications +[Rust integration walk-through]: https://www.dynatrace.com/support/help/shortlink/otel-wt-rust -## Dynatrace OpenTelemetry Metrics Exporter for Rust -![Static Badge](https://img.shields.io/badge/status-deprecated-orange) -[![Crates.io: opentelemetry-dynatrace](https://img.shields.io/crates/v/opentelemetry-dynatrace.svg)](https://crates.io/crates/opentelemetry-dynatrace) -[![Documentation](https://docs.rs/opentelemetry-dynatrace/badge.svg)](https://docs.rs/opentelemetry-dynatrace) -[![LICENSE](https://img.shields.io/crates/l/opentelemetry-dynatrace)](./LICENSE) -[![GitHub Actions CI](https://github.com/open-telemetry/opentelemetry-rust/workflows/CI/badge.svg)](https://github.com/open-telemetry/opentelemetry-rust/actions?query=workflow%3ACI+branch%3Amain) -[![Slack](https://img.shields.io/badge/slack-@cncf/otel/rust-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C03GDP0H023) +## Notice Removal -> **Warning** -> Dynatrace supports native OpenTelemetry protocol (OTLP) ingest for traces, metrics and logs. -> Therefore, the proprietary Dynatrace OpenTelemetry metrics exporter is deprecated in favor of exporting via OTLP/HTTP. 
-> -> The exporter is still available but after the end of 2023, no support, updates, or compatibility with newer OTel versions will be provided. -> -> Please refer to the [migration guide](https://www.dynatrace.com/support/help/shortlink/migrating-dynatrace-metrics-exporter-otlp-exporter#migrate-applications) for instructions on how to migrate to the OTLP HTTP exporter, as well as reasoning and benefits for this transition. -> -> For an example on how to configure the OTLP exporter in a Rust application, check out the [Rust integration walk-through](https://www.dynatrace.com/support/help/shortlink/otel-wt-rust) page in the Dynatrace documentation. - -### Overview - -[`OpenTelemetry`] is a collection of tools, APIs, and SDKs used to instrument, -generate, collect, and export telemetry data (metrics, logs, and traces) for -analysis in order to understand your software's performance and behavior. This -crate provides additional propagators and exporters for sending telemetry data -to [`Dynatrace`]. - -### Exporter features - -* **Metrics** - Ingest metric data to Dynatrace using the [Dynatrace Metrics ingestion protocol]. - -This exporter only supports the ingestion of metric data. For trace data, use -[`opentelemetry-otlp`] as described in the -[Dynatrace documentation for Rust]. This exporter is based on the OpenTelemetry -Metrics SDK for Rust, which is currently in an alpha state and neither -considered stable nor complete as of this writing. As such, this exporter is -not intended for production use until the underlying OpenTelemetry Metrics API -and SDK are stable. See [`open-telemetry/opentelemetry-rust`] for the current -state of the OpenTelemetry SDK for Rust. - -[Dynatrace]: https://www.dynatrace.com/ -[Dynatrace Metrics ingestion protocol]: https://www.dynatrace.com/support/help/how-to-use-dynatrace/metrics/metric-ingestion/metric-ingestion-protocol/ -[Dynatrace documentation for Rust]: https://www.dynatrace.com/support/help/shortlink/opent-rust -[`open-telemetry/opentelemetry-rust`]: https://github.com/open-telemetry/opentelemetry-rust - -#### Examples - -The examples directory contains an [advanced example](../examples/dynatrace) -showing the ingestion of trace data and metric data together. - -[`opentelemetry-otlp`]: https://crates.io/crates/opentelemetry-otlp -[`opentelemetry-dynatrace`]: https://crates.io/crates/opentelemetry-dynatrace - -### Performance - -For optimal performance, a batch exporter is used. You can enable the `rt-tokio` -feature flag to use the [`tokio`] runtime, or enable the `rt-async-std` feature -flag to use the [`async-std`] runtime to have a batch exporter configured for -you automatically. - -[`tokio`]: https://tokio.rs -[`async-std`]: https://async.rs - -### Choosing an HTTP client - -The HTTP client that this exporter will use can be overridden with feature -flags. By default the `reqwest-client` feature flag is enabled which will use -the [`reqwest`] http client. - -- `reqwest-client` (enabled by default): use the [`reqwest`] http client to send metric data. -- `reqwest-tls` (enabled by default): use the [`reqwest`] http client with [`rustls`] to enable TLS support. -- `reqwest-blocking-client`: use the [`reqwest`] blocking http client to send metric data. -- `isahc-client`: use the [`isahc`] http client to send metric data. -- `surf-client`: use the [`surf`] http client to send metric data. - -You can also configure your own http client implementation using the `HttpClient` trait. 
- -[`reqwest`]: https://docs.rs/reqwest/latest/reqwest/ -[`rustls`]: https://docs.rs/rustls/latest/rustls/ -[`isahc`]: https://docs.rs/isahc/latest/isahc/ -[`surf`]: https://docs.rs/surf/latest/surf/ - -### WebAssembly - -WebAssembly support can be enabled with the `wasm` feature flag. - -[`Dynatrace`]: https://www.dynatrace.com/ -[`OpenTelemetry`]: https://crates.io/crates/opentelemetry +This README and directory can be removed after any time in 2024 at least 6 months from the last release date of `opentelemetry-dynatrace`. diff --git a/opentelemetry-dynatrace/src/exporter/mod.rs b/opentelemetry-dynatrace/src/exporter/mod.rs deleted file mode 100644 index 8c0213ecdd..0000000000 --- a/opentelemetry-dynatrace/src/exporter/mod.rs +++ /dev/null @@ -1,52 +0,0 @@ -//! OpenTelemetry Dynatrace Exporter Configuration -//! -/// Configuration for the Dynatrace exporter. -/// -/// ## Examples -/// -/// ```no_run -/// use opentelemetry_dynatrace::ExportConfig; -/// # fn main() { -/// let exporter_config = ExportConfig::default() -/// .with_token("*****".to_string()); -/// # } -/// ``` -#[derive(Debug, Default)] -pub struct ExportConfig { - /// The address of the Dynatrace endpoint. - /// - /// # Examples - /// - /// * Managed https://{your-domain}/e/{your-environment-id}/api/v2/metrics/ingest - /// * SaaS https://{your-environment-id}.live.dynatrace.com/api/v2/metrics/ingest - /// * Environment ActiveGate https://{your-activegate-domain}/e/{your-environment-id}/api/v2/metrics/ingest - /// - /// If no endpoint is defined, the endpoint of the locally installed Dynatrace OneAgent will be used as a fallback. - pub endpoint: Option, - - /// The API token for authentication. - /// - /// Authentication is not required when using the locally installed Dynatrace OneAgent as an endpoint. - pub token: Option, -} - -impl ExportConfig { - /// Set the address of the Dynatrace endoint. - pub fn with_endpoint>(mut self, endpoint: T) -> Self { - self.endpoint = Some(endpoint.into()); - self - } - - /// Set the API token for authentication. - pub fn with_token>(mut self, token: T) -> Self { - self.token = Some(token.into()); - self - } - - /// Set the export configuration. This will override all previous configuration. - pub fn with_export_config(mut self, export_config: ExportConfig) -> Self { - self.endpoint = export_config.endpoint; - self.token = export_config.token; - self - } -} diff --git a/opentelemetry-dynatrace/src/lib.rs b/opentelemetry-dynatrace/src/lib.rs deleted file mode 100644 index 08c7658062..0000000000 --- a/opentelemetry-dynatrace/src/lib.rs +++ /dev/null @@ -1,309 +0,0 @@ -//! The OpenTelemetry Dynatrace Exporter supports exporting metric data to [Dynatrace]. -//! -//! This exporter only supports the ingestion of metric data using the [Dynatrace Metrics ingestion protocol]. -//! For trace data, use [`opentelemetry-otlp`] as described in the [Dynatrace documentation for Rust]. -//! -//! # Quickstart -//! -//! You can start a new Dynatrace metrics pipeline by using [`DynatracePipelineBuilder::metrics()`]. -//! -//! ```no_run -//! use opentelemetry::runtime; -//! use opentelemetry_sdk::export::metrics::aggregation::cumulative_temporality_selector; -//! use opentelemetry_sdk::metrics::selectors; -//! use opentelemetry_sdk::util::tokio_interval_stream; -//! use opentelemetry_dynatrace::ExportConfig; -//! -//! fn main() -> Result<(), Box> { -//! // First, create a Dynatrace exporter builder. This is a minimal example. The exporter -//! 
// will try to connect to the local OneAgent by default, if no endpoint is set. -//! let dynatrace_exporter = opentelemetry_dynatrace::new_exporter(); -//! -//! // Then pass the exporter into pipeline builder -//! let meter = opentelemetry_dynatrace::new_pipeline() -//! .metrics( -//! selectors::simple::inexpensive(), -//! cumulative_temporality_selector(), -//! runtime::Tokio, -//! ) -//! .with_exporter(dynatrace_exporter) -//! .build(); -//! -//! Ok(()) -//! } -//! ``` -//! -//! # Kitchen Sink Full Configuration -//! -//! Example showing how to override all configuration options. -//! -//! Generally there are two parts of configuration. One part is metrics configuration. -//! Users can set metrics configuration using [`DynatraceMetricsPipeline`]. The other part is the -//! exporter configuration. Users can set the exporter configuration using [`ExportConfig`]. -//! -//! ``` -//! # #[cfg(feature = "reqwest-client")] { -//! use opentelemetry::runtime; -//! use opentelemetry_sdk::metrics::selectors; -//! use opentelemetry_sdk::export::metrics::aggregation::cumulative_temporality_selector; -//! use opentelemetry::KeyValue; -//! use opentelemetry_dynatrace::transform::DimensionSet; -//! use opentelemetry_dynatrace::ExportConfig; -//! use std::collections::HashMap; -//! use std::time::Duration; -//! -//! fn main() -> Result<(), Box> { -//! let export_config = ExportConfig { -//! endpoint: Some("https://example.live.dynatrace.com/api/v2/metrics/ingest".to_string()), -//! token: Some("*****".to_string()) -//! }; -//! -//! let meter = opentelemetry_dynatrace::new_pipeline() -//! .metrics( -//! selectors::simple::inexpensive(), -//! cumulative_temporality_selector(), -//! runtime::Tokio, -//! ) -//! .with_exporter( -//! opentelemetry_dynatrace::new_exporter() -//! .with_export_config( -//! export_config -//! // The export config can also be set by using the with_* functions -//! .with_endpoint("https://example.live.dynatrace.com/api/v2/metrics/ingest") -//! .with_token("*****".to_string()) -//! ) -//! .with_headers(HashMap::from([ -//! (http::header::USER_AGENT.to_string(), "custom-ua-string".to_string()), -//! ])) -//! ) -//! // Send metric data in batches every 3 seconds -//! .with_period(Duration::from_secs(3)) -//! .with_timeout(Duration::from_secs(10)) -//! //Prefix all metric data keys with a custom prefix -//! .with_prefix("quickstart".to_string()) -//! // Key value pairs that will be added to all metric data -//! .with_default_dimensions(DimensionSet::from(vec![ -//! KeyValue::new("version", env!("CARGO_PKG_VERSION")), -//! ])) -//! .build(); -//! -//! Ok(()) -//! } -//! # } -//! ``` -//! [Dynatrace]: https://www.dynatrace.com/ -//! [Dynatrace Metrics ingestion protocol]: https://www.dynatrace.com/support/help/how-to-use-dynatrace/metrics/metric-ingestion/metric-ingestion-protocol/ -//! [Dynatrace documentation for Rust]: https://www.dynatrace.com/support/help/extend-dynatrace/opentelemetry/opentelemetry-ingest/opent-rust/ -//! 
[`opentelemetry-otlp`]: https://crates.io/crates/opentelemetry-otlp -#![warn( - future_incompatible, - missing_debug_implementations, - missing_docs, - nonstandard_style, - rust_2018_idioms, - unreachable_pub, - unused -)] -#![allow(elided_lifetimes_in_paths)] -#![cfg_attr( - docsrs, - feature(doc_cfg, doc_auto_cfg), - deny(rustdoc::broken_intra_doc_links) -)] -#![cfg_attr(test, deny(warnings))] -mod exporter; - -#[cfg(feature = "metrics")] -mod metric; - -pub mod transform; -pub use crate::exporter::ExportConfig; - -#[cfg(feature = "metrics")] -pub use crate::metric::{DynatraceMetricsPipeline, MetricsExporter}; - -use opentelemetry_http::HttpClient; -use opentelemetry_sdk::export::ExportError; -use std::collections::HashMap; - -/// Dynatrace pipeline builder. -#[derive(Debug)] -pub struct DynatracePipelineBuilder; - -/// Configuration of the http transport. -#[derive(Debug)] -#[cfg_attr( - all( - not(feature = "reqwest-blocking-client"), - not(feature = "reqwest-client"), - not(feature = "surf-client"), - not(feature = "isahc-client"), - ), - derive(Default) -)] -pub struct HttpConfig { - /// Default http client to be used for outbound requests. - pub client: Option>, - - /// Additional http headers to be set when communicating with the outbound endpoint. - pub headers: Option>, -} - -#[cfg(any( - feature = "reqwest-blocking-client", - feature = "reqwest-client", - feature = "surf-client", - feature = "isahc-client", -))] -impl Default for HttpConfig { - fn default() -> Self { - HttpConfig { - #[cfg(feature = "reqwest-client")] - client: Some(Box::new(reqwest::Client::new())), - #[cfg(all( - not(feature = "reqwest-client"), - not(feature = "surf-client"), - not(feature = "isahc-client"), - feature = "reqwest-blocking-client" - ))] - client: Some(Box::new(reqwest::blocking::Client::new())), - #[cfg(all( - not(feature = "reqwest-client"), - not(feature = "reqwest-blocking-client"), - not(feature = "isahc-client"), - feature = "surf-client" - ))] - client: Some(Box::new(surf::Client::new())), - #[cfg(all( - not(feature = "reqwest-client"), - not(feature = "reqwest-blocking-client"), - not(feature = "surf-client"), - feature = "isahc-client" - ))] - client: Some(Box::new(isahc::HttpClient::new().unwrap())), - #[cfg(all( - not(feature = "reqwest-client"), - not(feature = "reqwest-blocking-client"), - not(feature = "surf-client"), - not(feature = "isahc-client") - ))] - client: None, - headers: None, - } - } -} - -/// Dynatrace exporter builder. -#[derive(Debug)] -pub struct DynatraceExporterBuilder { - pub(crate) export_config: ExportConfig, - pub(crate) http_config: HttpConfig, -} - -impl Default for DynatraceExporterBuilder { - fn default() -> Self { - DynatraceExporterBuilder { - http_config: HttpConfig::default(), - export_config: ExportConfig { - ..ExportConfig::default() - }, - } - } -} - -impl DynatraceExporterBuilder { - /// Set the http client to be used for outbound requests. - pub fn with_http_client(mut self, client: T) -> Self { - self.http_config.client = Some(Box::new(client)); - self - } - - /// Set additional http headers to to be sent when communicating with the outbound endpoint. - pub fn with_headers(mut self, headers: HashMap) -> Self { - self.http_config.headers = Some(headers); - self - } - - /// Set the export config. This will override all previous configuration. - pub fn with_export_config(mut self, export_config: ExportConfig) -> Self { - self.export_config = export_config; - self - } -} - -/// Create a new pipeline builder with the default configuration. 
-/// -/// ## Examples -/// -/// ```no_run -/// use opentelemetry::runtime; -/// use opentelemetry_sdk::export::metrics::aggregation::cumulative_temporality_selector; -/// use opentelemetry_sdk::metrics::selectors; -/// # fn main() -> Result<(), Box> { -/// let meter = opentelemetry_dynatrace::new_pipeline() -/// .metrics( -/// selectors::simple::inexpensive(), -/// cumulative_temporality_selector(), -/// runtime::Tokio, -/// ); -/// # Ok(()) -/// # } -/// ``` -pub fn new_pipeline() -> DynatracePipelineBuilder { - DynatracePipelineBuilder -} - -/// Create a new `DynatraceExporterBuilder` with the default configuration. -/// -/// ## Examples -/// -/// ```no_run -/// # fn main() -> Result<(), Box> { -/// let dynatrace_exporter = opentelemetry_dynatrace::new_exporter(); -/// # Ok(()) -/// } -/// ``` -pub fn new_exporter() -> DynatraceExporterBuilder { - DynatraceExporterBuilder::default() -} - -/// Wrap type for errors from this crate. -#[derive(thiserror::Error, Debug)] -pub enum Error { - /// The pipeline will need a exporter to complete setup. Throw this error if none is provided. - #[error("no exporter builder is provided, please provide one using with_exporter() method")] - NoExporterBuilder, - - /// Invalid URI. - #[error("invalid URI {0}")] - InvalidUri(#[from] http::uri::InvalidUri), - - /// Http requests failed because no http client is provided. - #[error( - "no http client, you must select one from features or provide your own implementation" - )] - NoHttpClient, - - /// Http requests failed. - #[error("http request failed with {0}")] - RequestFailed(#[from] http::Error), - - /// The provided value is invalid in http headers. - #[error("http header value error {0}")] - InvalidHeaderValue(#[from] http::header::InvalidHeaderValue), - - /// The provided name is invalid in http headers. - #[error("http header name error {0}")] - InvalidHeaderName(#[from] http::header::InvalidHeaderName), - - /// The lock in exporters has been poisoned. - #[cfg(feature = "metrics")] - #[error("the lock of the {0} has been poisoned")] - PoisonedLock(&'static str), -} - -impl ExportError for Error { - fn exporter_name(&self) -> &'static str { - "dynatrace" - } -} diff --git a/opentelemetry-dynatrace/src/metric.rs b/opentelemetry-dynatrace/src/metric.rs deleted file mode 100644 index 7f9a56d8ec..0000000000 --- a/opentelemetry-dynatrace/src/metric.rs +++ /dev/null @@ -1,448 +0,0 @@ -//! Dynatrace Metric Exporter. -//! -//! Defines an `Exporter` to send metric data to Dynatrace using the [Dynatrace Metrics ingestion protocol]. -//! -//! 
[Metrics ingestion protocol]: https://www.dynatrace.com/support/help/how-to-use-dynatrace/metrics/metric-ingestion/metric-ingestion-protocol/ -#![allow(unused_attributes)] -use crate::exporter::ExportConfig; -use crate::transform::record_to_metric_line; -use crate::transform::{DimensionSet, MetricLine}; -use crate::{DynatraceExporterBuilder, DynatracePipelineBuilder, Error}; -use http::{ - header::{HeaderName, HeaderValue, AUTHORIZATION, CONTENT_TYPE, USER_AGENT}, - Method, Uri, Version, -}; -use opentelemetry::metrics::Result; -use opentelemetry::{global, Context}; -use opentelemetry_http::HttpClient; -use opentelemetry_sdk::export::metrics::aggregation::{ - AggregationKind, Temporality, TemporalitySelector, -}; -use opentelemetry_sdk::export::metrics::{AggregatorSelector, InstrumentationLibraryReader}; -use opentelemetry_sdk::metrics::controllers::BasicController; -use opentelemetry_sdk::metrics::sdk_api::Descriptor; -use opentelemetry_sdk::metrics::{controllers, processors}; -use opentelemetry_sdk::runtime::Runtime; -use opentelemetry_sdk::{export::metrics, Resource}; -use std::collections::HashMap; -use std::convert::TryFrom; -use std::fmt::{Debug, Formatter, Write}; -use std::time; - -#[cfg(any(feature = "rt-tokio", feature = "rt-async-std"))] -use std::sync::{Arc, Mutex}; - -/// The default Dynatrace OneAgent endpoint. -const DEFAULT_ONEAGENT_ENDPOINT: &str = "http://localhost:14499/metrics/ingest"; - -/// The default user agent string. -const DEFAULT_USER_AGENT: &str = "opentelemetry-metric-rust"; - -impl DynatracePipelineBuilder { - /// Create a Dynatrace metrics pipeline. - pub fn metrics( - self, - aggregator_selector: AS, - temporality_selector: TS, - rt: RT, - ) -> DynatraceMetricsPipeline - where - AS: AggregatorSelector + Send + Sync, - TS: TemporalitySelector + Clone + Send + Sync, - RT: Runtime, - { - DynatraceMetricsPipeline { - rt, - aggregator_selector, - temporality_selector, - exporter_pipeline: None, - resource: None, - period: None, - timeout: None, - prefix: None, - default_dimensions: None, - timestamp: true, - } - } -} - -#[derive(Debug)] -#[non_exhaustive] -pub struct MetricsExporterBuilder { - builder: DynatraceExporterBuilder, -} - -impl MetricsExporterBuilder { - /// Build a Dynatrace metrics exporter with given configuration. - fn build_metrics_exporter( - self, - temporality_selector: TS, - prefix: Option, - default_dimensions: Option, - timestamp: bool, - ) -> Result - where - TS: TemporalitySelector + Clone + Sync + Send + 'static, - { - MetricsExporter::new::( - self.builder.export_config, - self.builder.http_config.client.unwrap(), - self.builder.http_config.headers, - prefix, - default_dimensions, - timestamp, - temporality_selector, - ) - } -} - -impl From for MetricsExporterBuilder { - fn from(exporter: DynatraceExporterBuilder) -> Self { - MetricsExporterBuilder { builder: exporter } - } -} - -/// Pipeline to build Dynatrace metrics exporter. 
-#[derive(Debug)] -pub struct DynatraceMetricsPipeline -where - AS: AggregatorSelector + Send + Sync + 'static, - TS: TemporalitySelector + Clone + Send + Sync + 'static, - RT: Runtime, -{ - rt: RT, - aggregator_selector: AS, - temporality_selector: TS, - exporter_pipeline: Option, - resource: Option, - period: Option, - timeout: Option, - prefix: Option, - default_dimensions: Option, - timestamp: bool, -} - -impl DynatraceMetricsPipeline -where - AS: AggregatorSelector + Send + Sync + 'static, - TS: TemporalitySelector + Clone + Send + Sync + 'static, - RT: Runtime, -{ - /// Build with resource - pub fn with_resource(self, resource: Resource) -> Self { - DynatraceMetricsPipeline { - resource: Some(resource), - ..self - } - } - - /// Build with an exporter. - pub fn with_exporter>(self, pipeline: B) -> Self { - DynatraceMetricsPipeline { - exporter_pipeline: Some(pipeline.into()), - ..self - } - } - - /// Build with a timeout. - pub fn with_timeout(self, timeout: time::Duration) -> Self { - DynatraceMetricsPipeline { - timeout: Some(timeout), - ..self - } - } - - /// Set the frequency in which metric data is exported. - pub fn with_period(self, period: time::Duration) -> Self { - DynatraceMetricsPipeline { - period: Some(period), - ..self - } - } - - /// Set the prefix to prepend to all metric data. - pub fn with_prefix(self, prefix: String) -> Self { - DynatraceMetricsPipeline { - prefix: Some(prefix), - ..self - } - } - - /// Set default dimensions to all metric data. - pub fn with_default_dimensions(self, default_dimensions: DimensionSet) -> Self { - DynatraceMetricsPipeline { - default_dimensions: Some(default_dimensions), - ..self - } - } - - /// Set the timestamp to all metric data. - /// If disabled, the ingestion time of the Dynatrace server will be used automatically. - /// Adding timestamps should be disabled in environments, where the system time is unreliable. - pub fn with_timestamp(self, value: bool) -> Self { - DynatraceMetricsPipeline { - timestamp: value, - ..self - } - } - - /// Build the push controller. - pub fn build(self) -> Result { - let exporter = self - .exporter_pipeline - .ok_or(Error::NoExporterBuilder)? - .build_metrics_exporter( - self.temporality_selector.clone(), - self.prefix, - self.default_dimensions, - self.timestamp, - )?; - - let mut builder = controllers::basic(processors::factory( - self.aggregator_selector, - self.temporality_selector, - )) - .with_exporter(exporter); - - if let Some(period) = self.period { - builder = builder.with_collect_period(period); - } - if let Some(timeout) = self.timeout { - builder = builder.with_collect_timeout(timeout) - } - if let Some(resource) = self.resource { - builder = builder.with_resource(resource); - } - let controller = builder.build(); - controller.start(&Context::current(), self.rt)?; - - global::set_meter_provider(controller.clone()); - - Ok(controller) - } -} - -enum ClientMessage { - Export(Box>>), - Shutdown, -} - -/// Dynatrace metrics exporter. -pub struct MetricsExporter { - #[cfg(feature = "rt-tokio")] - sender: Arc>>, - - #[cfg(all(not(feature = "rt-tokio"), feature = "rt-async-std"))] - sender: Arc>>, - - endpoint: Uri, - - token: Option, - - headers: Option>, - - prefix: Option, - - default_dimensions: Option, - - timestamp: bool, - - temporality_selector: Arc, -} - -impl Debug for MetricsExporter { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - f.debug_struct("Dynatrace Metrics Exporter").finish() - } -} - -impl MetricsExporter { - /// Create a new `MetricsExporter`. 
- pub fn new( - export_config: ExportConfig, - client: Box, - headers: Option>, - prefix: Option, - default_dimensions: Option, - timestamp: bool, - temporality_selector: T, - ) -> Result { - let uri: Uri = if let Some(endpoint) = export_config.endpoint { - endpoint.parse() - } else { - DEFAULT_ONEAGENT_ENDPOINT.parse() - } - .map_err::(Into::into)?; - - #[cfg(feature = "rt-tokio")] - let (sender, mut receiver) = tokio::sync::mpsc::channel::(2); - - #[cfg(feature = "rt-tokio")] - tokio::spawn(Box::pin(async move { - while let Some(msg) = receiver.recv().await { - match msg { - ClientMessage::Export(req) => { - let _ = client.send(*req).await; - } - ClientMessage::Shutdown => { - break; - } - } - } - })); - - #[cfg(all(not(feature = "rt-tokio"), feature = "rt-async-std"))] - let (sender, mut receiver) = futures_channel::mpsc::channel::(2); - - #[cfg(all(not(feature = "rt-tokio"), feature = "rt-async-std"))] - async_std::task::spawn(Box::pin(async move { - loop { - match receiver.try_next() { - Err(_) => break, - Ok(result) => match result { - None => continue, - Some(msg) => match msg { - ClientMessage::Export(req) => { - let _ = client.send(*req).await; - } - ClientMessage::Shutdown => break, - }, - }, - } - } - })); - - Ok(MetricsExporter { - sender: Arc::new(Mutex::new(sender)), - endpoint: uri, - token: export_config.token, - headers, - prefix, - default_dimensions, - timestamp, - temporality_selector: Arc::new(temporality_selector), - }) - } -} - -impl TemporalitySelector for MetricsExporter { - fn temporality_for(&self, descriptor: &Descriptor, kind: &AggregationKind) -> Temporality { - self.temporality_selector.temporality_for(descriptor, kind) - } -} - -impl metrics::MetricsExporter for MetricsExporter { - /// Export metric data to Dynatrace - /// - fn export( - &self, - _cx: &Context, - _res: &Resource, - reader: &dyn InstrumentationLibraryReader, - ) -> Result<()> { - let mut metric_line_data: Vec = Vec::default(); - reader.try_for_each(&mut |_lib, reader| { - reader.try_for_each(self, &mut |record| match record_to_metric_line( - record, - self.temporality_selector.as_ref(), - self.prefix.clone(), - self.default_dimensions.clone(), - self.timestamp, - ) { - Ok(metric_line) => { - metric_line_data.extend(metric_line); - Ok(()) - } - Err(err) => Err(err), - }) - })?; - - if metric_line_data.is_empty() { - Ok(()) - } else { - metric_line_data - // Send chunks of 1000 metric line data elements - .chunks(1000) - .try_for_each(|metric_line_data| { - // Transform the metric line data elements to strings - let metric_lines = metric_line_data - .iter() - .enumerate() - .fold(String::new(), |mut acc, (idx, value)| { - let offset = acc.len(); - if idx > 0 { - acc.push('\n'); - } - - if write!(acc, "{}", value).is_err() { - acc.truncate(offset); - } - - acc - }) - .as_bytes() - .to_vec(); - - // Create a new http request - let mut req = http::Request::builder() - .method(Method::POST) - .uri(self.endpoint.clone()) - .header(CONTENT_TYPE, "text/plain") - .header(USER_AGENT, DEFAULT_USER_AGENT) - .version(Version::HTTP_11) - .body(metric_lines) - .map_err::(Into::into)?; - - if let Some(token) = self.token.clone() { - let token = format!("Api-Token {}", token); - - let value = - HeaderValue::from_str(&token).map_err::(Into::into)?; - req.headers_mut().insert(AUTHORIZATION, value); - } - - if let Some(headers) = self.headers.clone() { - for (key, value) in headers { - let key = HeaderName::try_from(&key) - .map_err::(Into::into)?; - let value = HeaderValue::from_str(value.as_ref()) - 
.map_err::(Into::into)?; - req.headers_mut().insert(key, value); - } - } - - #[cfg(feature = "rt-tokio")] - self.sender - .lock() - .map(|sender| { - let _ = sender.try_send(ClientMessage::Export(Box::new(req))); - }) - .map_err(|_| Error::PoisonedLock("dynatrace metrics exporter"))?; - - #[cfg(all(not(feature = "rt-tokio"), feature = "rt-async-std"))] - self.sender - .lock() - .map(|mut sender| { - let _ = sender.try_send(ClientMessage::Export(Box::new(req))); - }) - .map_err(|_| Error::PoisonedLock("dynatrace metrics exporter"))?; - - Ok(()) - }) - } - } -} - -impl Drop for MetricsExporter { - fn drop(&mut self) { - #[cfg(feature = "rt-tokio")] - let _sender_lock_guard = self.sender.lock().map(|sender| { - let _ = sender.try_send(ClientMessage::Shutdown); - }); - - #[cfg(all(not(feature = "rt-tokio"), feature = "rt-async-std"))] - let _sender_lock_guard = self.sender.lock().map(|mut sender| { - let _ = sender.try_send(ClientMessage::Shutdown); - }); - } -} diff --git a/opentelemetry-dynatrace/src/transform/common.rs b/opentelemetry-dynatrace/src/transform/common.rs deleted file mode 100644 index 6925188417..0000000000 --- a/opentelemetry-dynatrace/src/transform/common.rs +++ /dev/null @@ -1,24 +0,0 @@ -use std::time::{Duration, SystemTime, UNIX_EPOCH}; - -/// Return the numeric value corresponding to the time for the specified date -/// according to universal time. The value returned is the number of milliseconds -/// since 1 January 1970 00:00:00. -pub(crate) fn get_time(time: SystemTime) -> u64 { - time.duration_since(UNIX_EPOCH) - .unwrap_or_else(|_| Duration::from_secs(0)) - .as_millis() as u64 -} - -#[cfg(test)] -mod tests { - use super::*; - use std::time::{Duration, UNIX_EPOCH}; - - #[test] - fn test_get_time() { - assert_eq!(0, get_time(UNIX_EPOCH)); - assert_eq!(0, get_time(UNIX_EPOCH + Duration::from_nanos(1))); - assert_eq!(1, get_time(UNIX_EPOCH + Duration::from_millis(1))); - assert_eq!(1000, get_time(UNIX_EPOCH + Duration::from_secs(1))); - } -} diff --git a/opentelemetry-dynatrace/src/transform/metrics.rs b/opentelemetry-dynatrace/src/transform/metrics.rs deleted file mode 100644 index 585659434a..0000000000 --- a/opentelemetry-dynatrace/src/transform/metrics.rs +++ /dev/null @@ -1,987 +0,0 @@ -//! OpenTelemetry Dynatrace Metrics -use crate::transform::common::get_time; -use opentelemetry::attributes::merge_iters; -use opentelemetry::metrics::MetricsError; -use opentelemetry::{global, Key, KeyValue, Value}; -use opentelemetry_sdk::export::metrics::aggregation::{Count, Temporality, TemporalitySelector}; -use opentelemetry_sdk::metrics::aggregators::{ - HistogramAggregator, LastValueAggregator, SumAggregator, -}; -use opentelemetry_sdk::{ - export::metrics::{ - aggregation::{Histogram as SdkHistogram, LastValue, Sum as SdkSum}, - Record, - }, - metrics::sdk_api::{Number, NumberKind}, -}; -use std::borrow::Cow; -use std::cmp; -use std::collections::{btree_map, BTreeMap}; -use std::fmt; -use std::fmt::Write; -use std::iter::{self, FromIterator}; - -/// Source of the metric data. -const METRICS_SOURCE: Key = Key::from_static_str("dt.metrics.source"); - -/// Dynatrace metric ingestion protocol line key. -#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub struct MetricKey(Cow<'static, str>); - -impl MetricKey { - /// Create a new `MetricKey`. - pub fn new>>(value: S) -> Self { - MetricKey(value.into()) - } - - /// Create a new const `MetricKey`. 
- pub const fn from_static_str(value: &'static str) -> Self { - MetricKey(Cow::Borrowed(value)) - } - - /// Returns a reference to the underlying key name. - pub fn as_str(&self) -> &str { - self.0.as_ref() - } -} - -impl From<&'static str> for MetricKey { - /// Convert a `&str` to a `MetricKey`. - fn from(key_str: &'static str) -> Self { - MetricKey(Cow::from(key_str)) - } -} - -impl From for MetricKey { - /// Convert a `String` to a `MetricKey`. - fn from(string: String) -> Self { - MetricKey(Cow::from(string)) - } -} - -impl From for String { - /// Converts `MetricKey` instances into `String`. - fn from(key: MetricKey) -> Self { - key.0.into_owned() - } -} - -impl fmt::Display for MetricKey { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut prev_char_underscore = false; - let mut escaped = self - .0 - .as_ref() - .chars() - .filter_map(|c| { - if c == '.' || c == '-' || c == '_' || c.is_numeric() || c.is_ascii_alphabetic() { - prev_char_underscore = false; - Some(c) - } else if !prev_char_underscore { - prev_char_underscore = true; - Some('_') - } else { - None - } - }) - .peekable(); - - // The maximum metric key length is 250 characters - if escaped - .peek() - .map_or(false, |c| c == &'_' || c.is_ascii_alphabetic()) - { - fmt.write_str(&escaped.take(250).collect::())?; - } else { - // The metric key starts with a non-ASCII alphabetic character and needs to be prefixed - // with an underscore - fmt.write_str(&"_".chars().chain(escaped.take(249)).collect::())?; - } - - Ok(()) - } -} - -/// An immutable set of distinct metric dimensions. -#[derive(Clone, Debug, Default)] -pub struct DimensionSet { - dimensions: BTreeMap, -} - -impl DimensionSet { - /// The dimension set length. - pub fn len(&self) -> usize { - self.dimensions.len() - } - - /// Check if the set of dimensions is empty. - pub fn is_empty(&self) -> bool { - self.dimensions.is_empty() - } - - /// Iterate over the dimension key value pairs. - pub fn iter(&self) -> Iter<'_> { - self.into_iter() - } -} - -impl fmt::Display for DimensionSet { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - let val = self - .iter() - .enumerate() - .fold(String::new(), |mut acc, (idx, (key, value))| { - let offset = acc.len(); - if idx > 0 { - acc.push(',') - } - - let mut prev_char_underscore = false; - - // The maximum dimension key length is 100 characters - let key = key - .as_str() - .chars() - .filter_map(|c| { - if c == '.' 
- || c == '-' - || c == '_' - || c.is_numeric() - || c.is_ascii_alphabetic() - { - prev_char_underscore = false; - Some(c) - } else if !prev_char_underscore { - prev_char_underscore = true; - Some('_') - } else { - None - } - }) - .take(100) - .collect::() - .to_lowercase(); - - if write!(acc, "{}", key).is_err() { - acc.truncate(offset); - return acc; - } - - acc.push('='); - - prev_char_underscore = false; - - // The maximum dimension value length is 255 characters - let value = value - .as_str() - .chars() - .filter_map(|c| { - if c.is_numeric() || c.is_ascii() { - prev_char_underscore = false; - Some(c) - } else if !prev_char_underscore { - prev_char_underscore = true; - Some('_') - } else { - None - } - }) - .take(255) - .collect::(); - - if write!(acc, "{}", value).is_err() { - acc.truncate(offset); - return acc; - } - - acc - }); - - fmt.write_str(&val)?; - - Ok(()) - } -} - -impl PartialEq for DimensionSet { - fn eq(&self, other: &Self) -> bool { - self.dimensions.iter().eq(other.iter()) - } -} - -impl From> for DimensionSet { - fn from(collection: Vec) -> Self { - DimensionSet { - dimensions: collection - .into_iter() - .map(|kv| (kv.key, kv.value)) - .collect(), - } - } -} - -impl From> for DimensionSet { - fn from(collection: Vec<(Key, Value)>) -> Self { - let mut dimensions = BTreeMap::new(); - for (key, value) in collection.into_iter() { - dimensions.insert(key, value); - } - DimensionSet { dimensions } - } -} - -impl FromIterator for DimensionSet { - fn from_iter>(iter: I) -> Self { - let mut dimensions = BTreeMap::new(); - for kv in iter { - dimensions.insert(kv.key, kv.value); - } - DimensionSet { dimensions } - } -} - -impl FromIterator<(Key, Value)> for DimensionSet { - fn from_iter>(iter: I) -> Self { - let mut dimensions = BTreeMap::new(); - for (key, value) in iter { - dimensions.insert(key, value); - } - DimensionSet { dimensions } - } -} - -/// An iterator over the entries of a `DimensionSet`. -#[derive(Debug)] -pub struct Iter<'a>(btree_map::Iter<'a, Key, Value>); - -impl<'a> Iterator for Iter<'a> { - type Item = (&'a Key, &'a Value); - - fn next(&mut self) -> Option { - self.0.next() - } -} - -impl<'a> IntoIterator for &'a DimensionSet { - type Item = (&'a Key, &'a Value); - type IntoIter = Iter<'a>; - - fn into_iter(self) -> Self::IntoIter { - Iter(self.dimensions.iter()) - } -} - -/// [Dynatrace metrics ingestion protocol line]. -/// -/// [Dynatrace metrics ingestion protocol line]: https://www.dynatrace.com/support/help/how-to-use-dynatrace/metrics/metric-ingestion/metric-ingestion-protocol/ -#[derive(Clone, Debug)] -pub struct MetricLine { - kind: NumberKind, - key: MetricKey, - dimensions: Option, - min: Option, - max: Option, - sum: Option, - count: Option, - delta: Option, - gauge: Option, - timestamp: Option, -} - -impl MetricLine { - /// Create a new `MetricLine`. - pub fn new(key: MetricKey, kind: NumberKind) -> Self { - MetricLine { - key, - kind, - dimensions: None, - min: None, - max: None, - sum: None, - count: None, - delta: None, - gauge: None, - timestamp: None, - } - } - - /// Common attributes that apply to this metric line. - pub fn dimensions(mut self, dimensions: Option) -> Self { - self.dimensions = dimensions; - self - } - - /// The min value. - pub fn min(mut self, min: Option) -> Self { - self.min = min; - self - } - - /// The max value. - pub fn max(mut self, max: Option) -> Self { - self.max = max; - self - } - - /// The sum value. - pub fn sum(mut self, sum: Option) -> Self { - self.sum = sum; - self - } - - /// The count value. 
- pub fn count(mut self, count: Option) -> Self { - self.count = count; - self - } - - /// The delta value. - pub fn delta(mut self, delta: Option) -> Self { - self.delta = delta; - self - } - - /// The gauge value. - pub fn gauge(mut self, gauge: Option) -> Self { - self.gauge = gauge; - self - } - - /// The timestamp in UTC milliseconds. - /// Allowed range is between 1 hour into the past and 10 minutes into the future from now. - /// If no timestamp is provided, the ingestion time of the Dynatrace server will be used automatically. - pub fn timestamp(mut self, timestamp: Option) -> Self { - self.timestamp = timestamp; - self - } -} - -impl fmt::Display for MetricLine { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.write_str(&format!("{}", &self.key))?; - fmt.write_char(',')?; - - if let Some(dimensions) = self.dimensions.to_owned() { - fmt.write_str(&format!("{}", dimensions))?; - } - - if self.min.is_some() && self.max.is_some() && self.sum.is_some() && self.count.is_some() { - let min = self.min.to_owned().unwrap(); - let max = self.max.to_owned().unwrap(); - let sum = self.sum.to_owned().unwrap(); - - fmt.write_char(' ')?; - if min.partial_cmp(&self.kind, &sum) == Some(cmp::Ordering::Equal) - && max.partial_cmp(&self.kind, &sum) == Some(cmp::Ordering::Equal) - && self.count == Some(1) - { - fmt.write_fmt(format_args!("gauge,{}", convert(&self.kind, sum)))?; - } else { - fmt.write_fmt(format_args!( - "gauge,min={},max={},sum={},count={}", - convert(&self.kind, min), - convert(&self.kind, max), - sum.to_f64(&self.kind), - self.count.to_owned().unwrap(), - ))?; - } - } else if let Some(delta) = self.delta.to_owned() { - fmt.write_char(' ')?; - fmt.write_fmt(format_args!("count,delta={}", convert(&self.kind, delta)))?; - } else if let Some(gauge) = self.gauge.to_owned() { - fmt.write_char(' ')?; - fmt.write_fmt(format_args!("gauge,{}", convert(&self.kind, gauge)))?; - } - - if let Some(timestamp) = self.timestamp.to_owned() { - fmt.write_char(' ')?; - fmt.write_str(×tamp.to_string())?; - } - - Ok(()) - } -} - -impl PartialEq for MetricLine { - fn eq(&self, other: &Self) -> bool { - self.kind.eq(&other.kind) - && self.key.eq(&other.key) - && match (self.dimensions.clone(), other.dimensions.clone()) { - (Some(a), Some(b)) => a.eq(&b), - (None, None) => true, - _ => false, - } - && match (self.min.clone(), other.min.clone()) { - (Some(a), Some(b)) => a.partial_cmp(&self.kind, &b) == Some(cmp::Ordering::Equal), - (None, None) => true, - _ => false, - } - && match (self.max.clone(), other.max.clone()) { - (Some(a), Some(b)) => a.partial_cmp(&self.kind, &b) == Some(cmp::Ordering::Equal), - (None, None) => true, - _ => false, - } - && match (self.sum.clone(), other.sum.clone()) { - (Some(a), Some(b)) => a.partial_cmp(&self.kind, &b) == Some(cmp::Ordering::Equal), - (None, None) => true, - _ => false, - } - && self.count.eq(&other.count) - && match (self.delta.clone(), other.delta.clone()) { - (Some(a), Some(b)) => a.partial_cmp(&self.kind, &b) == Some(cmp::Ordering::Equal), - (None, None) => true, - _ => false, - } - && match (self.gauge.clone(), other.gauge.clone()) { - (Some(a), Some(b)) => a.partial_cmp(&self.kind, &b) == Some(cmp::Ordering::Equal), - (None, None) => true, - _ => false, - } - && self.timestamp.eq(&other.timestamp) - } -} - -/// Transform a record to a Dynatrace metrics ingestion protocol metric line. 
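For orientation, the `Display` implementations above serialize each `MetricLine` into one line of Dynatrace's text-based ingestion protocol: the sanitized metric key, a comma-separated dimension list, the payload (`gauge,<value>`, `gauge,min=..,max=..,sum=..,count=..`, or `count,delta=..`), and an optional millisecond timestamp. With illustrative key, dimension, and timestamp values (not taken from this code), the three shapes look like:

  my.metric,env=prod,dt.metrics.source=opentelemetry gauge,12 1667260800000
  my.metric,env=prod,dt.metrics.source=opentelemetry count,delta=12 1667260800000
  my.metric,env=prod,dt.metrics.source=opentelemetry gauge,min=0.3,max=0.3,sum=6,count=3 1667260800000

Only the `dt.metrics.source=opentelemetry` dimension is fixed; the exporter always appends it to whatever dimensions the record carries.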
-pub(crate) fn record_to_metric_line( - record: &Record, - temporality_selector: &dyn TemporalitySelector, - prefix: Option, - default_dimensions: Option, - timestamp: bool, -) -> Result, MetricsError> { - let aggregator = record.aggregator().ok_or(MetricsError::NoDataCollected)?; - let descriptor = record.descriptor(); - - let kind = descriptor.number_kind(); - - let key = if prefix.is_some() { - MetricKey::new(format!("{}.{}", prefix.unwrap(), descriptor.name())) - } else { - MetricKey::new(descriptor.name().to_string()) - }; - - let source_key = METRICS_SOURCE; - let source_value = Value::String("opentelemetry".into()); - - let iter = record - .attributes() - .iter() - .chain(iter::once((&source_key, &source_value))); - let dimensions = if let Some(default_dimensions) = default_dimensions { - DimensionSet::from_iter( - merge_iters(default_dimensions.iter(), iter).map(|(k, v)| (k.to_owned(), v.to_owned())), - ) - } else { - DimensionSet::from_iter(iter.map(|(k, v)| (k.to_owned(), v.to_owned()))) - }; - - let temporality = - temporality_selector.temporality_for(descriptor, aggregator.aggregation().kind()); - - let mut metric_line_data: Vec = Vec::with_capacity(1); - - if let Some(last_value) = aggregator.as_any().downcast_ref::() { - let (val, sample_time) = last_value.last_value()?; - let timestamp = if timestamp { - Some(get_time(sample_time)) - } else { - None - }; - - metric_line_data.push(MetricLine { - kind: kind.to_owned(), - key, - dimensions: Some(dimensions), - min: None, - max: None, - sum: None, - count: None, - delta: None, - gauge: Some(val), - timestamp, - }); - } else if let Some(sum) = aggregator.as_any().downcast_ref::() { - let val = sum.sum()?; - let timestamp = if timestamp { - Some(get_time(record.end_time().to_owned())) - } else { - None - }; - - let mut metric_line = MetricLine { - kind: kind.to_owned(), - key, - dimensions: Some(dimensions), - min: None, - max: None, - sum: None, - count: None, - delta: None, - gauge: None, - timestamp, - }; - - match temporality { - Temporality::Cumulative => metric_line.gauge = Some(val), - Temporality::Delta => metric_line.delta = Some(val), - other => global::handle_error(MetricsError::Other(format!( - "Unsupported temporality {:?}", - other - ))), - }; - - metric_line_data.push(metric_line); - } else if let Some(histogram) = aggregator.as_any().downcast_ref::() { - let (sum, count, buckets) = (histogram.sum()?, histogram.count()?, histogram.histogram()?); - let (counts, boundaries) = (buckets.counts(), buckets.boundaries()); - - let mut min_idx: i32 = -1; - let mut max_idx: i32 = -1; - - for (i, val) in counts.iter().enumerate() { - if val > &0.0 { - if min_idx == -1 { - min_idx = i as i32; - } - max_idx = i as i32; - } - } - - let min: f64 = if min_idx == -1 { - 0.0 - } else if min_idx == 0 { - boundaries[0] - } else { - boundaries[min_idx as usize - 1] - }; - - let max: f64 = if max_idx as usize == counts.len() - 1 { - boundaries[max_idx as usize - 1] - } else { - boundaries[max_idx as usize] - }; - - let timestamp = if timestamp { - Some(get_time(record.end_time().to_owned())) - } else { - None - }; - - metric_line_data.push(MetricLine { - kind: NumberKind::F64, - key, - dimensions: Some(dimensions), - min: Some(Number::from(min)), - max: Some(Number::from(max)), - sum: Some(Number::from(sum.to_f64(&NumberKind::I64))), - count: Some(count), - delta: None, - gauge: None, - timestamp, - }); - } - - Ok(metric_line_data) -} - -/// Converts the number to a string. 
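One subtlety in the histogram branch above deserves spelling out: the SDK's histogram aggregator only records bucket counts, so the exporter has to estimate `min` and `max` from the boundaries of the lowest and highest non-empty buckets. With boundaries `[0.1, 0.2, 0.3]` and recorded values 1, 2 and 3, every value lands in the overflow bucket, so both estimates clamp to the last boundary and the line reports `min=0.3,max=0.3` even though the true minimum is 1; this is exactly what the histogram test further down expects.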
-#[inline] -fn convert(kind: &NumberKind, number: Number) -> String { - match &kind { - NumberKind::U64 => number.to_u64(kind).to_string(), - NumberKind::I64 => number.to_i64(kind).to_string(), - NumberKind::F64 => number.to_f64(kind).to_string(), - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::transform::common::get_time; - use crate::transform::metrics::MetricLine; - use crate::transform::record_to_metric_line; - use opentelemetry::{attributes::AttributeSet, metrics::MetricsError}; - use opentelemetry::{Context, KeyValue}; - use opentelemetry_sdk::export::metrics::aggregation::{ - cumulative_temporality_selector, delta_temporality_selector, - }; - use opentelemetry_sdk::export::metrics::record; - use opentelemetry_sdk::metrics::aggregators::{ - histogram, last_value, Aggregator, SumAggregator, - }; - use opentelemetry_sdk::metrics::sdk_api::{Descriptor, InstrumentKind, Number, NumberKind}; - use std::borrow::Cow; - use std::sync::Arc; - use std::time::{Duration, SystemTime}; - - #[test] - fn test_key() { - fn key_data() -> Vec<(&'static str, Cow<'static, str>, Cow<'static, str>)> { - vec![ - ( - "keep if containing _-.", - "value.123_foo-bar23_foo-bar".into(), - "value.123_foo-bar23_foo-bar".into(), - ), - ( - "keep if starting with an underscore", - "_test".into(), - "_test".into(), - ), - ( - "replace with an underscore if starting with a digit", - "0123456789".into(), - "_0123456789".into(), - ), - ( - "add an underscore prefix if starting with /", - "/0123456789".into(), - "_0123456789".into(), - ), - ( - "add an underscore prefix if starting with :", - ":0123456789".into(), - "_0123456789".into(), - ), - ( - "add an underscore prefix if starting with ;", - ";0123456789".into(), - "_0123456789".into(), - ), - ( - "prefix with an underscore if starting with a dot", - ".test".into(), - "_.test".into(), - ), - ( - "replace with an underscore if starting with lowercase non-alphabetic character", - "ätest".into(), - "_test".into(), - ), - ( - "replace with an underscore if starting with uppercase non-alphabetic character", - "Ätest".into(), - "_test".into(), - ), - ( - "replace invalid characters", - "test/abc-123".into(), - "test_abc-123".into(), - ), - ( - "skip consecutively following underscores", - "test.äöüß_123".into(), - "test.__123".into(), - ), - ( - "skip replacing invalid characters with consecutively following underscores", - "test.äbc_123".into(), - "test._bc_123".into(), - ), - ( - "limit to 250 characters", - "a".repeat(251).into(), - "a".repeat(250).into(), - ), - ( - "limit to 250 characters with invalid first character", - format!("ä{}", "a".repeat(250)).into(), - format!("_{}", "a".repeat(249)).into(), - ), - ( - "valid input", - "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_0123456789".into(), - "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_0123456789".into(), - ), - ( - "valid input starting with an underscore", - "_ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_0123456789".into(), - "_ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_0123456789".into(), - ), - ] - } - - for (name, data, sanitized) in key_data() { - assert_eq!( - sanitized, - format!("{}", MetricKey::new(data)), - "{} doesn't match", - name - ) - } - } - - #[test] - fn test_record_to_metric_line() -> Result<(), MetricsError> { - let attributes = [("KEY", "VALUE"), ("test.abc_123-", "value.123_foo-bar")]; - let attribute_set = AttributeSet::from_attributes( - attributes - .iter() - .cloned() - .map(|(k, v)| opentelemetry::KeyValue::new(k, v)), - ); - let 
start_time = SystemTime::now(); - let end_time = SystemTime::now().checked_add(Duration::new(30, 0)).unwrap(); - let cx = Context::new(); - - // Sum - { - let descriptor = Descriptor::new( - "test_sum".to_string(), - InstrumentKind::Counter, - NumberKind::I64, - None, - None, - ); - let aggregator = SumAggregator::default(); - let val = Number::from(12_i64); - aggregator.update(&cx, &val, &descriptor)?; - let wrapped_aggregator: Arc = Arc::new(aggregator); - let record = record( - &descriptor, - &attribute_set, - Some(&wrapped_aggregator), - start_time, - end_time, - ); - - // ExportKindSelector::Cumulative - let metric_line_data = record_to_metric_line( - &record, - &cumulative_temporality_selector(), - None, - None, - true, - )?; - - let dimensions = DimensionSet::from(vec![ - KeyValue::new("KEY", "VALUE"), - KeyValue::new("test.abc_123-", "value.123_foo-bar"), - KeyValue::new(METRICS_SOURCE, "opentelemetry"), - ]); - - let expect = vec![MetricLine { - key: MetricKey::new("test_sum"), - kind: NumberKind::I64, - dimensions: Some(dimensions), - min: None, - max: None, - sum: None, - count: None, - delta: None, - gauge: Some(Number::from(12_i64)), - timestamp: Some(get_time(end_time)), - }]; - - assert_eq!(expect, metric_line_data); - - let mut metric_lines: Vec = metric_line_data - .iter() - .map(|export_line| format!("{}", export_line)) - .collect(); - metric_lines.sort_unstable(); - - let mut iter = metric_lines.iter(); - - assert_eq!( - Some(&format!( - "test_sum,key=VALUE,{}={},test.abc_123-=value.123_foo-bar gauge,12 {}", - METRICS_SOURCE, - "opentelemetry", - get_time(end_time), - )), - iter.next() - ); - assert_eq!(None, iter.next()); - - // ExportKindSelector::Delta - let metric_line_data = - record_to_metric_line(&record, &delta_temporality_selector(), None, None, true)?; - - let dimensions = DimensionSet::from(vec![ - KeyValue::new("KEY", "VALUE"), - KeyValue::new("test.abc_123-", "value.123_foo-bar"), - KeyValue::new(METRICS_SOURCE, "opentelemetry"), - ]); - - let expect = vec![MetricLine { - key: MetricKey::new("test_sum"), - kind: NumberKind::I64, - dimensions: Some(dimensions), - min: None, - max: None, - sum: None, - count: None, - delta: Some(Number::from(12_i64)), - gauge: None, - timestamp: Some(get_time(end_time)), - }]; - - assert_eq!(expect, metric_line_data); - - let mut metric_lines: Vec = metric_line_data - .iter() - .map(|export_line| format!("{}", export_line)) - .collect(); - metric_lines.sort_unstable(); - - let mut iter = metric_lines.iter(); - - assert_eq!( - Some(&format!( - "test_sum,key=VALUE,{}={},test.abc_123-=value.123_foo-bar count,delta=12 {}", - METRICS_SOURCE, - "opentelemetry", - get_time(end_time), - )), - iter.next() - ); - assert_eq!(None, iter.next()); - } - - // Last Value - { - let descriptor = Descriptor::new( - "test_last_value".to_string(), - InstrumentKind::GaugeObserver, - NumberKind::I64, - None, - None, - ); - let aggregator = last_value(); - let val1 = Number::from(12_i64); - let val2 = Number::from(14_i64); - aggregator.update(&cx, &val1, &descriptor)?; - aggregator.update(&cx, &val2, &descriptor)?; - let wrapped_aggregator: Arc = Arc::new(aggregator); - let record = record( - &descriptor, - &attribute_set, - Some(&wrapped_aggregator), - start_time, - end_time, - ); - - let metric_line_data = record_to_metric_line( - &record, - &cumulative_temporality_selector(), - None, - None, - false, - )?; - - let dimensions = DimensionSet::from(vec![ - KeyValue::new("KEY", "VALUE"), - KeyValue::new("test.abc_123-", "value.123_foo-bar"), - 
KeyValue::new(METRICS_SOURCE, "opentelemetry"), - ]); - - let expect = vec![MetricLine { - key: MetricKey::new("test_last_value"), - kind: NumberKind::I64, - dimensions: Some(dimensions), - min: None, - max: None, - sum: None, - count: None, - delta: None, - gauge: Some(Number::from(14_i64)), - timestamp: None, - }]; - - assert_eq!(expect, metric_line_data); - - let mut metric_lines: Vec = metric_line_data - .iter() - .map(|export_line| format!("{}", export_line)) - .collect(); - metric_lines.sort_unstable(); - - let mut iter = metric_lines.iter(); - - assert_eq!( - Some(&format!( - "test_last_value,key=VALUE,{}={},test.abc_123-=value.123_foo-bar gauge,14", - METRICS_SOURCE, "opentelemetry", - )), - iter.next() - ); - assert_eq!(None, iter.next()); - } - - // Histogram - { - let descriptor = Descriptor::new( - "test_histogram".to_string(), - InstrumentKind::Histogram, - NumberKind::I64, - None, - None, - ); - let bound = [0.1, 0.2, 0.3]; - let aggregator = histogram(&bound); - let vals = vec![1i64.into(), 2i64.into(), 3i64.into()]; - for val in vals.iter() { - aggregator.update(&cx, val, &descriptor)?; - } - let wrapped_aggregator: Arc = Arc::new(aggregator); - let record = record( - &descriptor, - &attribute_set, - Some(&wrapped_aggregator), - start_time, - end_time, - ); - - let metric_line_data = record_to_metric_line( - &record, - &cumulative_temporality_selector(), - None, - None, - true, - )?; - - let dimensions = DimensionSet::from(vec![ - KeyValue::new("KEY", "VALUE"), - KeyValue::new("test.abc_123-", "value.123_foo-bar"), - KeyValue::new(METRICS_SOURCE, "opentelemetry"), - ]); - - let expect = vec![MetricLine { - key: MetricKey::new("test_histogram"), - kind: NumberKind::F64, - dimensions: Some(dimensions), - min: Some(Number::from(0.3_f64)), - max: Some(Number::from(0.3_f64)), - sum: Some(Number::from(6_f64)), - count: Some(3), - delta: None, - gauge: None, - timestamp: Some(get_time(end_time)), - }]; - - assert_eq!(expect, metric_line_data); - - let mut metric_lines: Vec = metric_line_data - .iter() - .map(|export_line| format!("{}", export_line)) - .collect(); - metric_lines.sort_unstable(); - - let mut iter = metric_lines.iter(); - - assert_eq!( - Some(&format!( - "test_histogram,key=VALUE,{}={},test.abc_123-=value.123_foo-bar gauge,min=0.3,max=0.3,sum=6,count=3 {}", - METRICS_SOURCE, - "opentelemetry", - get_time(end_time), - )), - iter.next() - ); - assert_eq!(None, iter.next()); - } - - Ok(()) - } -} diff --git a/opentelemetry-dynatrace/src/transform/mod.rs b/opentelemetry-dynatrace/src/transform/mod.rs deleted file mode 100644 index 9f064e6f8d..0000000000 --- a/opentelemetry-dynatrace/src/transform/mod.rs +++ /dev/null @@ -1,11 +0,0 @@ -//! 
OpenTelemetry Dynatrace Transform -mod common; - -#[cfg(feature = "metrics")] -mod metrics; - -#[cfg(feature = "metrics")] -pub use metrics::{DimensionSet, MetricKey, MetricLine}; - -#[cfg(feature = "metrics")] -pub(crate) use metrics::record_to_metric_line; diff --git a/opentelemetry-dynatrace/tests/http_test.rs b/opentelemetry-dynatrace/tests/http_test.rs deleted file mode 100644 index 7baad7240f..0000000000 --- a/opentelemetry-dynatrace/tests/http_test.rs +++ /dev/null @@ -1,185 +0,0 @@ -#[cfg(all(feature = "metrics", feature = "rt-tokio"))] -mod test { - use futures_util::future::BoxFuture; - use http::header::{HeaderValue, AUTHORIZATION, USER_AGENT}; - use hyper::{ - body, - service::{make_service_fn, service_fn}, - Body, Method, Request, Response, Server, - }; - use opentelemetry::{ - global, runtime, - sdk::{export::metrics::aggregation::cumulative_temporality_selector, metrics::selectors}, - Context, Key, KeyValue, - }; - use std::time::Duration; - use std::{net::SocketAddr, pin::Pin}; - - #[derive(Clone)] - struct TestRuntime { - tick_rx: tokio::sync::watch::Receiver, - } - impl runtime::Runtime for TestRuntime { - type Interval = futures_util::stream::Once>; - - type Delay = Pin>; - - fn interval(&self, _duration: Duration) -> Self::Interval { - let mut tick_rx = self.tick_rx.clone(); - futures_util::stream::once(Box::pin(async move { - let _ = tick_rx.changed().await.is_ok(); - })) - } - - fn spawn(&self, future: BoxFuture<'static, ()>) { - tokio::spawn(future); - } - - fn delay(&self, duration: Duration) -> Self::Delay { - Box::pin(tokio::time::sleep(duration)) - } - } - - #[tokio::test(flavor = "multi_thread")] - async fn integration_test() { - let (addr_tx, addr_rx) = tokio::sync::oneshot::channel(); - let (req_tx, mut req_rx) = tokio::sync::mpsc::channel(1); - let (tick_tx, tick_rx) = tokio::sync::watch::channel(0); - let (shutdown_tx, shutdown_rx) = tokio::sync::oneshot::channel(); - let cx = Context::new(); - - let addr: SocketAddr = "[::1]:0".parse().unwrap(); - - let server_handle = tokio::spawn(async move { - let make_svc = make_service_fn(move |_| { - let req_tx = req_tx.clone(); - async move { - Ok::<_, hyper::Error>(service_fn(move |req: Request| { - let req_tx = req_tx.clone(); - async move { - if req.method() == Method::POST && req.uri().path() == "/test/a/b/c" { - req_tx.send(req).await.unwrap(); - Ok::<_, hyper::Error>(Response::new(Body::empty())) - } else { - req_tx.send(req).await.unwrap(); - Ok::<_, hyper::Error>( - Response::builder() - .status(http::StatusCode::METHOD_NOT_ALLOWED) - .body(Body::empty()) - .unwrap(), - ) - } - } - })) - } - }); - - let server = Server::bind(&addr).http1_only(true).serve(make_svc); - - addr_tx.send(server.local_addr()).unwrap(); - - println!( - "Starting http server on port {}", - server.local_addr().port() - ); - if let Err(err) = server - .with_graceful_shutdown(async move { - let _ = shutdown_rx.await; - }) - .await - { - panic!("failed to start http server, {:?}", err); - } - }); - - let addr = addr_rx.await.unwrap(); - - let pipeline = opentelemetry_dynatrace::new_pipeline().metrics( - selectors::simple::inexpensive(), - cumulative_temporality_selector(), - TestRuntime { tick_rx }, - ); - pipeline - .with_exporter(opentelemetry_dynatrace::new_exporter().with_export_config( - opentelemetry_dynatrace::ExportConfig { - endpoint: Some(format!("http://{}/test/a/b/c", addr)), - token: Some("1234567890".to_string()), - }, - )) - .with_prefix("example".to_string()) - .with_period(Duration::from_millis(100)) - 
.with_timestamp(false) - .build() - .unwrap(); - - let (req, _) = tokio::join!(req_rx.recv(), async move { - let meter = global::meter("ex.com/basic"); - - let recorder = meter.u64_counter("test1").init(); - recorder.add( - &cx, - 90, - &[ - KeyValue::new("A", "test1"), - KeyValue::new("B", "test2"), - KeyValue::new("C", "test3"), - ], - ); - - let recorder = meter.f64_counter("test2").init(); - recorder.add(&cx, 1e10 + 0.123, &[KeyValue::new("foo", "bar")]); - - let recorder = meter.i64_histogram("test3").init(); - recorder.record(&cx, -999, &[Key::new("foo").i64(-123)]); - - let _ = tick_tx.send(1); - }); - - assert!(req.is_some()); - - let req = req.unwrap(); - - assert_eq!(req.method(), Method::POST); - assert_eq!(req.uri().path(), "/test/a/b/c"); - assert_eq!( - req.headers().get(USER_AGENT), - Some(&HeaderValue::from_static("opentelemetry-metric-rust")), - ); - assert_eq!( - req.headers().get(AUTHORIZATION), - Some(&HeaderValue::from_str("Api-Token 1234567890").unwrap()), - ); - - let bytes = body::to_bytes(req.into_body()) - .await - .expect("http server body not readable"); - let body = String::from_utf8(bytes.to_vec()).expect("response is not valid utf-8"); - - // We're done with this test request, so shut down the server. - shutdown_tx - .send(()) - .expect("sender error while shutting down http server"); - - // Reap the task handle to ensure that the server did indeed shut down. - server_handle.await.expect("http server yielded an error"); - - let mut metric_lines: Vec<&str> = body.lines().collect(); - metric_lines.sort_unstable(); - - let mut iter = metric_lines.iter(); - - assert_eq!( - Some(&"example.test1,a=test1,b=test2,c=test3,dt.metrics.source=opentelemetry gauge,90"), - iter.next(), - ); - assert_eq!( - Some(&"example.test2,dt.metrics.source=opentelemetry,foo=bar gauge,10000000000.123"), - iter.next(), - ); - assert_eq!( - Some(&"example.test3,dt.metrics.source=opentelemetry,foo=-123 gauge,-999"), - iter.next(), - ); - assert_eq!(iter.next(), None); - } -} diff --git a/opentelemetry/README.md b/opentelemetry/README.md index ab2812f0a8..70ca36aa9c 100644 --- a/opentelemetry/README.md +++ b/opentelemetry/README.md @@ -71,7 +71,7 @@ In particular, the following crates are likely to be of interest: - [`opentelemetry-contrib`] provides additional exporters and propagators that are experimental. - [`opentelemetry-datadog`] provides additional exporters to [`Datadog`]. -- [`opentelemetry-dynatrace`] provides additional exporters to [`Dynatrace`]. +- [`opentelemetry-dynatrace`] *Deprecated, last release 0.4.0* provides additional exporters to [`Dynatrace`]. See [README](opentelemetry-dynatrace/README.md) - [`opentelemetry-http`] provides an interface for injecting and extracting trace information from [`http`] headers. 
- [`opentelemetry-jaeger`] provides a pipeline and exporter for sending trace diff --git a/scripts/lint.sh b/scripts/lint.sh index 8982f4e160..f6b697011b 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -40,14 +40,6 @@ if rustup component add clippy; then cargo_feature opentelemetry-jaeger "collector_client, wasm_collector_client" cargo_feature opentelemetry-jaeger "default" - cargo_feature opentelemetry-dynatrace "default" - cargo_feature opentelemetry-dynatrace "metrics,rt-tokio,reqwest-client" - cargo_feature opentelemetry-dynatrace "metrics,rt-tokio,reqwest-rustls" - cargo_feature opentelemetry-dynatrace "metrics,rt-tokio,reqwest-blocking-client" - cargo_feature opentelemetry-dynatrace "metrics,rt-tokio,isahc-client" - cargo_feature opentelemetry-dynatrace "metrics,rt-tokio,surf-client,surf/curl-client" - cargo_feature opentelemetry-dynatrace "metrics,rt-async-std" - cargo_feature opentelemetry-proto "default" cargo_feature opentelemetry-proto "full" cargo_feature opentelemetry-proto "gen-tonic,trace" diff --git a/scripts/test.sh b/scripts/test.sh index 3248631484..e2a4b0a0b7 100755 --- a/scripts/test.sh +++ b/scripts/test.sh @@ -12,7 +12,6 @@ cargo test --manifest-path=opentelemetry/Cargo.toml --all-features -- --ignored cargo test --manifest-path=opentelemetry/Cargo.toml --all-features cargo test --manifest-path=opentelemetry-contrib/Cargo.toml --all-features -cargo test --manifest-path=opentelemetry-dynatrace/Cargo.toml --all-features cargo test --manifest-path=opentelemetry-jaeger/Cargo.toml --all-features cargo test --manifest-path=opentelemetry-otlp/Cargo.toml --features "trace,grpc-sys" --no-default-features cargo test --manifest-path=opentelemetry-zipkin/Cargo.toml --all-features From e265d24fe43d3daf2fc8fbb4bfe0573555b59596 Mon Sep 17 00:00:00 2001 From: Kristopher Wuollett Date: Wed, 1 Nov 2023 12:37:55 -0500 Subject: [PATCH 03/68] fix: specify portable sed arguments (#1332) Copied from https://github.com/open-telemetry/opentelemetry-cpp/blob/ca08c5a34ad4af1b6b392c8bf5fc26a5210a2f2c/tools/format.sh#L16-L19. 
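For context on the fix: GNU sed accepts `sed -i` with no suffix argument, but the BSD sed shipped with macOS requires an explicit (possibly empty) backup suffix, i.e. `sed -i ''`. Keeping the command and its flags in a bash array and expanding it quoted as `"${SED[@]}"`, as the hunk below does, lets one script drive both variants, since the quoted expansion preserves the empty suffix as its own argument.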
---
 .../scripts/generate-consts-from-spec.sh | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/opentelemetry-semantic-conventions/scripts/generate-consts-from-spec.sh b/opentelemetry-semantic-conventions/scripts/generate-consts-from-spec.sh
index 569b7833fc..a667095ad6 100755
--- a/opentelemetry-semantic-conventions/scripts/generate-consts-from-spec.sh
+++ b/opentelemetry-semantic-conventions/scripts/generate-consts-from-spec.sh
@@ -42,10 +42,15 @@ docker run --rm \
   --output /output/resource.rs \
   --parameters conventions=resource
 
+SED=(sed -i)
+if [[ "$(uname)" = "Darwin" ]]; then
+  SED=(sed -i "")
+fi
+
 # Keep `SCHEMA_URL` key in sync with spec version
-sed -i "s/\(opentelemetry.io\/schemas\/\)[^\"]*\"/\1$SPEC_VERSION\"/" src/lib.rs
+"${SED[@]}" "s/\(opentelemetry.io\/schemas\/\)[^\"]*\"/\1$SPEC_VERSION\"/" src/lib.rs
 
 # handle doc generation failures
-sed -i 's/\[2\]\.$//' src/resource.rs # remove trailing [2] from few of the doc comments
+"${SED[@]}" 's/\[2\]\.$//' src/resource.rs # remove trailing [2] from few of the doc comments
 
 cargo fmt

From 4dd54a2ab3d11dc1afcd382227f5d971df46818a Mon Sep 17 00:00:00 2001
From: Zhongyang Wu
Date: Sun, 5 Nov 2023 21:56:51 -0800
Subject: [PATCH 04/68] Env over compile-time config (#1323)

* feat: favor env vars in jaeger exporters

* feat: collectors

* fix unit tests

* unit tests

* unit tests

* add change logs
---
 CONTRIBUTING.md                               |   8 +
 opentelemetry-jaeger/CHANGELOG.md             |   1 +
 opentelemetry-jaeger/src/exporter/agent.rs    |  19 +-
 .../src/exporter/config/agent.rs              |  91 ++---
 .../src/exporter/config/collector/mod.rs      | 313 +++++++++++++-----
 opentelemetry-jaeger/src/exporter/mod.rs      |  65 ++--
 opentelemetry-jaeger/src/exporter/runtime.rs  |  14 +-
 7 files changed, 335 insertions(+), 176 deletions(-)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index c9ea9639cd..7d192d92c6 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -111,6 +111,14 @@ The Opentelemetry Rust SDK comes with an error type `openetelemetry::Error`. For
 For users that want to implement their own exporters. It's RECOMMENDED to wrap all errors
 from the exporter into a crate-level error type, and implement `ExporterError` trait.
 
+### Priority of configurations
+OpenTelemetry supports multiple ways to configure the API, SDK and other components. The priority of configurations is as follows:
+
+- Environment variables
+- Compile-time configurations provided in the source code
+
+
+
 ## Style Guide
 
 * Run `cargo clippy --all` - this will catch common mistakes and improve
diff --git a/opentelemetry-jaeger/CHANGELOG.md b/opentelemetry-jaeger/CHANGELOG.md
index acf49a24cb..1f28e055ca 100644
--- a/opentelemetry-jaeger/CHANGELOG.md
+++ b/opentelemetry-jaeger/CHANGELOG.md
@@ -6,6 +6,7 @@
 
 - Bump MSRV to 1.65 [#1318](https://github.com/open-telemetry/opentelemetry-rust/pull/1318)
 - Bump MSRV to 1.64 [#1203](https://github.com/open-telemetry/opentelemetry-rust/pull/1203)
+- Prioritize environment variables over compile-time variables [#1323](https://github.com/open-telemetry/opentelemetry-rust/pull/1323)
 
 ## v0.19.0
 
diff --git a/opentelemetry-jaeger/src/exporter/agent.rs b/opentelemetry-jaeger/src/exporter/agent.rs
index dfbc0b706f..8982d526c8 100644
--- a/opentelemetry-jaeger/src/exporter/agent.rs
+++ b/opentelemetry-jaeger/src/exporter/agent.rs
@@ -1,5 +1,5 @@
 //! # UDP Jaeger Agent Client
-use crate::exporter::addrs_and_family;
+use crate::exporter::address_family;
 use crate::exporter::runtime::JaegerTraceRuntime;
 use crate::exporter::thrift::{
     agent::{self, TAgentSyncClient},
     jaeger,
 };
 use crate::exporter::transport::{TBufferChannel, TNoopChannel};
 use std::fmt;
-use std::net::{ToSocketAddrs, UdpSocket};
+use std::net::{SocketAddr, UdpSocket};
 use thrift::{
     protocol::{TCompactInputProtocol, TCompactOutputProtocol},
     transport::{ReadHalf, TIoChannel, WriteHalf},
 };
@@ -43,10 +43,10 @@ pub(crate) struct AgentSyncClientUdp {
 
 impl AgentSyncClientUdp {
     /// Create a new UDP agent client
-    pub(crate) fn new<T: ToSocketAddrs>(
-        agent_endpoint: T,
+    pub(crate) fn new(
         max_packet_size: usize,
         auto_split: bool,
+        agent_address: Vec<SocketAddr>,
     ) -> thrift::Result<Self> {
         let (buffer, write) = TBufferChannel::with_capacity(max_packet_size).split()?;
         let client = agent::AgentSyncClient::new(
             TCompactInputProtocol::new(buffer),
             TCompactOutputProtocol::new(write),
         );
 
-        let (addrs, family) = addrs_and_family(&agent_endpoint)?;
-        let conn = UdpSocket::bind(family)?;
-        conn.connect(addrs.as_slice())?;
+        let conn = UdpSocket::bind(address_family(agent_address.as_slice()))?;
+        conn.connect(agent_address.as_slice())?;
 
         Ok(AgentSyncClientUdp {
             conn,
@@ -102,11 +101,11 @@ pub(crate) struct AgentAsyncClientUdp<R: JaegerTraceRuntime> {
 
 impl<R: JaegerTraceRuntime> AgentAsyncClientUdp<R> {
     /// Create a new UDP agent client
-    pub(crate) fn new<T: ToSocketAddrs>(
-        agent_endpoint: T,
+    pub(crate) fn new(
         max_packet_size: usize,
         runtime: R,
         auto_split: bool,
+        agent_address: Vec<SocketAddr>,
     ) -> thrift::Result<Self> {
         let (buffer, write) = TBufferChannel::with_capacity(max_packet_size).split()?;
         let client = agent::AgentSyncClient::new(
             TCompactInputProtocol::new(buffer),
             TCompactOutputProtocol::new(write),
         );
 
-        let conn = runtime.create_socket(agent_endpoint)?;
+        let conn = runtime.create_socket(agent_address.as_slice())?;
 
         Ok(AgentAsyncClientUdp {
             runtime,
diff --git a/opentelemetry-jaeger/src/exporter/config/agent.rs b/opentelemetry-jaeger/src/exporter/config/agent.rs
index 36d928e30b..8bc3945c71 100644
--- a/opentelemetry-jaeger/src/exporter/config/agent.rs
+++ b/opentelemetry-jaeger/src/exporter/config/agent.rs
@@ -1,3 +1,15 @@
+use std::borrow::BorrowMut;
+use std::net::ToSocketAddrs;
+use std::sync::Arc;
+use std::{env, net};
+
+use opentelemetry::trace::TraceError;
+use opentelemetry_sdk::trace::{BatchSpanProcessor, Tracer};
+use opentelemetry_sdk::{
+    self,
+    trace::{BatchConfig, Config, TracerProvider},
+};
+
 use crate::exporter::agent::{AgentAsyncClientUdp, AgentSyncClientUdp};
 use crate::exporter::config::{
     build_config_and_process, install_tracer_provider_and_get_tracer, HasRequiredConfig,
@@ -5,15 +17,6 @@ use crate::exporter::config::{
 };
 use crate::exporter::uploader::{AsyncUploader, SyncUploader, Uploader};
 use crate::{Error, Exporter, JaegerTraceRuntime};
-use opentelemetry::trace::TraceError;
-use opentelemetry_sdk::trace::{BatchSpanProcessor, Tracer};
-use opentelemetry_sdk::{
-    self,
-    trace::{BatchConfig, Config, TracerProvider},
-};
-use std::borrow::BorrowMut;
-use std::sync::Arc;
-use std::{env, net};
 
 /// The max size of UDP packet we want to send, synced with jaeger-agent
 const UDP_PACKET_MAX_LENGTH: usize = 65_000;
@@ -78,38 +81,23 @@ pub struct AgentPipeline {
     transformation_config: TransformationConfig,
     trace_config: Option<Config>,
     batch_config: Option<BatchConfig>,
-    agent_endpoint: Result<Vec<net::SocketAddr>, crate::Error>,
+    agent_endpoint: Option<String>,
     max_packet_size: usize,
     auto_split_batch: bool,
 }
 
 impl Default for AgentPipeline {
     fn default() -> 
Self { - let mut pipeline = AgentPipeline { + AgentPipeline { transformation_config: Default::default(), trace_config: Default::default(), batch_config: Some(Default::default()), - agent_endpoint: Ok(vec![format!( + agent_endpoint: Some(format!( "{DEFAULT_AGENT_ENDPOINT_HOST}:{DEFAULT_AGENT_ENDPOINT_PORT}" - ) - .parse() - .unwrap()]), + )), max_packet_size: UDP_PACKET_MAX_LENGTH, auto_split_batch: false, - }; - - let endpoint = match (env::var(ENV_AGENT_HOST), env::var(ENV_AGENT_PORT)) { - (Ok(host), Ok(port)) => Some(format!("{}:{}", host.trim(), port.trim())), - (Ok(host), _) => Some(format!("{}:{DEFAULT_AGENT_ENDPOINT_PORT}", host.trim())), - (_, Ok(port)) => Some(format!("{DEFAULT_AGENT_ENDPOINT_HOST}:{}", port.trim())), - (_, _) => None, - }; - - if let Some(endpoint) = endpoint { - pipeline = pipeline.with_endpoint(endpoint); } - - pipeline } } @@ -147,16 +135,9 @@ impl AgentPipeline { /// Any valid socket address can be used. /// /// Default to be `127.0.0.1:6831`. - pub fn with_endpoint(self, agent_endpoint: T) -> Self { + pub fn with_endpoint>(self, agent_endpoint: T) -> Self { AgentPipeline { - agent_endpoint: agent_endpoint - .to_socket_addrs() - .map(|addrs| addrs.collect()) - .map_err(|io_err| crate::Error::ConfigError { - pipeline_name: "agent", - config_name: "endpoint", - reason: io_err.to_string(), - }), + agent_endpoint: Some(agent_endpoint.into()), ..self } } @@ -391,10 +372,10 @@ impl AgentPipeline { R: JaegerTraceRuntime, { let agent = AgentAsyncClientUdp::new( - self.agent_endpoint?.as_slice(), self.max_packet_size, runtime, self.auto_split_batch, + self.resolve_endpoint()?, ) .map_err::(Into::into)?; Ok(Arc::new(AsyncUploader::Agent( @@ -404,13 +385,38 @@ impl AgentPipeline { fn build_sync_agent_uploader(self) -> Result, TraceError> { let agent = AgentSyncClientUdp::new( - self.agent_endpoint?.as_slice(), self.max_packet_size, self.auto_split_batch, + self.resolve_endpoint()?, ) .map_err::(Into::into)?; Ok(Arc::new(SyncUploader::Agent(std::sync::Mutex::new(agent)))) } + + // resolve the agent endpoint from the environment variables or the builder + // if only one of the environment variables is set, the other one will be set to the default value + // if no environment variable is set, the builder value will be used. 
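The precedence spelled out in these comments is the general pattern this change applies across the jaeger builders: consult the environment first, then whatever the builder recorded, then a hard-coded default. Reduced to its essentials (a simplified sketch, not the exact helper below):

fn resolve<T>(
    env_value: Option<T>,     // parsed from std::env::var, if set and valid
    builder_value: Option<T>, // whatever a with_*() builder method recorded
    default: T,
) -> T {
    env_value.or(builder_value).unwrap_or(default)
}

The real resolve_endpoint below differs mainly in that it has to stitch a host and a port together from two separate environment variables before falling back.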
+ fn resolve_endpoint(self) -> Result, TraceError> { + let endpoint_str = match (env::var(ENV_AGENT_HOST), env::var(ENV_AGENT_PORT)) { + (Ok(host), Ok(port)) => format!("{}:{}", host.trim(), port.trim()), + (Ok(host), _) => format!("{}:{DEFAULT_AGENT_ENDPOINT_PORT}", host.trim()), + (_, Ok(port)) => format!("{DEFAULT_AGENT_ENDPOINT_HOST}:{}", port.trim()), + (_, _) => self.agent_endpoint.unwrap_or(format!( + "{DEFAULT_AGENT_ENDPOINT_HOST}:{DEFAULT_AGENT_ENDPOINT_PORT}" + )), + }; + endpoint_str + .to_socket_addrs() + .map(|addrs| addrs.collect()) + .map_err(|io_err| { + Error::ConfigError { + pipeline_name: "agent", + config_name: "endpoint", + reason: io_err.to_string(), + } + .into() + }) + } } #[cfg(test)] @@ -429,9 +435,12 @@ mod tests { ("127.0.0.1:1001", true), ]; for (socket_str, is_ok) in test_cases.into_iter() { - let pipeline = AgentPipeline::default().with_endpoint(socket_str); + let resolved_endpoint = AgentPipeline::default() + .with_endpoint(socket_str) + .resolve_endpoint(); assert_eq!( - pipeline.agent_endpoint.is_ok(), + resolved_endpoint.is_ok(), + // if is_ok is true, use socket_str, otherwise use the default endpoint is_ok, "endpoint string {}", socket_str diff --git a/opentelemetry-jaeger/src/exporter/config/collector/mod.rs b/opentelemetry-jaeger/src/exporter/config/collector/mod.rs index 7777d3c4bd..0b6794ffff 100644 --- a/opentelemetry-jaeger/src/exporter/config/collector/mod.rs +++ b/opentelemetry-jaeger/src/exporter/config/collector/mod.rs @@ -1,12 +1,3 @@ -use crate::exporter::config::{ - build_config_and_process, install_tracer_provider_and_get_tracer, HasRequiredConfig, - TransformationConfig, -}; -use crate::exporter::uploader::{AsyncUploader, Uploader}; -use crate::{Exporter, JaegerTraceRuntime}; -use http::Uri; -use opentelemetry::trace::TraceError; -use opentelemetry_sdk::trace::{BatchConfig, BatchSpanProcessor, Config, Tracer, TracerProvider}; use std::borrow::BorrowMut; use std::convert::TryFrom; use std::env; @@ -14,16 +5,25 @@ use std::sync::Arc; #[cfg(feature = "collector_client")] use std::time::Duration; +use http::Uri; + +use opentelemetry::trace::TraceError; #[cfg(feature = "collector_client")] use opentelemetry_http::HttpClient; +use opentelemetry_sdk::trace::{BatchConfig, BatchSpanProcessor, Config, Tracer, TracerProvider}; #[cfg(feature = "collector_client")] use crate::config::collector::http_client::CollectorHttpClient; - #[cfg(feature = "collector_client")] use crate::exporter::collector::AsyncHttpClient; #[cfg(feature = "wasm_collector_client")] use crate::exporter::collector::WasmCollector; +use crate::exporter::config::{ + build_config_and_process, install_tracer_provider_and_get_tracer, HasRequiredConfig, + TransformationConfig, +}; +use crate::exporter::uploader::{AsyncUploader, Uploader}; +use crate::{Exporter, JaegerTraceRuntime}; #[cfg(feature = "collector_client")] mod http_client; @@ -43,7 +43,7 @@ const ENV_TIMEOUT: &str = "OTEL_EXPORTER_JAEGER_TIMEOUT"; const DEFAULT_COLLECTOR_TIMEOUT: Duration = Duration::from_secs(10); /// Username to send as part of "Basic" authentication to the collector endpoint. -const ENV_USER: &str = "OTEL_EXPORTER_JAEGER_USER"; +const ENV_USERNAME: &str = "OTEL_EXPORTER_JAEGER_USER"; /// Password to send as part of "Basic" authentication to the collector endpoint. const ENV_PASSWORD: &str = "OTEL_EXPORTER_JAEGER_PASSWORD"; @@ -97,7 +97,7 @@ pub struct CollectorPipeline { #[cfg(feature = "collector_client")] collector_timeout: Duration, // only used by builtin http clients. 
- collector_endpoint: Option>, + collector_endpoint: Option, collector_username: Option, collector_password: Option, @@ -106,7 +106,7 @@ pub struct CollectorPipeline { impl Default for CollectorPipeline { fn default() -> Self { - let mut pipeline = Self { + Self { #[cfg(feature = "collector_client")] collector_timeout: DEFAULT_COLLECTOR_TIMEOUT, collector_endpoint: None, @@ -116,33 +116,7 @@ impl Default for CollectorPipeline { transformation_config: Default::default(), trace_config: Default::default(), batch_config: Some(Default::default()), - }; - - #[cfg(feature = "collector_client")] - if let Some(timeout) = env::var(ENV_TIMEOUT).ok().filter(|var| !var.is_empty()) { - let timeout = match timeout.parse() { - Ok(timeout) => Duration::from_millis(timeout), - Err(e) => { - eprintln!("{} malformed defaulting to 10000: {}", ENV_TIMEOUT, e); - DEFAULT_COLLECTOR_TIMEOUT - } - }; - pipeline = pipeline.with_timeout(timeout); } - - if let Some(endpoint) = env::var(ENV_ENDPOINT).ok().filter(|var| !var.is_empty()) { - pipeline = pipeline.with_endpoint(endpoint); - } - - if let Some(user) = env::var(ENV_USER).ok().filter(|var| !var.is_empty()) { - pipeline = pipeline.with_username(user); - } - - if let Some(password) = env::var(ENV_PASSWORD).ok().filter(|var| !var.is_empty()) { - pipeline = pipeline.with_password(password); - } - - pipeline } } @@ -224,15 +198,9 @@ impl CollectorPipeline { /// Set the collector endpoint. /// /// E.g. "http://localhost:14268/api/traces" - pub fn with_endpoint(self, collector_endpoint: T) -> Self - where - http::Uri: core::convert::TryFrom, - >::Error: Into, - { + pub fn with_endpoint>(self, collector_endpoint: T) -> Self { Self { - collector_endpoint: Some( - core::convert::TryFrom::try_from(collector_endpoint).map_err(Into::into), - ), + collector_endpoint: Some(collector_endpoint.into()), ..self } } @@ -491,64 +459,95 @@ impl CollectorPipeline { where R: JaegerTraceRuntime, { - let endpoint = self - .collector_endpoint - .transpose() - .map_err::(|err| crate::Error::ConfigError { - pipeline_name: "collector", - config_name: "collector_endpoint", - reason: format!("invalid uri, {}", err), - })? 
- .unwrap_or_else(|| { - Uri::try_from(DEFAULT_ENDPOINT).unwrap() // default endpoint should always valid - }); + let endpoint = self.resolve_endpoint()?; + let username = self.resolve_username(); + let password = self.resolve_password(); + #[cfg(feature = "collector_client")] + let timeout = self.resolve_timeout(); match self.client_config { #[cfg(feature = "collector_client")] ClientConfig::Http { client_type } => { - let client = client_type.build_client( - self.collector_username, - self.collector_password, - self.collector_timeout, - )?; + let client = client_type.build_client(username, password, timeout)?; let collector = AsyncHttpClient::new(endpoint, client); Ok(Arc::new(AsyncUploader::::Collector(collector))) } #[cfg(feature = "wasm_collector_client")] ClientConfig::Wasm => { - let collector = - WasmCollector::new(endpoint, self.collector_username, self.collector_password) - .map_err::(Into::into)?; + let collector = WasmCollector::new(endpoint, username, password) + .map_err::(Into::into)?; Ok(Arc::new(AsyncUploader::::WasmCollector(collector))) } } } + + fn resolve_env_var(env_var: &'static str) -> Option { + env::var(env_var).ok().filter(|var| !var.is_empty()) + } + + // if provided value from environment variable or the builder is invalid, return error + fn resolve_endpoint(&self) -> Result { + let endpoint_from_env = Self::resolve_env_var(ENV_ENDPOINT) + .map(|endpoint| { + Uri::try_from(endpoint.as_str()).map_err::(|err| { + crate::Error::ConfigError { + pipeline_name: "collector", + config_name: "collector_endpoint", + reason: format!("invalid uri from environment variable, {}", err), + } + }) + }) + .transpose()?; + + Ok(match endpoint_from_env { + Some(endpoint) => endpoint, + None => { + if let Some(endpoint) = &self.collector_endpoint { + Uri::try_from(endpoint.as_str()).map_err::(|err| { + crate::Error::ConfigError { + pipeline_name: "collector", + config_name: "collector_endpoint", + reason: format!("invalid uri from the builder, {}", err), + } + })? 
+ } else { + Uri::try_from(DEFAULT_ENDPOINT).unwrap() // default endpoint should always valid + } + } + }) + } + + #[cfg(feature = "collector_client")] + fn resolve_timeout(&self) -> Duration { + match Self::resolve_env_var(ENV_TIMEOUT) { + Some(timeout) => match timeout.parse() { + Ok(timeout) => Duration::from_millis(timeout), + Err(e) => { + eprintln!("{} malformed default to 10s: {}", ENV_TIMEOUT, e); + self.collector_timeout + } + }, + None => self.collector_timeout, + } + } + + fn resolve_username(&self) -> Option { + Self::resolve_env_var(ENV_USERNAME).or_else(|| self.collector_username.clone()) + } + + fn resolve_password(&self) -> Option { + Self::resolve_env_var(ENV_PASSWORD).or_else(|| self.collector_password.clone()) + } } #[cfg(test)] #[cfg(feature = "rt-tokio")] mod tests { - use super::*; - use crate::config::collector::http_client::test_http_client; use opentelemetry_sdk::runtime::Tokio; - #[test] - fn test_collector_defaults() { - // No Env Variable - std::env::remove_var(ENV_TIMEOUT); - let builder = CollectorPipeline::default(); - assert_eq!(DEFAULT_COLLECTOR_TIMEOUT, builder.collector_timeout); - - // Bad Env Variable - std::env::set_var(ENV_TIMEOUT, "a"); - let builder = CollectorPipeline::default(); - assert_eq!(DEFAULT_COLLECTOR_TIMEOUT, builder.collector_timeout); + use crate::config::collector::http_client::test_http_client; - // Good Env Variable - std::env::set_var(ENV_TIMEOUT, "777"); - let builder = CollectorPipeline::default(); - assert_eq!(Duration::from_millis(777), builder.collector_timeout); - } + use super::*; #[test] fn test_set_collector_endpoint() { @@ -559,7 +558,7 @@ mod tests { assert!(invalid_uri.is_err()); assert_eq!( format!("{:?}", invalid_uri.err().unwrap()), - "ConfigError { pipeline_name: \"collector\", config_name: \"collector_endpoint\", reason: \"invalid uri, invalid format\" }", + "ConfigError { pipeline_name: \"collector\", config_name: \"collector_endpoint\", reason: \"invalid uri from the builder, invalid format\" }", ); let valid_uri = new_collector_pipeline() @@ -578,4 +577,148 @@ mod tests { .build_collector_exporter::(); assert!(exporter.is_ok()); } + + #[test] + fn test_resolve_endpoint() { + struct TestCase<'a> { + description: &'a str, + env_var: &'a str, + builder_endpoint: Option<&'a str>, + expected_result: Result, + } + let test_cases = vec![ + TestCase { + description: "Positive: Endpoint from environment variable exists", + env_var: "http://example.com", + builder_endpoint: None, + expected_result: Ok(Uri::try_from("http://example.com").unwrap()), + }, + TestCase { + description: "Positive: Endpoint from builder", + env_var: "", + builder_endpoint: Some("http://example.com"), + expected_result: Ok(Uri::try_from("http://example.com").unwrap()), + }, + TestCase { + description: "Negative: Invalid URI from environment variable", + env_var: "invalid random uri", + builder_endpoint: None, + expected_result: Err(crate::Error::ConfigError { + pipeline_name: "collector", + config_name: "collector_endpoint", + reason: "invalid uri from environment variable, invalid uri character" + .to_string(), + }), + }, + TestCase { + description: "Negative: Invalid URI from builder", + env_var: "", + builder_endpoint: Some("invalid random uri"), + expected_result: Err(crate::Error::ConfigError { + pipeline_name: "collector", + config_name: "collector_endpoint", + reason: "invalid uri from the builder, invalid uri character".to_string(), + }), + }, + TestCase { + description: "Positive: Default endpoint (no environment variable set)", + env_var: 
"", + builder_endpoint: None, + expected_result: Ok(Uri::try_from(DEFAULT_ENDPOINT).unwrap()), + }, + ]; + for test_case in test_cases { + env::set_var(ENV_ENDPOINT, test_case.env_var); + let builder = CollectorPipeline { + collector_endpoint: test_case.builder_endpoint.map(|s| s.to_string()), + ..Default::default() + }; + let result = builder.resolve_endpoint(); + match test_case.expected_result { + Ok(expected) => { + assert_eq!(result.unwrap(), expected, "{}", test_case.description); + } + Err(expected_err) => { + assert!( + result.is_err(), + "{}, expected error, get {}", + test_case.description, + result.unwrap() + ); + match (result.unwrap_err(), expected_err) { + ( + crate::Error::ConfigError { + pipeline_name: result_pipeline_name, + config_name: result_config_name, + reason: result_reason, + }, + crate::Error::ConfigError { + pipeline_name: expected_pipeline_name, + config_name: expected_config_name, + reason: expected_reason, + }, + ) => { + assert_eq!( + result_pipeline_name, expected_pipeline_name, + "{}", + test_case.description + ); + assert_eq!( + result_config_name, expected_config_name, + "{}", + test_case.description + ); + assert_eq!(result_reason, expected_reason, "{}", test_case.description); + } + _ => panic!("we don't expect collector to return other error"), + } + } + } + env::remove_var(ENV_ENDPOINT); + } + } + + #[test] + fn test_resolve_timeout() { + struct TestCase<'a> { + description: &'a str, + env_var: &'a str, + builder_var: Option, + expected_duration: Duration, + } + let test_cases = vec![ + TestCase { + description: "Valid environment variable", + env_var: "5000", + builder_var: None, + expected_duration: Duration::from_millis(5000), + }, + TestCase { + description: "Invalid environment variable", + env_var: "invalid", + builder_var: None, + expected_duration: DEFAULT_COLLECTOR_TIMEOUT, + }, + TestCase { + description: "Missing environment variable", + env_var: "", + builder_var: Some(Duration::from_millis(5000)), + expected_duration: Duration::from_millis(5000), + }, + ]; + for test_case in test_cases { + env::set_var(ENV_TIMEOUT, test_case.env_var); + let mut builder = CollectorPipeline::default(); + if let Some(timeout) = test_case.builder_var { + builder = builder.with_timeout(timeout); + } + let result = builder.resolve_timeout(); + assert_eq!( + result, test_case.expected_duration, + "{}", + test_case.description + ); + env::remove_var(ENV_TIMEOUT); + } + } } diff --git a/opentelemetry-jaeger/src/exporter/mod.rs b/opentelemetry-jaeger/src/exporter/mod.rs index 698cc0d646..be3ebe4c32 100644 --- a/opentelemetry-jaeger/src/exporter/mod.rs +++ b/opentelemetry-jaeger/src/exporter/mod.rs @@ -1,25 +1,20 @@ //! # Jaeger Exporter //! -mod agent; -#[cfg(any(feature = "collector_client", feature = "wasm_collector_client"))] -mod collector; -pub(crate) mod runtime; -#[allow(clippy::all, unreachable_pub, dead_code)] -#[rustfmt::skip] // don't format generated files -mod thrift; -pub mod config; -pub(crate) mod transport; -mod uploader; - // Linting isn't detecting that it's used seems like linting bug. 
#[allow(unused_imports)] #[cfg(feature = "surf_collector_client")] use std::convert::TryFrom; +use std::convert::TryInto; +use std::fmt::Display; +use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr}; +use std::sync::Arc; +use std::time::{Duration, SystemTime}; -use self::runtime::JaegerTraceRuntime; -use self::thrift::jaeger; -use crate::exporter::uploader::Uploader; use futures_core::future::BoxFuture; +#[cfg(feature = "isahc_collector_client")] +#[allow(unused_imports)] // this is actually used to configure authentication +use isahc::prelude::Configurable; + use opentelemetry::{ trace::{Event, Link, SpanKind, Status}, InstrumentationLibrary, Key, KeyValue, @@ -31,16 +26,22 @@ use opentelemetry_sdk::{ }, trace::EvictedQueue, }; -use std::convert::TryInto; -use std::fmt::Display; -use std::io; -use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, ToSocketAddrs}; -use std::sync::Arc; -use std::time::{Duration, SystemTime}; -#[cfg(feature = "isahc_collector_client")] -#[allow(unused_imports)] // this is actually used to configure authentication -use isahc::prelude::Configurable; +use crate::exporter::uploader::Uploader; + +use self::runtime::JaegerTraceRuntime; +use self::thrift::jaeger; + +mod agent; +#[cfg(any(feature = "collector_client", feature = "wasm_collector_client"))] +mod collector; +pub(crate) mod runtime; +#[allow(clippy::all, unreachable_pub, dead_code)] +#[rustfmt::skip] // don't format generated files +mod thrift; +pub mod config; +pub(crate) mod transport; +mod uploader; /// Instrument Library name MUST be reported in Jaeger Span tags with the following key const INSTRUMENTATION_LIBRARY_NAME: &str = "otel.library.name"; @@ -341,27 +342,25 @@ impl ExportError for Error { /// Sample the first address provided to designate which IP family to bind the socket to. /// IP families returned be INADDR_ANY as [`Ipv4Addr::UNSPECIFIED`] or /// IN6ADDR_ANY as [`Ipv6Addr::UNSPECIFIED`]. 
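The helper exists because a UDP socket must be bound to a wildcard address of the same family (IPv4 or IPv6) as the agent address it will connect to; the first resolved address is sampled to choose between 0.0.0.0:0 and [::]:0. Each runtime's create_socket then follows the same three steps, roughly (a condensed sketch of the implementations that follow):

let addrs: Vec<SocketAddr> = endpoint.to_socket_addrs()?.collect();
let conn = std::net::UdpSocket::bind(address_family(addrs.as_slice()))?;
conn.connect(addrs.as_slice())?;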
-fn addrs_and_family( - host_port: &impl ToSocketAddrs, -) -> Result<(Vec, SocketAddr), io::Error> { - let addrs = host_port.to_socket_addrs()?.collect::>(); - let family = match addrs.first() { +fn address_family(addrs: &[SocketAddr]) -> SocketAddr { + match addrs.first() { Some(SocketAddr::V4(_)) | None => SocketAddr::from((Ipv4Addr::UNSPECIFIED, 0)), Some(SocketAddr::V6(_)) => SocketAddr::from((Ipv6Addr::UNSPECIFIED, 0)), - }; - Ok((addrs, family)) + } } #[cfg(test)] mod tests { - use super::SPAN_KIND; - use crate::exporter::thrift::jaeger::Tag; - use crate::exporter::{build_span_tags, OTEL_STATUS_CODE, OTEL_STATUS_DESCRIPTION}; use opentelemetry::{ trace::{SpanKind, Status}, KeyValue, }; + use crate::exporter::thrift::jaeger::Tag; + use crate::exporter::{build_span_tags, OTEL_STATUS_CODE, OTEL_STATUS_DESCRIPTION}; + + use super::SPAN_KIND; + fn assert_tag_contains(tags: Vec, key: &'static str, expect_val: &'static str) { assert_eq!( tags.into_iter() diff --git a/opentelemetry-jaeger/src/exporter/runtime.rs b/opentelemetry-jaeger/src/exporter/runtime.rs index 5348eefbcf..2c3f29dc41 100644 --- a/opentelemetry-jaeger/src/exporter/runtime.rs +++ b/opentelemetry-jaeger/src/exporter/runtime.rs @@ -3,7 +3,7 @@ feature = "rt-tokio", feature = "rt-tokio-current-thread" ))] -use crate::exporter::addrs_and_family; +use crate::exporter::address_family; use async_trait::async_trait; use opentelemetry_sdk::runtime::RuntimeChannel; use std::net::ToSocketAddrs; @@ -29,8 +29,8 @@ impl JaegerTraceRuntime for opentelemetry_sdk::runtime::Tokio { type Socket = tokio::net::UdpSocket; fn create_socket(&self, endpoint: T) -> thrift::Result { - let (addrs, family) = addrs_and_family(&endpoint)?; - let conn = std::net::UdpSocket::bind(family)?; + let addrs = endpoint.to_socket_addrs()?.collect::>(); + let conn = std::net::UdpSocket::bind(address_family(addrs.as_slice()))?; conn.connect(addrs.as_slice())?; Ok(tokio::net::UdpSocket::from_std(conn)?) } @@ -48,8 +48,8 @@ impl JaegerTraceRuntime for opentelemetry_sdk::runtime::TokioCurrentThread { type Socket = tokio::net::UdpSocket; fn create_socket(&self, endpoint: T) -> thrift::Result { - let (addrs, family) = addrs_and_family(&endpoint)?; - let conn = std::net::UdpSocket::bind(family)?; + let addrs = endpoint.to_socket_addrs()?.collect::>(); + let conn = std::net::UdpSocket::bind(address_family(addrs.as_slice()))?; conn.connect(addrs.as_slice())?; Ok(tokio::net::UdpSocket::from_std(conn)?) 
@@ -67,8 +67,8 @@ impl JaegerTraceRuntime for opentelemetry_sdk::runtime::AsyncStd { type Socket = async_std::net::UdpSocket; fn create_socket<T: ToSocketAddrs>(&self, endpoint: T) -> thrift::Result<Self::Socket> { - let (addrs, family) = addrs_and_family(&endpoint)?; - let conn = std::net::UdpSocket::bind(family)?; + let addrs = endpoint.to_socket_addrs()?.collect::<Vec<_>>(); + let conn = std::net::UdpSocket::bind(address_family(addrs.as_slice()))?; conn.connect(addrs.as_slice())?; Ok(async_std::net::UdpSocket::from(conn)) } From 47881b20a2b8e94d8e1cdbd4877852dd74cc07de Mon Sep 17 00:00:00 2001 From: Julian Tescher Date: Mon, 6 Nov 2023 18:35:16 -0500 Subject: [PATCH 05/68] Prepare for v0.21.0 release (#1343) --- opentelemetry-appender-log/CHANGELOG.md | 2 ++ opentelemetry-appender-log/Cargo.toml | 2 +- opentelemetry-appender-tracing/CHANGELOG.md | 2 ++ opentelemetry-appender-tracing/Cargo.toml | 4 ++-- opentelemetry-aws/CHANGELOG.md | 2 ++ opentelemetry-aws/Cargo.toml | 2 +- opentelemetry-contrib/CHANGELOG.md | 2 ++ opentelemetry-contrib/Cargo.toml | 6 +++--- opentelemetry-datadog/CHANGELOG.md | 2 ++ opentelemetry-datadog/Cargo.toml | 8 ++++---- opentelemetry-http/CHANGELOG.md | 2 ++ opentelemetry-http/Cargo.toml | 2 +- opentelemetry-jaeger/CHANGELOG.md | 2 ++ opentelemetry-jaeger/Cargo.toml | 8 ++++---- opentelemetry-otlp/CHANGELOG.md | 2 ++ opentelemetry-otlp/Cargo.toml | 10 +++++----- opentelemetry-prometheus/CHANGELOG.md | 2 ++ opentelemetry-prometheus/Cargo.toml | 4 ++-- opentelemetry-proto/CHANGELOG.md | 2 ++ opentelemetry-proto/Cargo.toml | 4 ++-- opentelemetry-sdk/CHANGELOG.md | 2 ++ opentelemetry-sdk/Cargo.toml | 4 ++-- opentelemetry-semantic-conventions/CHANGELOG.md | 2 ++ opentelemetry-semantic-conventions/Cargo.toml | 2 +- opentelemetry-stackdriver/CHANGELOG.md | 2 ++ opentelemetry-stackdriver/Cargo.toml | 6 +++--- opentelemetry-stdout/CHANGELOG.md | 4 ++++ opentelemetry-stdout/Cargo.toml | 8 ++++---- opentelemetry-user-events-logs/CHANGELOG.md | 2 ++ opentelemetry-user-events-logs/Cargo.toml | 4 ++-- opentelemetry-user-events-metrics/CHANGELOG.md | 2 ++ opentelemetry-user-events-metrics/Cargo.toml | 6 +++--- opentelemetry-zipkin/CHANGELOG.md | 2 ++ opentelemetry-zipkin/Cargo.toml | 8 ++++---- opentelemetry-zpages/CHANGELOG.md | 2 ++ opentelemetry-zpages/Cargo.toml | 8 ++++---- opentelemetry/CHANGELOG.md | 4 ++++ 37 files changed, 90 insertions(+), 48 deletions(-) diff --git a/opentelemetry-appender-log/CHANGELOG.md b/opentelemetry-appender-log/CHANGELOG.md index 27b551c1e0..63e646aed1 100644 --- a/opentelemetry-appender-log/CHANGELOG.md +++ b/opentelemetry-appender-log/CHANGELOG.md @@ -2,6 +2,8 @@ ## vNext +## v0.2.0 + ### Changed - Bump MSRV to 1.65 [#1318](https://github.com/open-telemetry/opentelemetry-rust/pull/1318) diff --git a/opentelemetry-appender-log/Cargo.toml b/opentelemetry-appender-log/Cargo.toml index 54e70d46fc..339b5bcc02 100644 --- a/opentelemetry-appender-log/Cargo.toml +++ b/opentelemetry-appender-log/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "opentelemetry-appender-log" -version = "0.1.0" +version = "0.2.0" description = "An OpenTelemetry appender for the log crate" homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-appender-log" repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-appender-log" diff --git a/opentelemetry-appender-tracing/CHANGELOG.md b/opentelemetry-appender-tracing/CHANGELOG.md index 33218c30ca..5eedb4d1ef 100644 --- a/opentelemetry-appender-tracing/CHANGELOG.md +++
b/opentelemetry-appender-tracing/CHANGELOG.md @@ -2,6 +2,8 @@ ## vNext +## v0.2.0 + ### Changed - Bump MSRV to 1.65 [#1318](https://github.com/open-telemetry/opentelemetry-rust/pull/1318) diff --git a/opentelemetry-appender-tracing/Cargo.toml b/opentelemetry-appender-tracing/Cargo.toml index c7a956fab8..c2eba3c4d7 100644 --- a/opentelemetry-appender-tracing/Cargo.toml +++ b/opentelemetry-appender-tracing/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "opentelemetry-appender-tracing" -version = "0.1.0" +version = "0.2.0" edition = "2021" description = "An OpenTelemetry log appender for the tracing crate" homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-appender-tracing" @@ -12,7 +12,7 @@ rust-version = "1.65" [dependencies] opentelemetry = { version = "0.21", path = "../opentelemetry", features = ["logs"] } -opentelemetry_sdk = { version = "0.20", path = "../opentelemetry-sdk", features = ["logs"] } +opentelemetry_sdk = { version = "0.21", path = "../opentelemetry-sdk", features = ["logs"] } tracing = {version = "0.1.37", default-features = false, features = ["std"]} tracing-core = "0.1.31" tracing-subscriber = { version = "0.3.0", default-features = false, features = ["registry", "std"] } diff --git a/opentelemetry-aws/CHANGELOG.md b/opentelemetry-aws/CHANGELOG.md index c7f866e5fb..6b6327db14 100644 --- a/opentelemetry-aws/CHANGELOG.md +++ b/opentelemetry-aws/CHANGELOG.md @@ -2,6 +2,8 @@ ## vNext +## v0.9.0 + ### Changed - Bump MSRV to 1.65 [#1318](https://github.com/open-telemetry/opentelemetry-rust/pull/1318) diff --git a/opentelemetry-aws/Cargo.toml b/opentelemetry-aws/Cargo.toml index 3dc2d7ba29..b6f42e1357 100644 --- a/opentelemetry-aws/Cargo.toml +++ b/opentelemetry-aws/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "opentelemetry-aws" -version = "0.8.0" +version = "0.9.0" description = "AWS exporters and propagators for OpenTelemetry" homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-aws" repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-aws" diff --git a/opentelemetry-contrib/CHANGELOG.md b/opentelemetry-contrib/CHANGELOG.md index 9802ccc1dd..d942534684 100644 --- a/opentelemetry-contrib/CHANGELOG.md +++ b/opentelemetry-contrib/CHANGELOG.md @@ -2,6 +2,8 @@ ## vNext +## v0.13.0 + ### Changed - Bump MSRV to 1.65 [#1318](https://github.com/open-telemetry/opentelemetry-rust/pull/1318) diff --git a/opentelemetry-contrib/Cargo.toml b/opentelemetry-contrib/Cargo.toml index ae5c19fdc6..d5c6eb054d 100644 --- a/opentelemetry-contrib/Cargo.toml +++ b/opentelemetry-contrib/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "opentelemetry-contrib" -version = "0.12.0" +version = "0.13.0" description = "Rust contrib repo for OpenTelemetry" homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-contrib" repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-contrib" @@ -36,8 +36,8 @@ futures-core = { version = "0.3", optional = true } futures-util = { version = "0.3", optional = true, default-features = false } once_cell = "1.17.1" opentelemetry = { version = "0.21", path = "../opentelemetry" } -opentelemetry_sdk = { version = "0.20", optional = true, path = "../opentelemetry-sdk" } -opentelemetry-semantic-conventions = { version = "0.12", optional = true, path = "../opentelemetry-semantic-conventions" } +opentelemetry_sdk = { version = "0.21", optional = true, path = "../opentelemetry-sdk" } 
+opentelemetry-semantic-conventions = { version = "0.13", optional = true, path = "../opentelemetry-semantic-conventions" } serde_json = { version = "1", optional = true } tokio = { version = "1.0", features = ["fs", "io-util"], optional = true } diff --git a/opentelemetry-datadog/CHANGELOG.md b/opentelemetry-datadog/CHANGELOG.md index bb98caf3e8..48d6227464 100644 --- a/opentelemetry-datadog/CHANGELOG.md +++ b/opentelemetry-datadog/CHANGELOG.md @@ -2,6 +2,8 @@ ## vNext +## v0.9.0 + ### Changed - Bump MSRV to 1.65 [#1318](https://github.com/open-telemetry/opentelemetry-rust/pull/1318) diff --git a/opentelemetry-datadog/Cargo.toml b/opentelemetry-datadog/Cargo.toml index 42c4a1c1f2..9b78dbe0a4 100644 --- a/opentelemetry-datadog/Cargo.toml +++ b/opentelemetry-datadog/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "opentelemetry-datadog" -version = "0.8.0" +version = "0.9.0" description = "Datadog exporters and propagators for OpenTelemetry" homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-datadog" repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-datadog" @@ -27,9 +27,9 @@ surf-client = ["surf", "opentelemetry-http/surf"] indexmap = "2.0" once_cell = "1.12" opentelemetry = { version = "0.21", path = "../opentelemetry", features = ["trace"] } -opentelemetry_sdk = { version = "0.20", path = "../opentelemetry-sdk", features = ["trace"] } -opentelemetry-http = { version = "0.9", path = "../opentelemetry-http" } -opentelemetry-semantic-conventions = { version = "0.12", path = "../opentelemetry-semantic-conventions" } +opentelemetry_sdk = { version = "0.21", path = "../opentelemetry-sdk", features = ["trace"] } +opentelemetry-http = { version = "0.10", path = "../opentelemetry-http" } +opentelemetry-semantic-conventions = { version = "0.13", path = "../opentelemetry-semantic-conventions" } rmp = "0.8" url = "2.2" reqwest = { version = "0.11", default-features = false, optional = true } diff --git a/opentelemetry-http/CHANGELOG.md b/opentelemetry-http/CHANGELOG.md index 042a74d019..8562d343f4 100644 --- a/opentelemetry-http/CHANGELOG.md +++ b/opentelemetry-http/CHANGELOG.md @@ -2,6 +2,8 @@ ## vNext +## v0.10.0 + ### Changed - Bump MSRV to 1.65 [#1318](https://github.com/open-telemetry/opentelemetry-rust/pull/1318) diff --git a/opentelemetry-http/Cargo.toml b/opentelemetry-http/Cargo.toml index 4db072a8c2..e669456669 100644 --- a/opentelemetry-http/Cargo.toml +++ b/opentelemetry-http/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "opentelemetry-http" -version = "0.9.0" +version = "0.10.0" description = "Helper implementations for exchange of traces and metrics over HTTP" homepage = "https://github.com/open-telemetry/opentelemetry-rust" repository = "https://github.com/open-telemetry/opentelemetry-rust" diff --git a/opentelemetry-jaeger/CHANGELOG.md b/opentelemetry-jaeger/CHANGELOG.md index 1f28e055ca..c109b677db 100644 --- a/opentelemetry-jaeger/CHANGELOG.md +++ b/opentelemetry-jaeger/CHANGELOG.md @@ -2,6 +2,8 @@ ## vNext +## v0.20.0 + ### Changed - Bump MSRV to 1.65 [#1318](https://github.com/open-telemetry/opentelemetry-rust/pull/1318) diff --git a/opentelemetry-jaeger/Cargo.toml b/opentelemetry-jaeger/Cargo.toml index f4ad9868f8..0cc3aac4db 100644 --- a/opentelemetry-jaeger/Cargo.toml +++ b/opentelemetry-jaeger/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "opentelemetry-jaeger" -version = "0.19.0" +version = "0.20.0" description = "Jaeger exporter for OpenTelemetry" homepage = 
"https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-jaeger" repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-jaeger" @@ -30,9 +30,9 @@ hyper-tls = { version = "0.5.0", default-features = false, optional = true } isahc = { version = "1.4", default-features = false, optional = true } js-sys = { version = "0.3", optional = true } opentelemetry = { version = "0.21", default-features = false, features = ["trace"], path = "../opentelemetry" } -opentelemetry_sdk = { version = "0.20", default-features = false, features = ["trace"], path = "../opentelemetry-sdk" } -opentelemetry-http = { version = "0.9", path = "../opentelemetry-http", optional = true } -opentelemetry-semantic-conventions = { version = "0.12", path = "../opentelemetry-semantic-conventions" } +opentelemetry_sdk = { version = "0.21", default-features = false, features = ["trace"], path = "../opentelemetry-sdk" } +opentelemetry-http = { version = "0.10", path = "../opentelemetry-http", optional = true } +opentelemetry-semantic-conventions = { version = "0.13", path = "../opentelemetry-semantic-conventions" } pin-project-lite = { version = "0.2", optional = true } reqwest = { version = "0.11", default-features = false, optional = true } surf = { version = "2.0", optional = true } diff --git a/opentelemetry-otlp/CHANGELOG.md b/opentelemetry-otlp/CHANGELOG.md index 555b5764d9..4a23e000e1 100644 --- a/opentelemetry-otlp/CHANGELOG.md +++ b/opentelemetry-otlp/CHANGELOG.md @@ -2,6 +2,8 @@ ## vNext +## v0.14.0 + ### Added - Add `build_{signal}_exporter` methods to client builders (#1187) diff --git a/opentelemetry-otlp/Cargo.toml b/opentelemetry-otlp/Cargo.toml index d9d60fe252..35cae87b5f 100644 --- a/opentelemetry-otlp/Cargo.toml +++ b/opentelemetry-otlp/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "opentelemetry-otlp" -version = "0.13.0" +version = "0.14.0" description = "Exporter for the OpenTelemetry Collector" homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-otlp" repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-otlp" @@ -28,12 +28,12 @@ rustdoc-args = ["--cfg", "docsrs"] [dependencies] async-trait = "0.1" futures-core = "0.3" -opentelemetry-proto = { version = "0.3", path = "../opentelemetry-proto", default-features = false } grpcio = { version = "0.12", optional = true } opentelemetry = { version = "0.21", default-features = false, path = "../opentelemetry" } -opentelemetry_sdk = { version = "0.20", default-features = false, path = "../opentelemetry-sdk" } -opentelemetry-http = { version = "0.9", path = "../opentelemetry-http", optional = true } -opentelemetry-semantic-conventions = { version = "0.12", path = "../opentelemetry-semantic-conventions" } +opentelemetry_sdk = { version = "0.21", default-features = false, path = "../opentelemetry-sdk" } +opentelemetry-http = { version = "0.10", path = "../opentelemetry-http", optional = true } +opentelemetry-proto = { version = "0.4", path = "../opentelemetry-proto", default-features = false } +opentelemetry-semantic-conventions = { version = "0.13", path = "../opentelemetry-semantic-conventions" } prost = { version = "0.11.0", optional = true } tonic = { version = "0.9.0", optional = true } diff --git a/opentelemetry-prometheus/CHANGELOG.md b/opentelemetry-prometheus/CHANGELOG.md index 667d1c6644..e7ced72311 100644 --- a/opentelemetry-prometheus/CHANGELOG.md +++ b/opentelemetry-prometheus/CHANGELOG.md @@ -2,6 +2,8 @@ ## vNext +## v0.14.0 + ### 
Changed - Bump MSRV to 1.65 [#1318](https://github.com/open-telemetry/opentelemetry-rust/pull/1318) diff --git a/opentelemetry-prometheus/Cargo.toml b/opentelemetry-prometheus/Cargo.toml index aa3bd50436..e883557449 100644 --- a/opentelemetry-prometheus/Cargo.toml +++ b/opentelemetry-prometheus/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "opentelemetry-prometheus" -version = "0.13.0" +version = "0.14.0" description = "Prometheus exporter for OpenTelemetry" homepage = "https://github.com/open-telemetry/opentelemetry-rust" repository = "https://github.com/open-telemetry/opentelemetry-rust" @@ -22,7 +22,7 @@ rustdoc-args = ["--cfg", "docsrs"] [dependencies] once_cell = "1.17" opentelemetry = { version = "0.21", path = "../opentelemetry", default-features = false, features = ["metrics"] } -opentelemetry_sdk = { version = "0.20", path = "../opentelemetry-sdk", default-features = false, features = ["metrics"] } +opentelemetry_sdk = { version = "0.21", path = "../opentelemetry-sdk", default-features = false, features = ["metrics"] } prometheus = "0.13" protobuf = "2.14" diff --git a/opentelemetry-proto/CHANGELOG.md b/opentelemetry-proto/CHANGELOG.md index 47d05ff173..7d9002c179 100644 --- a/opentelemetry-proto/CHANGELOG.md +++ b/opentelemetry-proto/CHANGELOG.md @@ -2,6 +2,8 @@ ## vNext +## v0.4.0 + ### Added - Implement tonic metrics proto transformations (#1184) diff --git a/opentelemetry-proto/Cargo.toml b/opentelemetry-proto/Cargo.toml index dda2b36ace..3a6d761b25 100644 --- a/opentelemetry-proto/Cargo.toml +++ b/opentelemetry-proto/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "opentelemetry-proto" -version = "0.3.0" +version = "0.4.0" description = "Protobuf generated files and transformations." homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-proto" repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-proto" @@ -47,7 +47,7 @@ grpcio = { version = "0.12", optional = true, features = ["prost-codec"] } tonic = { version = "0.9.0", default-features = false, optional = true, features = ["codegen", "prost"] } prost = { version = "0.11.0", optional = true } opentelemetry = { version = "0.21", default-features = false, path = "../opentelemetry" } -opentelemetry_sdk = { version = "0.20", default-features = false, path = "../opentelemetry-sdk" } +opentelemetry_sdk = { version = "0.21", default-features = false, path = "../opentelemetry-sdk" } serde = { version = "1.0", optional = true, features = ["serde_derive"] } [dev-dependencies] diff --git a/opentelemetry-sdk/CHANGELOG.md b/opentelemetry-sdk/CHANGELOG.md index fb5712ecd7..2e3efa34e3 100644 --- a/opentelemetry-sdk/CHANGELOG.md +++ b/opentelemetry-sdk/CHANGELOG.md @@ -2,6 +2,8 @@ ## vNext +## v0.21.0 + ### Added - Log warning if two instruments have the same name with different (#1266) diff --git a/opentelemetry-sdk/Cargo.toml b/opentelemetry-sdk/Cargo.toml index 838bea6341..a755a75734 100644 --- a/opentelemetry-sdk/Cargo.toml +++ b/opentelemetry-sdk/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "opentelemetry_sdk" -version = "0.20.0" +version = "0.21.0" description = "The SDK for the OpenTelemetry metrics collection and distributed tracing framework" homepage = "https://github.com/open-telemetry/opentelemetry-rust" repository = "https://github.com/open-telemetry/opentelemetry-rust" @@ -11,7 +11,7 @@ rust-version = "1.65" [dependencies] opentelemetry = { version = "0.21", path = "../opentelemetry/" } -opentelemetry-http = { version = "0.9", path = "../opentelemetry-http", 
optional = true } +opentelemetry-http = { version = "0.10", path = "../opentelemetry-http", optional = true } async-std = { version = "1.6", features = ["unstable"], optional = true } async-trait = { version = "0.1", optional = true } crossbeam-channel = { version = "0.5", optional = true } diff --git a/opentelemetry-semantic-conventions/CHANGELOG.md b/opentelemetry-semantic-conventions/CHANGELOG.md index 33d43e9f90..6002c93754 100644 --- a/opentelemetry-semantic-conventions/CHANGELOG.md +++ b/opentelemetry-semantic-conventions/CHANGELOG.md @@ -2,6 +2,8 @@ ## vNext +## v0.13.0 + ### Changed - Bump MSRV to 1.65 [#1318](https://github.com/open-telemetry/opentelemetry-rust/pull/1318) diff --git a/opentelemetry-semantic-conventions/Cargo.toml b/opentelemetry-semantic-conventions/Cargo.toml index c235799548..90627bd1df 100644 --- a/opentelemetry-semantic-conventions/Cargo.toml +++ b/opentelemetry-semantic-conventions/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "opentelemetry-semantic-conventions" -version = "0.12.0" +version = "0.13.0" description = "Semantic conventions for OpenTelemetry" homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-semantic-conventions" repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-semantic-conventions" diff --git a/opentelemetry-stackdriver/CHANGELOG.md b/opentelemetry-stackdriver/CHANGELOG.md index f64d5f5743..e65cf8f445 100644 --- a/opentelemetry-stackdriver/CHANGELOG.md +++ b/opentelemetry-stackdriver/CHANGELOG.md @@ -2,6 +2,8 @@ ## vNext +## v0.18.0 + ### Changed - Bump MSRV to 1.65 [#1318](https://github.com/open-telemetry/opentelemetry-rust/pull/1318) diff --git a/opentelemetry-stackdriver/Cargo.toml b/opentelemetry-stackdriver/Cargo.toml index 8bab82d6b4..b1a3f98e8e 100644 --- a/opentelemetry-stackdriver/Cargo.toml +++ b/opentelemetry-stackdriver/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "opentelemetry-stackdriver" -version = "0.17.0" +version = "0.18.0" description = "A Rust opentelemetry exporter that uploads traces to Google Stackdriver trace." documentation = "https://docs.rs/opentelemetry-stackdriver/" repository = "https://github.com/open-telemetry/opentelemetry-rust" @@ -17,8 +17,8 @@ http = "0.2" hyper = "0.14.2" hyper-rustls = { version = "0.24", optional = true } opentelemetry = { version = "0.21", path = "../opentelemetry" } -opentelemetry_sdk = { version = "0.20", path = "../opentelemetry-sdk" } -opentelemetry-semantic-conventions = { version = "0.12", path = "../opentelemetry-semantic-conventions" } +opentelemetry_sdk = { version = "0.21", path = "../opentelemetry-sdk" } +opentelemetry-semantic-conventions = { version = "0.13", path = "../opentelemetry-semantic-conventions" } prost = "0.11.0" prost-types = "0.11.1" thiserror = "1.0.30" diff --git a/opentelemetry-stdout/CHANGELOG.md b/opentelemetry-stdout/CHANGELOG.md index 5f6520f752..f06970d406 100644 --- a/opentelemetry-stdout/CHANGELOG.md +++ b/opentelemetry-stdout/CHANGELOG.md @@ -2,6 +2,10 @@ ## vNext +## v0.2.0 + +### Changed + - Bump MSRV to 1.65 [#1318](https://github.com/open-telemetry/opentelemetry-rust/pull/1318) - Timestamp is additionally exported in user-friendly format. [#1192](https://github.com/open-telemetry/opentelemetry-rust/pull/1192). 
diff --git a/opentelemetry-stdout/Cargo.toml b/opentelemetry-stdout/Cargo.toml index 0b216950d0..35ab1f5e2f 100644 --- a/opentelemetry-stdout/Cargo.toml +++ b/opentelemetry-stdout/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "opentelemetry-stdout" -version = "0.1.0" +version = "0.2.0" description = "An OpenTelemetry exporter for stdout" homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-stdout" repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-stdout" @@ -26,12 +26,12 @@ chrono = { version = "0.4.22", default-features = false, features = ["clock"] } thiserror = { version = "1", optional = true } futures-util = { version = "0.3", optional = true, default-features = false } opentelemetry = { version = "0.21", path = "../opentelemetry", default_features = false } -opentelemetry_sdk = { version = "0.20", path = "../opentelemetry-sdk", default_features = false } +opentelemetry_sdk = { version = "0.21", path = "../opentelemetry-sdk", default_features = false } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" ordered-float = "4.0" [dev-dependencies] -opentelemetry = { version = "0.21", path = "../opentelemetry", features = ["metrics"] } -opentelemetry_sdk = { version = "0.20", path = "../opentelemetry-sdk", features = ["rt-tokio", "metrics"] } +opentelemetry = { path = "../opentelemetry", features = ["metrics"] } +opentelemetry_sdk = { path = "../opentelemetry-sdk", features = ["rt-tokio", "metrics"] } tokio = { version = "1.27", features = ["full"] } diff --git a/opentelemetry-user-events-logs/CHANGELOG.md b/opentelemetry-user-events-logs/CHANGELOG.md index 1af20b8dca..c61e4476ea 100644 --- a/opentelemetry-user-events-logs/CHANGELOG.md +++ b/opentelemetry-user-events-logs/CHANGELOG.md @@ -2,6 +2,8 @@ ## Unreleased +## v0.2.0 + ### Changed - Bump MSRV to 1.65 [#1318](https://github.com/open-telemetry/opentelemetry-rust/pull/1318) diff --git a/opentelemetry-user-events-logs/Cargo.toml b/opentelemetry-user-events-logs/Cargo.toml index 21bff5b160..708ab85855 100644 --- a/opentelemetry-user-events-logs/Cargo.toml +++ b/opentelemetry-user-events-logs/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "opentelemetry-user-events-logs" description = "OpenTelemetry-Rust exporter to userevents" -version = "0.1.0" +version = "0.2.0" edition = "2021" homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-user-events-logs" repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-user-events-logs" @@ -14,7 +14,7 @@ license = "Apache-2.0" eventheader = "0.3.2" eventheader_dynamic = "0.3.3" opentelemetry = { version = "0.21", path = "../opentelemetry", features = ["logs"] } -opentelemetry_sdk = { version = "0.20", path = "../opentelemetry-sdk", features = ["logs"] } +opentelemetry_sdk = { version = "0.21", path = "../opentelemetry-sdk", features = ["logs"] } async-std = { version="1.6" } async-trait = { version="0.1" } chrono = { version="0.4", default-features = false, features=["std"] } diff --git a/opentelemetry-user-events-metrics/CHANGELOG.md b/opentelemetry-user-events-metrics/CHANGELOG.md index f6ca50f1ec..6103307eff 100644 --- a/opentelemetry-user-events-metrics/CHANGELOG.md +++ b/opentelemetry-user-events-metrics/CHANGELOG.md @@ -2,6 +2,8 @@ ## Unreleased +## v0.2.0 + - Fix aggregation selector and temporality so every instruments are aggregated correctly with expected delta temporality. 
[#1287](https://github.com/open-telemetry/opentelemetry-rust/pull/1287). diff --git a/opentelemetry-user-events-metrics/Cargo.toml b/opentelemetry-user-events-metrics/Cargo.toml index 036f0ae008..7b8bbd1607 100644 --- a/opentelemetry-user-events-metrics/Cargo.toml +++ b/opentelemetry-user-events-metrics/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "opentelemetry-user-events-metrics" -version = "0.1.0" +version = "0.2.0" description = "OpenTelemetry metrics exporter to user events" homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-user-events-metrics" repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-user-events-metrics" @@ -12,8 +12,8 @@ rust-version = "1.65" [dependencies] opentelemetry = { version = "0.21", path = "../opentelemetry", features = ["metrics"] } -opentelemetry_sdk = { version = "0.20", path = "../opentelemetry-sdk", features = ["metrics", "rt-tokio"] } -opentelemetry-proto = { version = "0.3", path = "../opentelemetry-proto", features = ["gen-tonic", "metrics"] } +opentelemetry_sdk = { version = "0.21", path = "../opentelemetry-sdk", features = ["metrics", "rt-tokio"] } +opentelemetry-proto = { version = "0.4", path = "../opentelemetry-proto", features = ["gen-tonic", "metrics"] } eventheader = { version = "= 0.3.2" } async-trait = "0.1" prost = "0.11" diff --git a/opentelemetry-zipkin/CHANGELOG.md b/opentelemetry-zipkin/CHANGELOG.md index 9057f286e0..f4d6dfbade 100644 --- a/opentelemetry-zipkin/CHANGELOG.md +++ b/opentelemetry-zipkin/CHANGELOG.md @@ -2,6 +2,8 @@ ## vNext +## v0.19.0 + ### Changed - Bump MSRV to 1.65 [#1318](https://github.com/open-telemetry/opentelemetry-rust/pull/1318) diff --git a/opentelemetry-zipkin/Cargo.toml b/opentelemetry-zipkin/Cargo.toml index a2d973fe8d..fd5f0436a3 100644 --- a/opentelemetry-zipkin/Cargo.toml +++ b/opentelemetry-zipkin/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "opentelemetry-zipkin" -version = "0.18.0" +version = "0.19.0" description = "Zipkin exporter for OpenTelemetry" homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-zipkin" repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-zipkin" @@ -29,9 +29,9 @@ surf-client = ["surf", "opentelemetry-http/surf"] [dependencies] async-trait = "0.1" opentelemetry = { version = "0.21", path = "../opentelemetry" } -opentelemetry_sdk = { version = "0.20", path = "../opentelemetry-sdk", features = ["trace"] } -opentelemetry-http = { version = "0.9", path = "../opentelemetry-http" } -opentelemetry-semantic-conventions = { version = "0.12", path = "../opentelemetry-semantic-conventions" } +opentelemetry_sdk = { version = "0.21", path = "../opentelemetry-sdk", features = ["trace"] } +opentelemetry-http = { version = "0.10", path = "../opentelemetry-http" } +opentelemetry-semantic-conventions = { version = "0.13", path = "../opentelemetry-semantic-conventions" } serde_json = "1.0" serde = { version = "1.0", features = ["derive"] } typed-builder = "0.12" diff --git a/opentelemetry-zpages/CHANGELOG.md b/opentelemetry-zpages/CHANGELOG.md index 183e3c9ab8..d6d233d59f 100644 --- a/opentelemetry-zpages/CHANGELOG.md +++ b/opentelemetry-zpages/CHANGELOG.md @@ -2,6 +2,8 @@ ## vNext +## v0.6.0 + ### Changed - Bump MSRV to 1.65 [#1318](https://github.com/open-telemetry/opentelemetry-rust/pull/1318) diff --git a/opentelemetry-zpages/Cargo.toml b/opentelemetry-zpages/Cargo.toml index a1eb57a1de..93014e3832 100644 --- a/opentelemetry-zpages/Cargo.toml +++ 
b/opentelemetry-zpages/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "opentelemetry-zpages" -version = "0.5.0" +version = "0.6.0" description = "ZPages implementation for OpenTelemetry" homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/master/opentelemetry-zpages" repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/master/opentelemetry-zpages" @@ -20,9 +20,9 @@ all-features = true rustdoc-args = ["--cfg", "docsrs"] [dependencies] -opentelemetry = { path = "../opentelemetry" } -opentelemetry_sdk = { path = "../opentelemetry-sdk", default-features = false, features = ["trace"] } -opentelemetry-proto = { path = "../opentelemetry-proto", features = ["zpages", "gen-tonic", "with-serde"], default-features = false } +opentelemetry = { version = "0.21", path = "../opentelemetry" } +opentelemetry_sdk = { version = "0.21", path = "../opentelemetry-sdk", default-features = false, features = ["trace"] } +opentelemetry-proto = { version = "0.4", path = "../opentelemetry-proto", features = ["zpages", "gen-tonic", "with-serde"], default-features = false } async-channel = "1.6" futures-channel = "0.3" futures-util = { version = "0.3", default-features = false, features = ["std"] } diff --git a/opentelemetry/CHANGELOG.md b/opentelemetry/CHANGELOG.md index ebb4cfa336..d6d35a8616 100644 --- a/opentelemetry/CHANGELOG.md +++ b/opentelemetry/CHANGELOG.md @@ -2,6 +2,10 @@ ## vNext +## [v0.21.0](https://github.com/open-telemetry/opentelemetry-rust/compare/v0.20.0...v0.21.0) + +This release should be seen as 1.0-rc4 following 1.0-rc3 in v0.20.0. Refer to CHANGELOG.md in individual crates for details on changes made in different crates. + ### Changed - Bump MSRV to 1.65 [#1318](https://github.com/open-telemetry/opentelemetry-rust/pull/1318) From 4cff5c69b3039dcb8ea995f076bd707e2d2c2702 Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Tue, 7 Nov 2023 08:19:12 -0800 Subject: [PATCH 06/68] Span links stored as Vector instead of EvictedQueue (#1313) --- .../src/exporter/model/mod.rs | 8 ++- opentelemetry-jaeger/src/exporter/mod.rs | 4 +- opentelemetry-proto/src/transform/trace.rs | 4 +- opentelemetry-sdk/CHANGELOG.md | 16 ++++++ .../benches/batch_span_processor.rs | 4 +- opentelemetry-sdk/src/export/trace.rs | 4 +- opentelemetry-sdk/src/testing/trace/mod.rs | 4 +- opentelemetry-sdk/src/trace/links.rs | 31 +++++++++++ opentelemetry-sdk/src/trace/mod.rs | 55 +++++++++++++++++-- opentelemetry-sdk/src/trace/span.rs | 42 ++++++++++++-- opentelemetry-sdk/src/trace/span_processor.rs | 4 +- opentelemetry-sdk/src/trace/tracer.rs | 37 +++++++++---- opentelemetry-stackdriver/src/lib.rs | 5 +- opentelemetry-stdout/src/trace/transform.rs | 10 ++-- .../src/exporter/model/span.rs | 3 +- 15 files changed, 188 insertions(+), 43 deletions(-) create mode 100644 opentelemetry-sdk/src/trace/links.rs diff --git a/opentelemetry-datadog/src/exporter/model/mod.rs b/opentelemetry-datadog/src/exporter/model/mod.rs index bd42246e90..cecc080429 100644 --- a/opentelemetry-datadog/src/exporter/model/mod.rs +++ b/opentelemetry-datadog/src/exporter/model/mod.rs @@ -193,7 +193,11 @@ pub(crate) mod tests { trace::{SpanContext, SpanId, SpanKind, Status, TraceFlags, TraceId, TraceState}, KeyValue, }; - use opentelemetry_sdk::{self, trace::EvictedQueue, InstrumentationLibrary, Resource}; + use opentelemetry_sdk::{ + self, + trace::{EvictedQueue, SpanLinks}, + InstrumentationLibrary, Resource, + }; use std::borrow::Cow; use std::time::{Duration, SystemTime}; @@ -216,7 +220,7 @@ pub(crate) mod tests { let capacity =
3; let attributes = vec![KeyValue::new("span.type", "web")]; let events = EvictedQueue::new(capacity); - let links = EvictedQueue::new(capacity); + let links = SpanLinks::default(); let resource = Resource::new(vec![KeyValue::new("host.name", "test")]); trace::SpanData { diff --git a/opentelemetry-jaeger/src/exporter/mod.rs b/opentelemetry-jaeger/src/exporter/mod.rs index be3ebe4c32..82bc7089a7 100644 --- a/opentelemetry-jaeger/src/exporter/mod.rs +++ b/opentelemetry-jaeger/src/exporter/mod.rs @@ -102,7 +102,7 @@ impl SpanExporter for Exporter { } } -fn links_to_references(links: EvictedQueue<Link>) -> Option<Vec<jaeger::SpanRef>> { +fn links_to_references(links: &[Link]) -> Option<Vec<jaeger::SpanRef>> { if !links.is_empty() { let refs = links .iter() @@ -139,7 +139,7 @@ fn convert_otel_span_into_jaeger_span(span: SpanData, export_instrument_lib: boo span_id: i64::from_be_bytes(span.span_context.span_id().to_bytes()), parent_span_id: i64::from_be_bytes(span.parent_span_id.to_bytes()), operation_name: span.name.into_owned(), - references: links_to_references(span.links), + references: links_to_references(span.links.as_ref()), flags: span.span_context.trace_flags().to_u8() as i32, start_time: span .start_time diff --git a/opentelemetry-proto/src/transform/trace.rs b/opentelemetry-proto/src/transform/trace.rs index 77e788167b..1a16b070e9 100644 --- a/opentelemetry-proto/src/transform/trace.rs +++ b/opentelemetry-proto/src/transform/trace.rs @@ -93,7 +93,7 @@ pub mod tonic { dropped_attributes_count: event.dropped_attributes_count, }) .collect(), - dropped_links_count: source_span.links.dropped_count(), + dropped_links_count: source_span.links.dropped_count, links: source_span.links.into_iter().map(Into::into).collect(), status: Some(Status { code: status::StatusCode::from(&source_span.status).into(), @@ -204,7 +204,7 @@ pub mod grpcio { dropped_attributes_count: event.dropped_attributes_count, }) .collect(), - dropped_links_count: source_span.links.dropped_count(), + dropped_links_count: source_span.links.dropped_count, links: source_span.links.into_iter().map(Into::into).collect(), status: Some(Status { code: status::StatusCode::from(&source_span.status).into(), diff --git a/opentelemetry-sdk/CHANGELOG.md b/opentelemetry-sdk/CHANGELOG.md index 2e3efa34e3..1fb63971f0 100644 --- a/opentelemetry-sdk/CHANGELOG.md +++ b/opentelemetry-sdk/CHANGELOG.md @@ -2,6 +2,22 @@ ## vNext +### Changed + +- **Breaking** +[#1313](https://github.com/open-telemetry/opentelemetry-rust/issues/1313) Changes how Span links are stored to achieve performance gains. See below for details: + + *Behavior Change*: When enforcing `max_links_per_span` from `SpanLimits`, links are kept in the first-come order. The previous "eviction" based approach is no longer performed. + + *Breaking Change Affecting Exporter authors*: + + `SpanData` now stores `links` as `SpanLinks` instead of `EvictedQueue<Link>` where `SpanLinks` is a struct with a `Vec` of links and `dropped_count`.
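To make the new shape concrete for exporter authors, a short sketch (assumed usage, not text from the patch) of reading the replacement field:

```rust
use opentelemetry_sdk::export::trace::SpanData;

// `SpanLinks` derefs to `[Link]`, so slice methods apply directly, and the
// overflow beyond `max_links_per_span` is exposed as a plain counter.
fn summarize_links(span: &SpanData) -> String {
    format!(
        "exported {} links, dropped {}",
        span.links.len(),
        span.links.dropped_count
    )
}
```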
+ ## v0.21.0 ### Added diff --git a/opentelemetry-sdk/benches/batch_span_processor.rs b/opentelemetry-sdk/benches/batch_span_processor.rs index 8e296834d3..e5bea6dcd6 100644 --- a/opentelemetry-sdk/benches/batch_span_processor.rs +++ b/opentelemetry-sdk/benches/batch_span_processor.rs @@ -5,7 +5,7 @@ use opentelemetry::trace::{ use opentelemetry_sdk::export::trace::SpanData; use opentelemetry_sdk::runtime::Tokio; use opentelemetry_sdk::testing::trace::NoopSpanExporter; -use opentelemetry_sdk::trace::{BatchSpanProcessor, EvictedQueue, SpanProcessor}; +use opentelemetry_sdk::trace::{BatchSpanProcessor, EvictedQueue, SpanLinks, SpanProcessor}; use opentelemetry_sdk::Resource; use std::borrow::Cow; use std::sync::Arc; @@ -30,7 +30,7 @@ fn get_span_data() -> Vec<SpanData> { attributes: Vec::new(), dropped_attributes_count: 0, events: EvictedQueue::new(12), - links: EvictedQueue::new(12), + links: SpanLinks::default(), status: Status::Unset, resource: Cow::Owned(Resource::empty()), instrumentation_lib: Default::default(), diff --git a/opentelemetry-sdk/src/export/trace.rs b/opentelemetry-sdk/src/export/trace.rs index 6c421f2f27..c75a0dd94a 100644 --- a/opentelemetry-sdk/src/export/trace.rs +++ b/opentelemetry-sdk/src/export/trace.rs @@ -1,7 +1,7 @@ //! Trace exporters use crate::Resource; use futures_util::future::BoxFuture; -use opentelemetry::trace::{Event, Link, SpanContext, SpanId, SpanKind, Status, TraceError}; +use opentelemetry::trace::{Event, SpanContext, SpanId, SpanKind, Status, TraceError}; use opentelemetry::KeyValue; use std::borrow::Cow; use std::fmt::Debug; @@ -89,7 +89,7 @@ pub struct SpanData { /// Span events pub events: crate::trace::EvictedQueue<Event>, /// Span Links - pub links: crate::trace::EvictedQueue<Link>, + pub links: crate::trace::SpanLinks, /// Span status pub status: Status, /// Resource contains attributes representing an entity that produced this span. diff --git a/opentelemetry-sdk/src/testing/trace/mod.rs b/opentelemetry-sdk/src/testing/trace/mod.rs index 09f8232637..98b8ef3c86 100644 --- a/opentelemetry-sdk/src/testing/trace/mod.rs +++ b/opentelemetry-sdk/src/testing/trace/mod.rs @@ -7,7 +7,7 @@ use crate::{ trace::{ExportResult, SpanData, SpanExporter}, ExportError, }, - trace::{Config, EvictedQueue}, + trace::{Config, EvictedQueue, SpanLinks}, InstrumentationLibrary, }; use async_trait::async_trait; @@ -37,7 +37,7 @@ pub fn new_test_export_span_data() -> SpanData { attributes: Vec::new(), dropped_attributes_count: 0, events: EvictedQueue::new(config.span_limits.max_events_per_span), - links: EvictedQueue::new(config.span_limits.max_links_per_span), + links: SpanLinks::default(), status: Status::Unset, resource: config.resource, instrumentation_lib: InstrumentationLibrary::default(), diff --git a/opentelemetry-sdk/src/trace/links.rs b/opentelemetry-sdk/src/trace/links.rs new file mode 100644 index 0000000000..1810c46367 --- /dev/null +++ b/opentelemetry-sdk/src/trace/links.rs @@ -0,0 +1,31 @@ +//! # Span Links + +use std::ops::Deref; + +use opentelemetry::trace::Link; +/// Stores span links along with dropped count. +#[derive(Clone, Debug, Default, PartialEq)] +#[non_exhaustive] +pub struct SpanLinks { + /// The links stored as a vector. Could be empty if there are no links. + pub links: Vec<Link>, + /// The number of links dropped from the span.
+ pub dropped_count: u32, +} + +impl Deref for SpanLinks { + type Target = [Link]; + + fn deref(&self) -> &Self::Target { + &self.links + } +} + +impl IntoIterator for SpanLinks { + type Item = Link; + type IntoIter = std::vec::IntoIter<Link>; + + fn into_iter(self) -> Self::IntoIter { + self.links.into_iter() + } +} diff --git a/opentelemetry-sdk/src/trace/mod.rs b/opentelemetry-sdk/src/trace/mod.rs index 3ef46c4925..e064e85e4a 100644 --- a/opentelemetry-sdk/src/trace/mod.rs +++ b/opentelemetry-sdk/src/trace/mod.rs @@ -10,6 +10,7 @@ mod config; mod evicted_hash_map; mod evicted_queue; mod id_generator; +mod links; mod provider; mod sampler; mod span; @@ -21,6 +22,7 @@ pub use config::{config, Config}; pub use evicted_hash_map::EvictedHashMap; pub use evicted_queue::EvictedQueue; pub use id_generator::{aws::XrayIdGenerator, IdGenerator, RandomIdGenerator}; +pub use links::SpanLinks; pub use provider::{Builder, TracerProvider}; pub use sampler::{Sampler, ShouldSample}; pub use span::Span; @@ -39,14 +41,19 @@ mod runtime_tests; #[cfg(all(test, feature = "testing"))] mod tests { use super::*; - use crate::testing::trace::InMemorySpanExporterBuilder; + use crate::{ testing::trace::InMemorySpanExporterBuilder, trace::span_limit::DEFAULT_MAX_LINKS_PER_SPAN, }; use opentelemetry::{ - trace::{Span, Tracer, TracerProvider as _}, + trace::{ Link, Span, SpanBuilder, SpanContext, SpanId, TraceFlags, TraceId, Tracer, TracerProvider as _, }, KeyValue, }; #[test] - fn tracing_in_span() { + fn in_span() { // Arrange let exporter = InMemorySpanExporterBuilder::new().build(); let provider = TracerProvider::builder() @@ -70,7 +77,7 @@ mod tests { } #[test] - fn tracing_tracer_start() { + fn tracer_start() { // Arrange let exporter = InMemorySpanExporterBuilder::new().build(); let provider = TracerProvider::builder() @@ -93,4 +100,44 @@ mod tests { assert_eq!(span.name, "span_name"); assert_eq!(span.instrumentation_lib.name, "test_tracer"); } + + #[test] + fn exceed_span_links_limit() { + // Arrange + let exporter = InMemorySpanExporterBuilder::new().build(); + let provider = TracerProvider::builder() + .with_span_processor(SimpleSpanProcessor::new(Box::new(exporter.clone()))) + .build(); + + // Act + let tracer = provider.tracer("test_tracer"); + + let mut links = Vec::new(); + for _i in 0..(DEFAULT_MAX_LINKS_PER_SPAN * 2) { + links.push(Link::new( + SpanContext::new( + TraceId::from_u128(12), + SpanId::from_u64(12), + TraceFlags::default(), + false, + Default::default(), + ), + Vec::new(), + )) + } + + let span_builder = SpanBuilder::from_name("span_name").with_links(links); + let mut span = tracer.build(span_builder); + span.end(); + provider.force_flush(); + + // Assert + let exported_spans = exporter + .get_finished_spans() + .expect("Spans are expected to be exported."); + assert_eq!(exported_spans.len(), 1); + let span = &exported_spans[0]; + assert_eq!(span.name, "span_name"); + assert_eq!(span.links.len(), DEFAULT_MAX_LINKS_PER_SPAN as usize); + } } diff --git a/opentelemetry-sdk/src/trace/span.rs b/opentelemetry-sdk/src/trace/span.rs index dbd35bf443..2839c4114e 100644 --- a/opentelemetry-sdk/src/trace/span.rs +++ b/opentelemetry-sdk/src/trace/span.rs @@ -44,7 +44,7 @@ pub(crate) struct SpanData { /// Span events pub(crate) events: crate::trace::EvictedQueue<Event>, /// Span Links - pub(crate) links: crate::trace::EvictedQueue<Link>, + pub(crate) links: crate::trace::SpanLinks, /// Span status pub(crate) status: Status, } @@ -252,8 +252,9 @@ mod tests { use crate::testing::trace::NoopSpanExporter; use
crate::trace::span_limit::{ DEFAULT_MAX_ATTRIBUTES_PER_EVENT, DEFAULT_MAX_ATTRIBUTES_PER_LINK, - DEFAULT_MAX_ATTRIBUTES_PER_SPAN, + DEFAULT_MAX_ATTRIBUTES_PER_SPAN, DEFAULT_MAX_LINKS_PER_SPAN, }; + use crate::trace::SpanLinks; use opentelemetry::trace::{Link, SpanBuilder, TraceFlags, TraceId, Tracer}; use opentelemetry::{trace::Span as _, trace::TracerProvider, KeyValue}; use std::time::Duration; @@ -272,7 +273,7 @@ mod tests { attributes: Vec::new(), dropped_attributes_count: 0, events: crate::trace::EvictedQueue::new(config.span_limits.max_events_per_span), - links: crate::trace::EvictedQueue::new(config.span_limits.max_links_per_span), + links: SpanLinks::default(), status: Status::Unset, }; (tracer, data) @@ -610,11 +611,44 @@ mod tests { .clone() .expect("span data should not be empty as we already set it before") .links; - let link_vec: Vec<_> = link_queue.iter().collect(); + let link_vec: Vec<_> = link_queue.links; let processed_link = link_vec.get(0).expect("should have at least one link"); assert_eq!(processed_link.attributes.len(), 128); } + #[test] + fn exceed_span_links_limit() { + let exporter = NoopSpanExporter::new(); + let provider_builder = + crate::trace::TracerProvider::builder().with_simple_exporter(exporter); + let provider = provider_builder.build(); + let tracer = provider.tracer("opentelemetry-test"); + + let mut links = Vec::new(); + for _i in 0..(DEFAULT_MAX_LINKS_PER_SPAN * 2) { + links.push(Link::new( + SpanContext::new( + TraceId::from_u128(12), + SpanId::from_u64(12), + TraceFlags::default(), + false, + Default::default(), + ), + Vec::new(), + )) + } + + let span_builder = tracer.span_builder("test").with_links(links); + let span = tracer.build(span_builder); + let link_queue = span + .data + .clone() + .expect("span data should not be empty as we already set it before") + .links; + let link_vec: Vec<_> = link_queue.links; + assert_eq!(link_vec.len(), DEFAULT_MAX_LINKS_PER_SPAN as usize); + } + #[test] fn test_span_exported_data() { let provider = crate::trace::TracerProvider::builder() diff --git a/opentelemetry-sdk/src/trace/span_processor.rs b/opentelemetry-sdk/src/trace/span_processor.rs index 73c92f6903..395cdd73be 100644 --- a/opentelemetry-sdk/src/trace/span_processor.rs +++ b/opentelemetry-sdk/src/trace/span_processor.rs @@ -721,7 +721,7 @@ mod tests { use crate::testing::trace::{ new_test_export_span_data, new_test_exporter, new_tokio_test_exporter, }; - use crate::trace::{BatchConfig, EvictedQueue}; + use crate::trace::{BatchConfig, EvictedQueue, SpanLinks}; use async_trait::async_trait; use opentelemetry::trace::{SpanContext, SpanId, SpanKind, Status}; use std::fmt::Debug; @@ -751,7 +751,7 @@ mod tests { attributes: Vec::new(), dropped_attributes_count: 0, events: EvictedQueue::new(0), - links: EvictedQueue::new(0), + links: SpanLinks::default(), status: Status::Unset, resource: Default::default(), instrumentation_lib: Default::default(), diff --git a/opentelemetry-sdk/src/trace/tracer.rs b/opentelemetry-sdk/src/trace/tracer.rs index e9dd408f5b..4b918a2fe4 100644 --- a/opentelemetry-sdk/src/trace/tracer.rs +++ b/opentelemetry-sdk/src/trace/tracer.rs @@ -11,11 +11,10 @@ use crate::{ trace::{ provider::{TracerProvider, TracerProviderInner}, span::{Span, SpanData}, - Config, EvictedQueue, SpanLimits, + Config, EvictedQueue, SpanLimits, SpanLinks, }, InstrumentationLibrary, }; -use once_cell::sync::Lazy; use opentelemetry::{ trace::{ Link, SamplingDecision, SamplingResult, SpanBuilder, SpanContext, SpanId, SpanKind, @@ -120,8 +119,6 @@ impl Tracer { } } 
-static EMPTY_ATTRIBUTES: Lazy<Vec<KeyValue>> = Lazy::new(Default::default); - impl opentelemetry::trace::Tracer for Tracer { /// This implementation of `Tracer` produces `sdk::Span` instances. type Span = Span; @@ -181,7 +178,7 @@ impl opentelemetry::trace::Tracer for Tracer { trace_id, &builder.name, &span_kind, - builder.attributes.as_ref().unwrap_or(&EMPTY_ATTRIBUTES), + builder.attributes.as_ref().unwrap_or(&Vec::new()), builder.links.as_deref().unwrap_or(&[]), provider.config(), ) @@ -210,18 +207,34 @@ impl opentelemetry::trace::Tracer for Tracer { attribute_options.truncate(span_attributes_limit); let dropped_attributes_count = dropped_attributes_count as u32; - let mut link_options = builder.links.take(); - let mut links = EvictedQueue::new(span_limits.max_links_per_span); - if let Some(link_options) = &mut link_options { + // Links are available as Option<Vec<Link>> in the builder + // If it is None, then there are no links to process. + // In that case Span.Links will be default (empty Vec, 0 drop count) + // Otherwise, truncate Vec to keep until limits and use that in Span.Links. + // Store the count of excess links into Span.Links.dropped_count. + // There is no ability today to add Links after Span creation, + // but such a capability will be needed in the future + // once the spec for that stabilizes. + + let spans_links_limit = span_limits.max_links_per_span as usize; + let span_links: SpanLinks = if let Some(mut links) = builder.links.take() { + let dropped_count = links.len().saturating_sub(spans_links_limit); + links.truncate(spans_links_limit); let link_attributes_limit = span_limits.max_attributes_per_link as usize; - for link in link_options.iter_mut() { + for link in links.iter_mut() { let dropped_attributes_count = link.attributes.len().saturating_sub(link_attributes_limit); link.attributes.truncate(link_attributes_limit); link.dropped_attributes_count = dropped_attributes_count as u32; } - links.append_vec(link_options); - } + SpanLinks { links, dropped_count: dropped_count as u32, } + } else { + SpanLinks::default() + }; + let start_time = start_time.unwrap_or_else(opentelemetry::time::now); let end_time = end_time.unwrap_or(start_time); let mut events_queue = EvictedQueue::new(span_limits.max_events_per_span); @@ -250,7 +263,7 @@ impl opentelemetry::trace::Tracer for Tracer { attributes: attribute_options, dropped_attributes_count, events: events_queue, - links, + links: span_links, status, }), self.clone(),
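The link accounting above reduces to the following (a standalone sketch of the same first-come truncation, not code from the patch):

```rust
use opentelemetry::trace::Link;

// Keep the first `max_links` links in arrival order; count the rest as dropped.
fn truncate_links(mut links: Vec<Link>, max_links: usize) -> (Vec<Link>, u32) {
    let dropped = links.len().saturating_sub(max_links) as u32;
    links.truncate(max_links);
    (links, dropped)
}
```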
diff --git a/opentelemetry-stackdriver/src/lib.rs b/opentelemetry-stackdriver/src/lib.rs index af75a78cb9..af6232f5f5 100644 --- a/opentelemetry-stackdriver/src/lib.rs +++ b/opentelemetry-stackdriver/src/lib.rs @@ -35,7 +35,6 @@ use opentelemetry_sdk::{ trace::{ExportResult, SpanData, SpanExporter}, ExportError, }, - trace::EvictedQueue, Resource, }; use opentelemetry_semantic_conventions::resource::SERVICE_NAME; @@ -765,13 +764,13 @@ impl From<(Vec<KeyValue>, &Resource)> for Attributes { } } -fn transform_links(links: &EvictedQueue<Link>) -> Option<Links> { +fn transform_links(links: &opentelemetry_sdk::trace::SpanLinks) -> Option<Links> { if links.is_empty() { return None; } Some(Links { - dropped_links_count: links.dropped_count() as i32, + dropped_links_count: links.dropped_count as i32, link: links .iter() .map(|link| Link { diff --git a/opentelemetry-stdout/src/trace/transform.rs b/opentelemetry-stdout/src/trace/transform.rs index bec34301fb..ac3e34a50c 100644 --- a/opentelemetry-stdout/src/trace/transform.rs +++ b/opentelemetry-stdout/src/trace/transform.rs @@ -109,8 +109,8 @@ impl From for Span { attributes: value.attributes.into_iter().map(Into::into).collect(), dropped_events_count: value.events.dropped_count(), events: value.events.into_iter().map(Into::into).collect(), - dropped_links_count: value.links.dropped_count(), - links: value.links.into_iter().map(Into::into).collect(), + dropped_links_count: value.links.dropped_count, + links: value.links.iter().map(Into::into).collect(), status: value.status.into(), } } @@ -177,13 +177,13 @@ struct Link { dropped_attributes_count: u32, } -impl From<opentelemetry::trace::Link> for Link { - fn from(value: opentelemetry::trace::Link) -> Self { +impl From<&opentelemetry::trace::Link> for Link { + fn from(value: &opentelemetry::trace::Link) -> Self { Link { trace_id: value.span_context.trace_id().to_string(), span_id: value.span_context.span_id().to_string(), trace_state: Some(value.span_context.trace_state().header()).filter(|s| !s.is_empty()), - attributes: value.attributes.into_iter().map(Into::into).collect(), + attributes: value.attributes.iter().map(Into::into).collect(), dropped_attributes_count: value.dropped_attributes_count, } } diff --git a/opentelemetry-zipkin/src/exporter/model/span.rs b/opentelemetry-zipkin/src/exporter/model/span.rs index 83507597f9..1b054a1db2 100644 --- a/opentelemetry-zipkin/src/exporter/model/span.rs +++ b/opentelemetry-zipkin/src/exporter/model/span.rs @@ -60,6 +60,7 @@ mod tests { use crate::exporter::model::span::{Kind, Span}; use crate::exporter::model::{into_zipkin_span, OTEL_ERROR_DESCRIPTION, OTEL_STATUS_CODE}; use opentelemetry::trace::{SpanContext, SpanId, SpanKind, Status, TraceFlags, TraceId}; + use opentelemetry_sdk::trace::SpanLinks; use opentelemetry_sdk::{export::trace::SpanData, trace::EvictedQueue, Resource}; use std::borrow::Cow; use std::collections::HashMap; @@ -163,7 +164,7 @@ mod tests { attributes: Vec::new(), dropped_attributes_count: 0, events: EvictedQueue::new(20), - links: EvictedQueue::new(20), + links: SpanLinks::default(), status, resource: Cow::Owned(Resource::default()), instrumentation_lib: Default::default(), From ab969bf5719e422040b6e1a073ee7183ef3eed76 Mon Sep 17 00:00:00 2001 From: Harold Dost Date: Tue, 7 Nov 2023 17:36:49 +0100 Subject: [PATCH 07/68] Requesting Maintainership for h.dost (#1342) --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7d192d92c6..9451f9e427 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -140,13 +140,13 @@ For GitHub groups see the [code owners](CODEOWNERS) file.
### Maintainers * [Dirkjan Ochtman](https://github.com/djc) +* [Harold Dost](https://github.com/hdost) * [Julian Tescher](https://github.com/jtescher) * [Zhongyang Wu](https://github.com/TommyCpp) ### Approvers * [Cijo Thomas](https://github.com/cijothomas) -* [Harold Dost](https://github.com/hdost) * [Lalit Kumar Bhasin](https://github.com/lalitb) * [Shaun Cox](https://github.com/shaun-cox) From 6c3dc789fb3c337a66144cc36a6a930534f57d92 Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Tue, 7 Nov 2023 08:38:50 -0800 Subject: [PATCH 08/68] Add jaeger tracing example to main examples (#1345) --- Cargo.toml | 1 + examples/tracing-jaeger/Cargo.toml | 13 ++++++++ examples/tracing-jaeger/README.md | 19 ++++++++++++ examples/tracing-jaeger/src/main.rs | 48 +++++++++++++++++++++++++++++ 4 files changed, 81 insertions(+) create mode 100644 examples/tracing-jaeger/Cargo.toml create mode 100644 examples/tracing-jaeger/README.md create mode 100644 examples/tracing-jaeger/src/main.rs diff --git a/Cargo.toml b/Cargo.toml index 39ee5c00b0..52eec49037 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -29,6 +29,7 @@ members = [ "examples/logs-basic", "examples/traceresponse", "examples/tracing-grpc", + "examples/tracing-jaeger", "stress", ] resolver = "2" diff --git a/examples/tracing-jaeger/Cargo.toml b/examples/tracing-jaeger/Cargo.toml new file mode 100644 index 0000000000..afafd3fdb1 --- /dev/null +++ b/examples/tracing-jaeger/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "tracing-jaeger" +version = "0.1.0" +edition = "2021" +license = "Apache-2.0" +publish = false + +[dependencies] +opentelemetry = { path = "../../opentelemetry" } +opentelemetry-jaeger = { path = "../../opentelemetry-jaeger" } +opentelemetry_sdk = { path = "../../opentelemetry-sdk", features = ["rt-tokio"] } +opentelemetry-otlp = { path = "../../opentelemetry-otlp", features = ["tonic"] } +tokio = { version = "1.0", features = ["full"] } diff --git a/examples/tracing-jaeger/README.md b/examples/tracing-jaeger/README.md new file mode 100644 index 0000000000..20adc19f17 --- /dev/null +++ b/examples/tracing-jaeger/README.md @@ -0,0 +1,19 @@ +# Exporting traces to Jaeger + +This example shows how to export spans to the Jaeger agent using the OTLP exporter.
+ +## Usage + +Launch the example app with Jaeger running in the background via Docker: + +```shell + +# Run Jaeger in the background with native OTLP ingestion +$ docker run -d -p16686:16686 -p4317:4317 -e COLLECTOR_OTLP_ENABLED=true jaegertracing/all-in-one:latest + +# Run the app +$ cargo run + +# View spans +$ firefox http://localhost:16686/ +``` diff --git a/examples/tracing-jaeger/src/main.rs b/examples/tracing-jaeger/src/main.rs new file mode 100644 index 0000000000..e80f028390 --- /dev/null +++ b/examples/tracing-jaeger/src/main.rs @@ -0,0 +1,48 @@ +use opentelemetry::global::shutdown_tracer_provider; +use opentelemetry::{ + global, + trace::{TraceContextExt, TraceError, Tracer}, + KeyValue, +}; +use opentelemetry_otlp::WithExportConfig; +use opentelemetry_sdk::{runtime, trace as sdktrace, Resource}; +use std::error::Error; + +fn init_tracer() -> Result<sdktrace::Tracer, TraceError> { + opentelemetry_otlp::new_pipeline() + .tracing() + .with_exporter( + opentelemetry_otlp::new_exporter() + .tonic() + .with_endpoint("http://localhost:4317"), + ) + .with_trace_config( + sdktrace::config().with_resource(Resource::new(vec![KeyValue::new( + "service.name", + "tracing-jaeger", + )])), + ) + .install_batch(runtime::Tokio) +} + +#[tokio::main] +async fn main() -> Result<(), Box<dyn Error>> { + let _tracer = init_tracer().expect("Failed to initialize tracer."); + + let tracer = global::tracer("tracing-jaeger"); + tracer.in_span("main-operation", |cx| { + let span = cx.span(); + span.set_attribute(KeyValue::new("my-attribute", "my-value")); + span.add_event( + "Main span event".to_string(), + vec![KeyValue::new("foo", "1")], + ); + tracer.in_span("child-operation...", |cx| { + let span = cx.span(); + span.add_event("Sub span event", vec![KeyValue::new("bar", "1")]); + }); + });
+ multiple: true + options: + - OTLP + - Zipkin + - Jaeger (Deprecated) + - N/A + - type: textarea + id: logs + attributes: + label: Relevant log output + description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks. + render: shell diff --git a/.github/ISSUE_TEMPLATE/FEATURE-REQUEST.yml b/.github/ISSUE_TEMPLATE/FEATURE-REQUEST.yml new file mode 100644 index 0000000000..54a66821eb --- /dev/null +++ b/.github/ISSUE_TEMPLATE/FEATURE-REQUEST.yml @@ -0,0 +1,47 @@ +name: Feature Request +description: Request a feature for the OpenTelemetry Rust implementation. +title: "[Feature]: " +labels: ["enhancement", "triage:todo"] +projects: ["open-telemetry/opentelemetry-rust"] +body: + - type: markdown + attributes: + value: | + Thanks for using our library and trying to make it better! + + Before opening a feature request against this repo, consider whether the feature + should/could be implemented in the [other OpenTelemetry client + libraries](https://github.com/open-telemetry/). If so, please [open an issue on + opentelemetry-specification](https://github.com/open-telemetry/opentelemetry-specification/issues/new) first. + - type: textarea + id: related-problem + attributes: + label: Related Problems? + description: Is your feature request related to a problem? If so, provide a concise description of the problem. + placeholder: Include the Issue ID from this or other repos. + validations: + required: false + - type: textarea + id: solution + attributes: + label: Describe the solution you'd like: + description: What do you want to happen instead? What is the expected behavior? + placeholder: I'd like the api to ... + validations: + required: true + - type: textarea + id: alternatives + attributes: + label: Considered Alternatives + description: Which alternative solutions or features have you considered? + placeholder: Some potential solutions + validations: + required: false + - type: textarea + id: additional-context + attributes: + label: Additional Context + description: Add any other context about the feature request here. + placeholder: Some related requests in other project or upstream spec proposals. 
+ validations: + required: false From 5d5296b138b5d440aed82d0f0f702f95761c7b54 Mon Sep 17 00:00:00 2001 From: Shaun Cox Date: Wed, 8 Nov 2023 10:12:26 -0600 Subject: [PATCH 10/68] More distinct naming of metrics constructs in the SDK (#1328) --- examples/metrics-advanced/src/main.rs | 8 ++- examples/metrics-basic/src/main.rs | 6 +- .../examples/basic-otlp-http/src/main.rs | 2 +- .../examples/basic-otlp/src/main.rs | 4 +- opentelemetry-otlp/src/metric.rs | 6 +- opentelemetry-prometheus/examples/hyper.rs | 4 +- opentelemetry-prometheus/src/lib.rs | 6 +- .../tests/integration_test.rs | 8 +-- opentelemetry-sdk/CHANGELOG.md | 1 + opentelemetry-sdk/benches/metric.rs | 8 +-- opentelemetry-sdk/src/metrics/instrument.rs | 8 +-- opentelemetry-sdk/src/metrics/meter.rs | 60 +++++++++---------- .../src/metrics/meter_provider.rs | 51 +++++++++------- opentelemetry-sdk/src/metrics/mod.rs | 8 +-- opentelemetry-sdk/src/metrics/pipeline.rs | 2 +- opentelemetry-sdk/src/metrics/view.rs | 4 +- .../src/testing/metrics/in_memory_exporter.rs | 2 +- opentelemetry-stdout/examples/basic.rs | 6 +- opentelemetry-stdout/src/lib.rs | 8 +-- .../examples/basic.rs | 6 +- stress/src/metrics.rs | 4 +- 21 files changed, 111 insertions(+), 101 deletions(-) diff --git a/examples/metrics-advanced/src/main.rs b/examples/metrics-advanced/src/main.rs index 5716009cd7..760ed1b7e1 100644 --- a/examples/metrics-advanced/src/main.rs +++ b/examples/metrics-advanced/src/main.rs @@ -1,11 +1,13 @@ use opentelemetry::metrics::Unit; use opentelemetry::Key; use opentelemetry::{metrics::MeterProvider as _, KeyValue}; -use opentelemetry_sdk::metrics::{Aggregation, Instrument, MeterProvider, PeriodicReader, Stream}; +use opentelemetry_sdk::metrics::{ + Aggregation, Instrument, PeriodicReader, SdkMeterProvider, Stream, +}; use opentelemetry_sdk::{runtime, Resource}; use std::error::Error; -fn init_meter_provider() -> MeterProvider { +fn init_meter_provider() -> SdkMeterProvider { // for example 1 let my_view_rename_and_unit = |i: &Instrument| { if i.name == "my_histogram" { @@ -48,7 +50,7 @@ fn init_meter_provider() -> MeterProvider { // Ok(serde_json::to_writer_pretty(writer, &data).unwrap())) .build(); let reader = PeriodicReader::builder(exporter, runtime::Tokio).build(); - MeterProvider::builder() + SdkMeterProvider::builder() .with_reader(reader) .with_resource(Resource::new(vec![KeyValue::new( "service.name", diff --git a/examples/metrics-basic/src/main.rs b/examples/metrics-basic/src/main.rs index c4bbe71d46..c3307d1f3a 100644 --- a/examples/metrics-basic/src/main.rs +++ b/examples/metrics-basic/src/main.rs @@ -1,17 +1,17 @@ use opentelemetry::metrics::Unit; use opentelemetry::{metrics::MeterProvider as _, KeyValue}; -use opentelemetry_sdk::metrics::{MeterProvider, PeriodicReader}; +use opentelemetry_sdk::metrics::{PeriodicReader, SdkMeterProvider}; use opentelemetry_sdk::{runtime, Resource}; use std::error::Error; -fn init_meter_provider() -> MeterProvider { +fn init_meter_provider() -> SdkMeterProvider { let exporter = opentelemetry_stdout::MetricsExporterBuilder::default() // uncomment the below lines to pretty print output. 
// .with_encoder(|writer, data| // Ok(serde_json::to_writer_pretty(writer, &data).unwrap())) .build(); let reader = PeriodicReader::builder(exporter, runtime::Tokio).build(); - MeterProvider::builder() + SdkMeterProvider::builder() .with_reader(reader) .with_resource(Resource::new(vec![KeyValue::new( "service.name", diff --git a/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs b/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs index 893788475c..36b4fdae1f 100644 --- a/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs +++ b/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs @@ -44,7 +44,7 @@ fn init_tracer() -> Result { .install_batch(opentelemetry_sdk::runtime::Tokio) } -fn init_metrics() -> metrics::Result { +fn init_metrics() -> metrics::Result { let export_config = opentelemetry_otlp::ExportConfig { endpoint: "http://localhost:4318/v1/metrics".to_string(), ..opentelemetry_otlp::ExportConfig::default() diff --git a/opentelemetry-otlp/examples/basic-otlp/src/main.rs b/opentelemetry-otlp/examples/basic-otlp/src/main.rs index 61df37c2ec..7892151176 100644 --- a/opentelemetry-otlp/examples/basic-otlp/src/main.rs +++ b/opentelemetry-otlp/examples/basic-otlp/src/main.rs @@ -12,7 +12,7 @@ use opentelemetry::{ use opentelemetry_appender_log::OpenTelemetryLogBridge; use opentelemetry_otlp::{ExportConfig, WithExportConfig}; use opentelemetry_sdk::logs::Config; -use opentelemetry_sdk::{metrics::MeterProvider, runtime, trace as sdktrace, Resource}; +use opentelemetry_sdk::{metrics::SdkMeterProvider, runtime, trace as sdktrace, Resource}; use std::error::Error; fn init_tracer() -> Result { @@ -32,7 +32,7 @@ fn init_tracer() -> Result { .install_batch(runtime::Tokio) } -fn init_metrics() -> metrics::Result { +fn init_metrics() -> metrics::Result { let export_config = ExportConfig { endpoint: "http://localhost:4317".to_string(), ..ExportConfig::default() diff --git a/opentelemetry-otlp/src/metric.rs b/opentelemetry-otlp/src/metric.rs index c1148a421b..78155aace1 100644 --- a/opentelemetry-otlp/src/metric.rs +++ b/opentelemetry-otlp/src/metric.rs @@ -18,7 +18,7 @@ use opentelemetry_sdk::{ AggregationSelector, DefaultAggregationSelector, DefaultTemporalitySelector, TemporalitySelector, }, - Aggregation, InstrumentKind, MeterProvider, PeriodicReader, + Aggregation, InstrumentKind, PeriodicReader, SdkMeterProvider, }, runtime::Runtime, Resource, @@ -215,7 +215,7 @@ where RT: Runtime, { /// Build MeterProvider - pub fn build(self) -> Result { + pub fn build(self) -> Result { let exporter = self.exporter_pipeline.build_metrics_exporter( self.temporality_selector .unwrap_or_else(|| Box::new(DefaultTemporalitySelector::new())), @@ -234,7 +234,7 @@ where let reader = builder.build(); - let mut provider = MeterProvider::builder().with_reader(reader); + let mut provider = SdkMeterProvider::builder().with_reader(reader); if let Some(resource) = self.resource { provider = provider.with_resource(resource); diff --git a/opentelemetry-prometheus/examples/hyper.rs b/opentelemetry-prometheus/examples/hyper.rs index 79787af309..943ba617b6 100644 --- a/opentelemetry-prometheus/examples/hyper.rs +++ b/opentelemetry-prometheus/examples/hyper.rs @@ -8,7 +8,7 @@ use opentelemetry::{ metrics::{Counter, Histogram, MeterProvider as _, Unit}, KeyValue, }; -use opentelemetry_sdk::metrics::MeterProvider; +use opentelemetry_sdk::metrics::SdkMeterProvider; use prometheus::{Encoder, Registry, TextEncoder}; use std::convert::Infallible; use std::sync::Arc; @@ -71,7 +71,7 @@ pub async fn main() -> Result<(), 
Box> {
     let exporter = opentelemetry_prometheus::exporter()
         .with_registry(registry.clone())
         .build()?;
-    let provider = MeterProvider::builder().with_reader(exporter).build();
+    let provider = SdkMeterProvider::builder().with_reader(exporter).build();
     let meter = provider.meter("hyper-example");
 
     let state = Arc::new(AppState {
diff --git a/opentelemetry-prometheus/src/lib.rs b/opentelemetry-prometheus/src/lib.rs
index dcd008c071..804f69dea6 100644
--- a/opentelemetry-prometheus/src/lib.rs
+++ b/opentelemetry-prometheus/src/lib.rs
@@ -3,8 +3,8 @@
 //! [Prometheus]: https://prometheus.io
 //!
 //! ```
-//! use opentelemetry::{metrics::MeterProvider as _, KeyValue};
-//! use opentelemetry_sdk::metrics::MeterProvider;
+//! use opentelemetry::{metrics::MeterProvider, KeyValue};
+//! use opentelemetry_sdk::metrics::SdkMeterProvider;
 //! use prometheus::{Encoder, TextEncoder};
 //!
 //! # fn main() -> Result<(), Box> {
@@ -18,7 +18,7 @@
 //!     .build()?;
 //!
 //! // set up a meter to create instruments
-//! let provider = MeterProvider::builder().with_reader(exporter).build();
+//! let provider = SdkMeterProvider::builder().with_reader(exporter).build();
 //! let meter = provider.meter("my-app");
 //!
 //! // Use two instruments
diff --git a/opentelemetry-prometheus/tests/integration_test.rs b/opentelemetry-prometheus/tests/integration_test.rs
index 95e95c9f16..20dabd4076 100644
--- a/opentelemetry-prometheus/tests/integration_test.rs
+++ b/opentelemetry-prometheus/tests/integration_test.rs
@@ -6,7 +6,7 @@ use opentelemetry::metrics::{Meter, MeterProvider as _, Unit};
 use opentelemetry::Key;
 use opentelemetry::KeyValue;
 use opentelemetry_prometheus::ExporterBuilder;
-use opentelemetry_sdk::metrics::{new_view, Aggregation, Instrument, MeterProvider, Stream};
+use opentelemetry_sdk::metrics::{new_view, Aggregation, Instrument, SdkMeterProvider, Stream};
 use opentelemetry_sdk::resource::{
     EnvResourceDetector, SdkProvidedResourceDetector, TelemetryResourceDetector,
 };
@@ -336,7 +336,7 @@ fn prometheus_exporter_integration() {
         ))
     };
 
-    let provider = MeterProvider::builder()
+    let provider = SdkMeterProvider::builder()
         .with_resource(res)
         .with_reader(exporter)
         .with_view(
@@ -395,7 +395,7 @@ fn multiple_scopes() {
         TELEMETRY_SDK_VERSION.string("latest"),
     ]));
 
-    let provider = MeterProvider::builder()
+    let provider = SdkMeterProvider::builder()
         .with_reader(exporter)
         .with_resource(resource)
         .build();
@@ -730,7 +730,7 @@ fn duplicate_metrics() {
             .chain(tc.custom_resource_attrs.into_iter()),
         ));
 
-    let provider = MeterProvider::builder()
+    let provider = SdkMeterProvider::builder()
         .with_resource(resource)
         .with_reader(exporter)
         .build();
diff --git a/opentelemetry-sdk/CHANGELOG.md b/opentelemetry-sdk/CHANGELOG.md
index 1fb63971f0..2a6d9efd68 100644
--- a/opentelemetry-sdk/CHANGELOG.md
+++ b/opentelemetry-sdk/CHANGELOG.md
@@ -30,6 +30,7 @@
 
 ### Changed
 
+- Renamed `MeterProvider` and `Meter` to `SdkMeterProvider` and `SdkMeter` respectively to avoid name collision with public API types. [#1328](https://github.com/open-telemetry/opentelemetry-rust/pull/1328)
 - Bump MSRV to 1.65 [#1318](https://github.com/open-telemetry/opentelemetry-rust/pull/1318)
 - Default Resource (the one used when no other Resource is explicitly provided) now includes `TelemetryResourceDetector`,
   populating "telemetry.sdk.*" attributes.
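For downstream code the rename is mechanical: the SDK struct previously exported as `MeterProvider` is now `SdkMeterProvider`, and the bare `MeterProvider` name refers only to the API trait. A minimal sketch of the new spelling (the `ManualReader` and the instrument names here are illustrative only, not part of this change):

```rust
use opentelemetry::{metrics::MeterProvider, KeyValue};
use opentelemetry_sdk::metrics::{ManualReader, SdkMeterProvider};

fn main() {
    // Previously `opentelemetry_sdk::metrics::MeterProvider::builder()`;
    // the builder API itself is unchanged, only the type name moved.
    let provider = SdkMeterProvider::builder()
        .with_reader(ManualReader::builder().build())
        .build();

    // `meter` is defined on the API's `MeterProvider` trait, which can now
    // be imported under its own name without clashing with the SDK struct.
    let meter = provider.meter("my-app");
    let counter = meter.u64_counter("my_counter").init();
    counter.add(1, &[KeyValue::new("key", "value")]);
}
```

Note how the doc examples above drop the old `use opentelemetry::metrics::MeterProvider as _;` alias: with the collision gone, the trait no longer needs to be renamed out of the way.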
diff --git a/opentelemetry-sdk/benches/metric.rs b/opentelemetry-sdk/benches/metric.rs index 5594bc4d25..052295c279 100644 --- a/opentelemetry-sdk/benches/metric.rs +++ b/opentelemetry-sdk/benches/metric.rs @@ -11,7 +11,7 @@ use opentelemetry_sdk::{ data::{ResourceMetrics, Temporality}, new_view, reader::{AggregationSelector, MetricReader, TemporalitySelector}, - Aggregation, Instrument, InstrumentKind, ManualReader, MeterProvider, Pipeline, Stream, + Aggregation, Instrument, InstrumentKind, ManualReader, Pipeline, SdkMeterProvider, Stream, View, }, Resource, @@ -150,7 +150,7 @@ fn bench_counter(view: Option>, temporality: &str) -> (SharedReade .build(), )) }; - let mut builder = MeterProvider::builder().with_reader(rdr.clone()); + let mut builder = SdkMeterProvider::builder().with_reader(rdr.clone()); if let Some(view) = view { builder = builder.with_view(view); } @@ -367,7 +367,7 @@ fn bench_histogram(bound_count: usize) -> (SharedReader, Histogram) { ); let r = SharedReader(Arc::new(ManualReader::default())); - let mut builder = MeterProvider::builder().with_reader(r.clone()); + let mut builder = SdkMeterProvider::builder().with_reader(r.clone()); if let Some(view) = view { builder = builder.with_view(view); } @@ -408,7 +408,7 @@ fn histograms(c: &mut Criterion) { fn benchmark_collect_histogram(b: &mut Bencher, n: usize) { let r = SharedReader(Arc::new(ManualReader::default())); - let mtr = MeterProvider::builder() + let mtr = SdkMeterProvider::builder() .with_reader(r.clone()) .build() .meter("sdk/metric/bench/histogram"); diff --git a/opentelemetry-sdk/src/metrics/instrument.rs b/opentelemetry-sdk/src/metrics/instrument.rs index 5cc44c6772..7971d47d3c 100644 --- a/opentelemetry-sdk/src/metrics/instrument.rs +++ b/opentelemetry-sdk/src/metrics/instrument.rs @@ -248,11 +248,11 @@ impl InstrumentId { } } -pub(crate) struct InstrumentImpl { +pub(crate) struct ResolvedMeasures { pub(crate) measures: Vec>>, } -impl SyncCounter for InstrumentImpl { +impl SyncCounter for ResolvedMeasures { fn add(&self, val: T, attrs: &[KeyValue]) { for measure in &self.measures { measure.call(val, AttributeSet::from(attrs)) @@ -260,7 +260,7 @@ impl SyncCounter for InstrumentImpl { } } -impl SyncUpDownCounter for InstrumentImpl { +impl SyncUpDownCounter for ResolvedMeasures { fn add(&self, val: T, attrs: &[KeyValue]) { for measure in &self.measures { measure.call(val, AttributeSet::from(attrs)) @@ -268,7 +268,7 @@ impl SyncUpDownCounter for InstrumentImpl { } } -impl SyncHistogram for InstrumentImpl { +impl SyncHistogram for ResolvedMeasures { fn record(&self, val: T, attrs: &[KeyValue]) { for measure in &self.measures { measure.call(val, AttributeSet::from(attrs)) diff --git a/opentelemetry-sdk/src/metrics/meter.rs b/opentelemetry-sdk/src/metrics/meter.rs index 6a7b116863..b025a9152d 100644 --- a/opentelemetry-sdk/src/metrics/meter.rs +++ b/opentelemetry-sdk/src/metrics/meter.rs @@ -15,7 +15,7 @@ use opentelemetry::{ use crate::instrumentation::Scope; use crate::metrics::{ instrument::{ - Instrument, InstrumentImpl, InstrumentKind, Observable, ObservableId, EMPTY_MEASURE_MSG, + Instrument, InstrumentKind, Observable, ObservableId, ResolvedMeasures, EMPTY_MEASURE_MSG, }, internal::{self, Number}, pipeline::{Pipelines, Resolver}, @@ -46,7 +46,7 @@ const INSTRUMENT_UNIT_INVALID_CHAR: &str = "characters in instrument unit must b /// See the [Meter API] docs for usage. 
/// /// [Meter API]: opentelemetry::metrics::Meter -pub struct Meter { +pub struct SdkMeter { scope: Scope, pipes: Arc, u64_resolver: Resolver, @@ -55,11 +55,11 @@ pub struct Meter { validation_policy: InstrumentValidationPolicy, } -impl Meter { +impl SdkMeter { pub(crate) fn new(scope: Scope, pipes: Arc) -> Self { let view_cache = Default::default(); - Meter { + SdkMeter { scope, pipes: Arc::clone(&pipes), u64_resolver: Resolver::new(Arc::clone(&pipes), Arc::clone(&view_cache)), @@ -79,7 +79,7 @@ impl Meter { } #[doc(hidden)] -impl InstrumentProvider for Meter { +impl InstrumentProvider for SdkMeter { fn u64_counter( &self, name: Cow<'static, str>, @@ -87,7 +87,7 @@ impl InstrumentProvider for Meter { unit: Option, ) -> Result> { validate_instrument_config(name.as_ref(), unit.as_ref(), self.validation_policy)?; - let p = InstProvider::new(self, &self.u64_resolver); + let p = InstrumentResolver::new(self, &self.u64_resolver); p.lookup( InstrumentKind::Counter, name, @@ -104,7 +104,7 @@ impl InstrumentProvider for Meter { unit: Option, ) -> Result> { validate_instrument_config(name.as_ref(), unit.as_ref(), self.validation_policy)?; - let p = InstProvider::new(self, &self.f64_resolver); + let p = InstrumentResolver::new(self, &self.f64_resolver); p.lookup( InstrumentKind::Counter, name, @@ -122,7 +122,7 @@ impl InstrumentProvider for Meter { callbacks: Vec>, ) -> Result> { validate_instrument_config(name.as_ref(), unit.as_ref(), self.validation_policy)?; - let p = InstProvider::new(self, &self.u64_resolver); + let p = InstrumentResolver::new(self, &self.u64_resolver); let ms = p.measures( InstrumentKind::ObservableCounter, name.clone(), @@ -159,7 +159,7 @@ impl InstrumentProvider for Meter { callbacks: Vec>, ) -> Result> { validate_instrument_config(name.as_ref(), unit.as_ref(), self.validation_policy)?; - let p = InstProvider::new(self, &self.f64_resolver); + let p = InstrumentResolver::new(self, &self.f64_resolver); let ms = p.measures( InstrumentKind::ObservableCounter, name.clone(), @@ -194,7 +194,7 @@ impl InstrumentProvider for Meter { unit: Option, ) -> Result> { validate_instrument_config(name.as_ref(), unit.as_ref(), self.validation_policy)?; - let p = InstProvider::new(self, &self.i64_resolver); + let p = InstrumentResolver::new(self, &self.i64_resolver); p.lookup( InstrumentKind::UpDownCounter, name, @@ -211,7 +211,7 @@ impl InstrumentProvider for Meter { unit: Option, ) -> Result> { validate_instrument_config(name.as_ref(), unit.as_ref(), self.validation_policy)?; - let p = InstProvider::new(self, &self.f64_resolver); + let p = InstrumentResolver::new(self, &self.f64_resolver); p.lookup( InstrumentKind::UpDownCounter, name, @@ -229,7 +229,7 @@ impl InstrumentProvider for Meter { callbacks: Vec>, ) -> Result> { validate_instrument_config(name.as_ref(), unit.as_ref(), self.validation_policy)?; - let p = InstProvider::new(self, &self.i64_resolver); + let p = InstrumentResolver::new(self, &self.i64_resolver); let ms = p.measures( InstrumentKind::ObservableUpDownCounter, name.clone(), @@ -268,7 +268,7 @@ impl InstrumentProvider for Meter { callbacks: Vec>, ) -> Result> { validate_instrument_config(name.as_ref(), unit.as_ref(), self.validation_policy)?; - let p = InstProvider::new(self, &self.f64_resolver); + let p = InstrumentResolver::new(self, &self.f64_resolver); let ms = p.measures( InstrumentKind::ObservableUpDownCounter, name.clone(), @@ -307,7 +307,7 @@ impl InstrumentProvider for Meter { callbacks: Vec>, ) -> Result> { validate_instrument_config(name.as_ref(), unit.as_ref(), 
self.validation_policy)?; - let p = InstProvider::new(self, &self.u64_resolver); + let p = InstrumentResolver::new(self, &self.u64_resolver); let ms = p.measures( InstrumentKind::ObservableGauge, name.clone(), @@ -344,7 +344,7 @@ impl InstrumentProvider for Meter { callbacks: Vec>, ) -> Result> { validate_instrument_config(name.as_ref(), unit.as_ref(), self.validation_policy)?; - let p = InstProvider::new(self, &self.i64_resolver); + let p = InstrumentResolver::new(self, &self.i64_resolver); let ms = p.measures( InstrumentKind::ObservableGauge, name.clone(), @@ -381,7 +381,7 @@ impl InstrumentProvider for Meter { callbacks: Vec>, ) -> Result> { validate_instrument_config(name.as_ref(), unit.as_ref(), self.validation_policy)?; - let p = InstProvider::new(self, &self.f64_resolver); + let p = InstrumentResolver::new(self, &self.f64_resolver); let ms = p.measures( InstrumentKind::ObservableGauge, name.clone(), @@ -417,7 +417,7 @@ impl InstrumentProvider for Meter { unit: Option, ) -> Result> { validate_instrument_config(name.as_ref(), unit.as_ref(), self.validation_policy)?; - let p = InstProvider::new(self, &self.f64_resolver); + let p = InstrumentResolver::new(self, &self.f64_resolver); p.lookup( InstrumentKind::Histogram, name, @@ -434,7 +434,7 @@ impl InstrumentProvider for Meter { unit: Option, ) -> Result> { validate_instrument_config(name.as_ref(), unit.as_ref(), self.validation_policy)?; - let p = InstProvider::new(self, &self.u64_resolver); + let p = InstrumentResolver::new(self, &self.u64_resolver); p.lookup( InstrumentKind::Histogram, name, @@ -451,7 +451,7 @@ impl InstrumentProvider for Meter { unit: Option, ) -> Result> { validate_instrument_config(name.as_ref(), unit.as_ref(), self.validation_policy)?; - let p = InstProvider::new(self, &self.i64_resolver); + let p = InstrumentResolver::new(self, &self.i64_resolver); p.lookup( InstrumentKind::Histogram, @@ -672,36 +672,36 @@ impl ApiObserver for Observer { } } -impl fmt::Debug for Meter { +impl fmt::Debug for SdkMeter { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Meter").field("scope", &self.scope).finish() } } /// Provides all OpenTelemetry instruments. -struct InstProvider<'a, T> { - meter: &'a Meter, +struct InstrumentResolver<'a, T> { + meter: &'a SdkMeter, resolve: &'a Resolver, } -impl<'a, T> InstProvider<'a, T> +impl<'a, T> InstrumentResolver<'a, T> where T: Number, { - fn new(meter: &'a Meter, resolve: &'a Resolver) -> Self { - InstProvider { meter, resolve } + fn new(meter: &'a SdkMeter, resolve: &'a Resolver) -> Self { + InstrumentResolver { meter, resolve } } - /// lookup returns the resolved InstrumentImpl. + /// lookup returns the resolved measures. 
fn lookup( &self, kind: InstrumentKind, name: Cow<'static, str>, description: Option>, unit: Unit, - ) -> Result> { + ) -> Result> { let aggregators = self.measures(kind, name, description, unit)?; - Ok(InstrumentImpl { + Ok(ResolvedMeasures { measures: aggregators, }) } @@ -732,7 +732,7 @@ mod tests { use opentelemetry::metrics::{InstrumentProvider, MetricsError, Unit}; use super::{ - InstrumentValidationPolicy, Meter, INSTRUMENT_NAME_FIRST_ALPHABETIC, + InstrumentValidationPolicy, SdkMeter, INSTRUMENT_NAME_FIRST_ALPHABETIC, INSTRUMENT_NAME_INVALID_CHAR, INSTRUMENT_NAME_LENGTH, INSTRUMENT_UNIT_INVALID_CHAR, INSTRUMENT_UNIT_LENGTH, }; @@ -741,7 +741,7 @@ mod tests { #[test] fn test_instrument_config_validation() { // scope and pipelines are not related to test - let meter = Meter::new( + let meter = SdkMeter::new( Scope::default(), Arc::new(Pipelines::new(Resource::default(), Vec::new(), Vec::new())), ) diff --git a/opentelemetry-sdk/src/metrics/meter_provider.rs b/opentelemetry-sdk/src/metrics/meter_provider.rs index 9492cef76c..6f3178083c 100644 --- a/opentelemetry-sdk/src/metrics/meter_provider.rs +++ b/opentelemetry-sdk/src/metrics/meter_provider.rs @@ -8,13 +8,15 @@ use std::{ }; use opentelemetry::{ - metrics::{noop::NoopMeterCore, InstrumentProvider, Meter as ApiMeter, MetricsError, Result}, + metrics::{ + noop::NoopMeterCore, InstrumentProvider, Meter, MeterProvider, MetricsError, Result, + }, KeyValue, }; use crate::{instrumentation::Scope, Resource}; -use super::{meter::Meter as SdkMeter, pipeline::Pipelines, reader::MetricReader, view::View}; +use super::{meter::SdkMeter, pipeline::Pipelines, reader::MetricReader, view::View}; /// Handles the creation and coordination of [Meter]s. /// @@ -22,20 +24,20 @@ use super::{meter::Meter as SdkMeter, pipeline::Pipelines, reader::MetricReader, /// [Resource], have the same [View]s applied to them, and have their produced /// metric telemetry passed to the configured [MetricReader]s. /// -/// [Meter]: crate::metrics::Meter +/// [Meter]: opentelemetry::metrics::Meter #[derive(Clone, Debug)] -pub struct MeterProvider { +pub struct SdkMeterProvider { pipes: Arc, is_shutdown: Arc, } -impl Default for MeterProvider { +impl Default for SdkMeterProvider { fn default() -> Self { - MeterProvider::builder().build() + SdkMeterProvider::builder().build() } } -impl MeterProvider { +impl SdkMeterProvider { /// Flushes all pending telemetry. 
     ///
     /// There is no guarantee that all telemetry will be flushed or all resources have
@@ -53,10 +55,10 @@ impl MeterProvider {
     ///
     /// ```
     /// use opentelemetry::{global, Context};
-    /// use opentelemetry_sdk::metrics::MeterProvider;
+    /// use opentelemetry_sdk::metrics::SdkMeterProvider;
     ///
-    /// fn init_metrics() -> MeterProvider {
-    ///     let provider = MeterProvider::default();
+    /// fn init_metrics() -> SdkMeterProvider {
+    ///     let provider = SdkMeterProvider::default();
     ///
     ///     // Set provider to be used as global meter provider
     ///     let _ = global::set_meter_provider(provider.clone());
@@ -113,14 +115,14 @@ impl MeterProvider {
     }
 }
 
-impl opentelemetry::metrics::MeterProvider for MeterProvider {
+impl MeterProvider for SdkMeterProvider {
     fn versioned_meter(
         &self,
         name: impl Into>,
         version: Option>>,
         schema_url: Option>>,
         attributes: Option>,
-    ) -> ApiMeter {
+    ) -> Meter {
         let inst_provider: Arc =
             if !self.is_shutdown.load(Ordering::Relaxed) {
                 let scope = Scope::new(name, version, schema_url, attributes);
@@ -129,7 +131,7 @@ impl opentelemetry::metrics::MeterProvider for MeterProvider {
                 Arc::new(NoopMeterCore::new())
             };
 
-        ApiMeter::new(inst_provider)
+        Meter::new(inst_provider)
     }
 }
 
@@ -149,7 +151,7 @@ impl MeterProviderBuilder {
     ///
     /// By default, if this option is not used, the default [Resource] will be used.
     ///
-    /// [Meter]: crate::metrics::Meter
+    /// [Meter]: opentelemetry::metrics::Meter
     pub fn with_resource(mut self, resource: Resource) -> Self {
         self.resource = Some(resource);
         self
@@ -177,8 +179,8 @@ impl MeterProviderBuilder {
     }
 
     /// Construct a new [MeterProvider] with this configuration.
-    pub fn build(self) -> MeterProvider {
-        MeterProvider {
+    pub fn build(self) -> SdkMeterProvider {
+        SdkMeterProvider {
             pipes: Arc::new(Pipelines::new(
                 self.resource.unwrap_or_default(),
                 self.readers,
@@ -209,7 +211,8 @@ mod tests {
     #[test]
     fn test_meter_provider_resource() {
         // If users didn't provide a resource and there isn't an env var set, use the default one.
-        let assert_service_name = |provider: super::MeterProvider, expect: Option<&'static str>| {
+        let assert_service_name = |provider: super::SdkMeterProvider,
+                                   expect: Option<&'static str>| {
             assert_eq!(
                 provider.pipes.0[0]
                     .resource
@@ -219,12 +222,14 @@ mod tests {
             );
         };
         let reader = TestMetricReader {};
-        let default_meter_provider = super::MeterProvider::builder().with_reader(reader).build();
+        let default_meter_provider = super::SdkMeterProvider::builder()
+            .with_reader(reader)
+            .build();
         assert_service_name(default_meter_provider, Some("unknown_service"));
 
         // If user provided a resource, use that.
let reader2 = TestMetricReader {}; - let custom_meter_provider = super::MeterProvider::builder() + let custom_meter_provider = super::SdkMeterProvider::builder() .with_reader(reader2) .with_resource(Resource::new(vec![KeyValue::new( "service.name", @@ -236,7 +241,9 @@ mod tests { // If `OTEL_RESOURCE_ATTRIBUTES` is set, read them automatically let reader3 = TestMetricReader {}; env::set_var("OTEL_RESOURCE_ATTRIBUTES", "key1=value1, k2, k3=value2"); - let env_resource_provider = super::MeterProvider::builder().with_reader(reader3).build(); + let env_resource_provider = super::SdkMeterProvider::builder() + .with_reader(reader3) + .build(); assert_eq!( env_resource_provider.pipes.0[0].resource, Resource::new(vec![ @@ -255,7 +262,7 @@ mod tests { "my-custom-key=env-val,k2=value2", ); let reader4 = TestMetricReader {}; - let user_provided_resource_config_provider = super::MeterProvider::builder() + let user_provided_resource_config_provider = super::SdkMeterProvider::builder() .with_reader(reader4) .with_resource( Resource::default().merge(&mut Resource::new(vec![KeyValue::new( @@ -279,7 +286,7 @@ mod tests { // If user provided a resource, it takes priority during collision. let reader5 = TestMetricReader {}; - let no_service_name = super::MeterProvider::builder() + let no_service_name = super::SdkMeterProvider::builder() .with_reader(reader5) .with_resource(Resource::empty()) .build(); diff --git a/opentelemetry-sdk/src/metrics/mod.rs b/opentelemetry-sdk/src/metrics/mod.rs index ece677ca42..c74e71bcd5 100644 --- a/opentelemetry-sdk/src/metrics/mod.rs +++ b/opentelemetry-sdk/src/metrics/mod.rs @@ -2,7 +2,7 @@ //! //! ## Configuration //! -//! The metrics SDK configuration is stored with each [MeterProvider]. +//! The metrics SDK configuration is stored with each [SdkMeterProvider]. //! Configuration for [Resource]s, [View]s, and [ManualReader] or //! [PeriodicReader] instances can be specified. //! @@ -10,16 +10,16 @@ //! //! ``` //! use opentelemetry::{ -//! metrics::{MeterProvider as _, Unit}, +//! metrics::{MeterProvider, Unit}, //! KeyValue, //! }; -//! use opentelemetry_sdk::{metrics::MeterProvider, Resource}; +//! use opentelemetry_sdk::{metrics::SdkMeterProvider, Resource}; //! //! // Generate SDK configuration, resource, views, etc //! let resource = Resource::default(); // default attributes about the current process //! //! // Create a meter provider with the desired config -//! let provider = MeterProvider::builder().with_resource(resource).build(); +//! let provider = SdkMeterProvider::builder().with_resource(resource).build(); //! //! // Use the meter provider to create meter instances //! let meter = provider.meter("my_app"); diff --git a/opentelemetry-sdk/src/metrics/pipeline.rs b/opentelemetry-sdk/src/metrics/pipeline.rs index 703cd4a040..6ebd7c9e05 100644 --- a/opentelemetry-sdk/src/metrics/pipeline.rs +++ b/opentelemetry-sdk/src/metrics/pipeline.rs @@ -98,7 +98,7 @@ impl Pipeline { Ok(move |this: &Pipeline| { let mut inner = this.inner.lock()?; - // can't compare trait objects so use index + toumbstones to drop + // can't compare trait objects so use index + tombstones to drop inner.multi_callbacks[idx] = None; Ok(()) }) diff --git a/opentelemetry-sdk/src/metrics/view.rs b/opentelemetry-sdk/src/metrics/view.rs index ceba51e1e9..23758f3bda 100644 --- a/opentelemetry-sdk/src/metrics/view.rs +++ b/opentelemetry-sdk/src/metrics/view.rs @@ -34,7 +34,7 @@ fn empty_view(_inst: &Instrument) -> Option { /// View is implemented for all `Fn(&Instrument) -> Option`. 
/// /// ``` -/// use opentelemetry_sdk::metrics::{Instrument, MeterProvider, Stream}; +/// use opentelemetry_sdk::metrics::{Instrument, SdkMeterProvider, Stream}; /// /// // return streams for the given instrument /// let my_view = |i: &Instrument| { @@ -42,7 +42,7 @@ fn empty_view(_inst: &Instrument) -> Option { /// None /// }; /// -/// let provider = MeterProvider::builder().with_view(my_view).build(); +/// let provider = SdkMeterProvider::builder().with_view(my_view).build(); /// # drop(provider) /// ``` pub trait View: Send + Sync + 'static { diff --git a/opentelemetry-sdk/src/testing/metrics/in_memory_exporter.rs b/opentelemetry-sdk/src/testing/metrics/in_memory_exporter.rs index 8699c8e190..d28cd4062f 100644 --- a/opentelemetry-sdk/src/testing/metrics/in_memory_exporter.rs +++ b/opentelemetry-sdk/src/testing/metrics/in_memory_exporter.rs @@ -39,7 +39,7 @@ use std::sync::{Arc, Mutex}; /// let exporter = InMemoryMetricsExporter::default(); /// /// // Create a MeterProvider and register the exporter -/// let meter_provider = metrics::MeterProvider::builder() +/// let meter_provider = metrics::SdkMeterProvider::builder() /// .with_reader(PeriodicReader::builder(exporter.clone(), runtime::Tokio).build()) /// .build(); /// diff --git a/opentelemetry-stdout/examples/basic.rs b/opentelemetry-stdout/examples/basic.rs index 53a0993109..d24fba671b 100644 --- a/opentelemetry-stdout/examples/basic.rs +++ b/opentelemetry-stdout/examples/basic.rs @@ -8,7 +8,7 @@ use opentelemetry::{ }; #[cfg(all(feature = "metrics", feature = "trace"))] use opentelemetry_sdk::{ - metrics::{MeterProvider, PeriodicReader}, + metrics::{PeriodicReader, SdkMeterProvider}, runtime, trace::TracerProvider, }; @@ -22,10 +22,10 @@ fn init_trace() -> TracerProvider { } #[cfg(all(feature = "metrics", feature = "trace"))] -fn init_metrics() -> MeterProvider { +fn init_metrics() -> SdkMeterProvider { let exporter = opentelemetry_stdout::MetricsExporter::default(); let reader = PeriodicReader::builder(exporter, runtime::Tokio).build(); - MeterProvider::builder().with_reader(reader).build() + SdkMeterProvider::builder().with_reader(reader).build() } #[tokio::main] diff --git a/opentelemetry-stdout/src/lib.rs b/opentelemetry-stdout/src/lib.rs index e80270af19..22690da2c3 100644 --- a/opentelemetry-stdout/src/lib.rs +++ b/opentelemetry-stdout/src/lib.rs @@ -5,11 +5,11 @@ //! ```no_run //! # #[cfg(all(feature = "metrics", feature = "trace"))] //! { -//! use opentelemetry::metrics::MeterProvider as _; +//! use opentelemetry::metrics::MeterProvider; //! use opentelemetry::trace::{Span, Tracer, TracerProvider as _}; //! use opentelemetry::{Context, KeyValue}; //! -//! use opentelemetry_sdk::metrics::{MeterProvider, PeriodicReader}; +//! use opentelemetry_sdk::metrics::{SdkMeterProvider, PeriodicReader}; //! use opentelemetry_sdk::runtime; //! use opentelemetry_sdk::trace::TracerProvider; //! @@ -20,10 +20,10 @@ //! .build() //! } //! -//! fn init_metrics() -> MeterProvider { +//! fn init_metrics() -> SdkMeterProvider { //! let exporter = opentelemetry_stdout::MetricsExporter::default(); //! let reader = PeriodicReader::builder(exporter, runtime::Tokio).build(); -//! MeterProvider::builder().with_reader(reader).build() +//! SdkMeterProvider::builder().with_reader(reader).build() //! } //! //! 
let tracer_provider = init_trace(); diff --git a/opentelemetry-user-events-metrics/examples/basic.rs b/opentelemetry-user-events-metrics/examples/basic.rs index b9d14e8baf..82fde30cca 100644 --- a/opentelemetry-user-events-metrics/examples/basic.rs +++ b/opentelemetry-user-events-metrics/examples/basic.rs @@ -4,14 +4,14 @@ use opentelemetry::{ KeyValue, }; use opentelemetry_sdk::{ - metrics::{MeterProvider, PeriodicReader}, + metrics::{PeriodicReader, SdkMeterProvider}, runtime, Resource, }; use opentelemetry_user_events_metrics::MetricsExporter; -fn init_metrics(exporter: MetricsExporter) -> MeterProvider { +fn init_metrics(exporter: MetricsExporter) -> SdkMeterProvider { let reader = PeriodicReader::builder(exporter, runtime::Tokio).build(); - MeterProvider::builder() + SdkMeterProvider::builder() .with_resource(Resource::new(vec![KeyValue::new( "service.name", "metric-demo", diff --git a/stress/src/metrics.rs b/stress/src/metrics.rs index 3072b6cff2..9f7d2d2349 100644 --- a/stress/src/metrics.rs +++ b/stress/src/metrics.rs @@ -3,14 +3,14 @@ use opentelemetry::{ metrics::{Counter, MeterProvider as _}, KeyValue, }; -use opentelemetry_sdk::metrics::{ManualReader, MeterProvider}; +use opentelemetry_sdk::metrics::{ManualReader, SdkMeterProvider}; use rand::{rngs::SmallRng, Rng, SeedableRng}; use std::borrow::Cow; mod throughput; lazy_static! { - static ref PROVIDER: MeterProvider = MeterProvider::builder() + static ref PROVIDER: SdkMeterProvider = SdkMeterProvider::builder() .with_reader(ManualReader::builder().build()) .build(); static ref ATTRIBUTE_VALUES: [&'static str; 10] = [ From b8ea7c10e83a728cc5b6acf093e2cab57a8def66 Mon Sep 17 00:00:00 2001 From: Harold Dost Date: Wed, 8 Nov 2023 17:12:57 +0100 Subject: [PATCH 11/68] Add config.yml for Templates. (#1352) --- .github/ISSUE_TEMPLATE/FEATURE-REQUEST.yml | 5 +++-- .github/ISSUE_TEMPLATE/config.yml | 11 +++++++++++ 2 files changed, 14 insertions(+), 2 deletions(-) create mode 100644 .github/ISSUE_TEMPLATE/config.yml diff --git a/.github/ISSUE_TEMPLATE/FEATURE-REQUEST.yml b/.github/ISSUE_TEMPLATE/FEATURE-REQUEST.yml index 54a66821eb..87d24e313b 100644 --- a/.github/ISSUE_TEMPLATE/FEATURE-REQUEST.yml +++ b/.github/ISSUE_TEMPLATE/FEATURE-REQUEST.yml @@ -1,4 +1,5 @@ -name: Feature Request +--- +name: "Feature Request" description: Request a feature for the OpenTelemetry Rust implementation. title: "[Feature]: " labels: ["enhancement", "triage:todo"] @@ -24,7 +25,7 @@ body: - type: textarea id: solution attributes: - label: Describe the solution you'd like: + label: "Describe the solution you'd like:" description: What do you want to happen instead? What is the expected behavior? placeholder: I'd like the api to ... validations: diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000000..7950b463bd --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,11 @@ +contact_links: + - name: GitHub Discussions + url: https://github.com/open-telemetry/opentelemetry-rust/discussions/new/choose + about: Please ask questions here. + - name: Slack + url: https://cloud-native.slack.com/archives/C03GDP0H023 + about: Or the `#otel-rust` channel in the CNCF Slack instance. (Not terribly responsive.) + - name: "⚠️ Report a security vulnerability" + url: "https://github.com/open-telemetry/opentelemetry-rust/security/advisories/new" + about: "Report a security vulnerability." 
+
From 74d294c18df319d2d7ff73736ef141a745d60d2b Mon Sep 17 00:00:00 2001
From: Julian Tescher
Date: Wed, 8 Nov 2023 11:18:13 -0500
Subject: [PATCH 12/68] prometheus: Ignore unknown instrument units (#1348)

## Motivation

The [metric unit semantic conventions] suggest that integer counts should use
annotations (e.g. `{packet}`), which breaks the current unit appending logic
as they are not properly escaped.

[metric unit semantic conventions]: https://github.com/open-telemetry/semantic-conventions/blob/v1.23.0/docs/general/metrics.md#instrument-units

## Solution

Ignore unknown units (including annotations) as other language implementations
currently do.

This change also removes the `$` mapping as it is not UCUM.
---
 opentelemetry-prometheus/CHANGELOG.md |  4 ++++
 opentelemetry-prometheus/src/utils.rs | 13 ++++++++-----
 2 files changed, 12 insertions(+), 5 deletions(-)

diff --git a/opentelemetry-prometheus/CHANGELOG.md b/opentelemetry-prometheus/CHANGELOG.md
index e7ced72311..cb3868519b 100644
--- a/opentelemetry-prometheus/CHANGELOG.md
+++ b/opentelemetry-prometheus/CHANGELOG.md
@@ -2,6 +2,10 @@
 
 ## vNext
 
+### Fixed
+
+- Fix UCUM annotation escaping by ignoring unknown instrument units and annotations (#1348)
+
 ## v0.14.0
 
 ### Changed
 
diff --git a/opentelemetry-prometheus/src/utils.rs b/opentelemetry-prometheus/src/utils.rs
index 5243c22aa3..d0a66a6e5a 100644
--- a/opentelemetry-prometheus/src/utils.rs
+++ b/opentelemetry-prometheus/src/utils.rs
@@ -36,7 +36,9 @@ pub(crate) fn get_unit_suffixes(unit: &Unit) -> Option> {
         };
     }
 
-    Some(Cow::Owned(unit.as_str().to_string()))
+    // Unmatched units and annotations are ignored
+    // e.g. "{request}"
+    None
 }
 
 fn get_prom_units(unit: &str) -> Option<&'static str> {
@@ -49,9 +51,9 @@ fn get_prom_units(unit: &str) -> Option<&'static str> {
         "ms" => Some("milliseconds"),
         "us" => Some("microseconds"),
         "ns" => Some("nanoseconds"),
 
-        "By" => Some("bytes"), // Bytes
+        "By" => Some("bytes"),
         "KiBy" => Some("kibibytes"),
         "MiBy" => Some("mebibytes"),
         "GiBy" => Some("gibibytes"),
@@ -79,7 +81,6 @@ fn get_prom_units(unit: &str) -> Option<&'static str> {
         "Hz" => Some("hertz"),
         "1" => Some("ratio"),
         "%" => Some("percent"),
-        "$" => Some("dollars"),
         _ => None,
     }
 }
@@ -185,10 +186,12 @@ mod tests {
             ("1/y", Some(Cow::Owned("per_year".to_owned()))),
             ("m/s", Some(Cow::Owned("meters_per_second".to_owned()))),
             // No match
-            ("invalid", Some(Cow::Owned("invalid".to_string()))),
+            ("invalid", None),
             ("invalid/invalid", None),
-            ("seconds", Some(Cow::Owned("seconds".to_string()))),
+            ("seconds", None),
             ("", None),
+            // annotations
+            ("{request}", None),
         ];
         for (unit_str, expected_suffix) in test_cases {
             let unit = Unit::new(unit_str);
From 154644013ff9c53696d1977d248beff30c75d023 Mon Sep 17 00:00:00 2001
From: Cijo Thomas
Date: Thu, 9 Nov 2023 07:33:50 -0800
Subject: [PATCH 13/68] Use HashMap instead of IndexMap in LogRecord (#1353)

---
 opentelemetry/CHANGELOG.md       |  15 +
 opentelemetry/Cargo.toml         |   1 -
 opentelemetry/src/lib.rs         |   4 -
 opentelemetry/src/logs/record.rs |   8 +-
 opentelemetry/src/order_map.rs   | 670 ------------------------------
 opentelemetry/src/trace/mod.rs   |   4 -
 6 files changed, 19 insertions(+), 683 deletions(-)
 delete mode 100644 opentelemetry/src/order_map.rs

diff --git a/opentelemetry/CHANGELOG.md b/opentelemetry/CHANGELOG.md
index d6d35a8616..078c0f315b 100644
--- a/opentelemetry/CHANGELOG.md
+++ b/opentelemetry/CHANGELOG.md
@@ -2,6 +2,21 @@
 
 ## vNext
 
+### Changed
+
+Modified `AnyValue.Map` to be backed by `HashMap` instead of custom `OrderMap`,
+which internally used `IndexMap`. There was no requirement to maintain the order
+of entries, so moving from `IndexMap` to `HashMap` offers slight performance
+gains, and avoids `IndexMap` dependency. This affects `body` and `attributes` of
+`LogRecord`.
+[#1353](https://github.com/open-telemetry/opentelemetry-rust/pull/1353)
+
+### Removed
+
+Removed `OrderMap` type as there was no requirement to use this over regular
+`HashMap`.
+[#1353](https://github.com/open-telemetry/opentelemetry-rust/pull/1353)
+
 ## [v0.21.0](https://github.com/open-telemetry/opentelemetry-rust/compare/v0.20.0...v0.21.0)
 
 This release should be seen as 1.0-rc4 following 1.0-rc3 in v0.20.0. Refer to CHANGELOG.md in individual crates for details on changes made in different crates.
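Since the backing map is now unordered, code reading a `LogRecord` body or attributes must not rely on entry order. A small sketch of building a nested `AnyValue::Map` through the `FromIterator` impl changed below (key names and values are illustrative, and assume the usual `From` conversions for strings and integers):

```rust
use opentelemetry::logs::AnyValue;
use opentelemetry::Key;

fn main() {
    // `FromIterator` collects `(impl Into<Key>, impl Into<AnyValue>)` pairs
    // into an `AnyValue::Map`, now backed by `std::collections::HashMap`.
    let body: AnyValue = [
        ("name", AnyValue::from("my-event")),
        ("count", AnyValue::from(1i64)),
    ]
    .into_iter()
    .collect();

    // Lookups by key behave exactly as before; only iteration order is
    // unspecified after the switch away from `IndexMap`.
    if let AnyValue::Map(map) = &body {
        assert!(map.contains_key(&Key::new("name")));
    }
}
```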
diff --git a/opentelemetry/Cargo.toml b/opentelemetry/Cargo.toml
index 1ad3294da4..39ec98087f 100644
--- a/opentelemetry/Cargo.toml
+++ b/opentelemetry/Cargo.toml
@@ -23,7 +23,6 @@ rustdoc-args = ["--cfg", "docsrs"]
 [dependencies]
 futures-core = "0.3"
 futures-sink = "0.3"
-indexmap = "2.0"
 once_cell = "1.12.0"
 pin-project-lite = { version = "0.2", optional = true }
 thiserror = "1.0.7"
diff --git a/opentelemetry/src/lib.rs b/opentelemetry/src/lib.rs
index cdc44372d4..9d2088f2ac 100644
--- a/opentelemetry/src/lib.rs
+++ b/opentelemetry/src/lib.rs
@@ -212,10 +212,6 @@ pub use context::{Context, ContextGuard};
 
 mod common;
 
-mod order_map;
-
-pub use order_map::OrderMap;
-
 #[cfg(any(feature = "testing", test))]
 #[doc(hidden)]
 pub mod testing;
diff --git a/opentelemetry/src/logs/record.rs b/opentelemetry/src/logs/record.rs
index a65310be39..0bd7604aea 100644
--- a/opentelemetry/src/logs/record.rs
+++ b/opentelemetry/src/logs/record.rs
@@ -1,8 +1,8 @@
 use crate::{
     trace::{SpanContext, SpanId, TraceContextExt, TraceFlags, TraceId},
-    Array, Key, OrderMap, StringValue, Value,
+    Array, Key, StringValue, Value,
 };
-use std::{borrow::Cow, time::SystemTime};
+use std::{borrow::Cow, collections::HashMap, time::SystemTime};
 
 #[derive(Debug, Clone)]
 #[non_exhaustive]
@@ -90,7 +90,7 @@ pub enum AnyValue {
     /// An array of `Any` values
     ListAny(Vec),
     /// A map of string keys to `Any` values, arbitrarily nested.
-    Map(OrderMap),
+    Map(HashMap),
 }
 
 macro_rules! impl_trivial_from {
@@ -133,7 +133,7 @@ impl, V: Into> FromIterator<(K, V)> for AnyValue {
     /// Creates an [`AnyValue::Map`] value from a sequence of key-value pairs
     /// that can be converted into a `Key` and `AnyValue` respectively.
     fn from_iter>(iter: I) -> Self {
-        AnyValue::Map(OrderMap::from_iter(
+        AnyValue::Map(HashMap::from_iter(
             iter.into_iter().map(|(k, v)| (k.into(), v.into())),
         ))
     }
diff --git a/opentelemetry/src/order_map.rs b/opentelemetry/src/order_map.rs
deleted file mode 100644
index 46eb704a99..0000000000
--- a/opentelemetry/src/order_map.rs
+++ /dev/null
@@ -1,670 +0,0 @@
-use crate::{Key, KeyValue, Value};
-use indexmap::map::{
-    Drain, Entry, IntoIter, IntoKeys, IntoValues, Iter, IterMut, Keys, Values, ValuesMut,
-};
-use indexmap::{Equivalent, IndexMap};
-use std::collections::hash_map::RandomState;
-use std::hash::{BuildHasher, Hash};
-use std::iter::FromIterator;
-use std::ops::{Index, IndexMut, RangeBounds};
-
-/// A hash table implementation that preserves insertion order across all operations.
-///
-/// Entries will be returned according to their insertion order when iterating over the collection.
-#[derive(Clone, Debug)]
-pub struct OrderMap(IndexMap);
-
-impl OrderMap {
-    /// Create a new map. 
(Does not allocate) - #[inline] - pub fn new() -> Self { - Self(IndexMap::new()) - } - - /// Create a new map with capacity for `n` key-value pairs. (Does not - /// allocate if `n` is zero.) - /// - /// Computes in **O(n)** time. - #[inline] - pub fn with_capacity(n: usize) -> Self { - Self(IndexMap::with_capacity(n)) - } -} - -impl OrderMap { - /// Create a new map with capacity for `n` key-value pairs. (Does not - /// allocate if `n` is zero.) - /// - /// Computes in **O(n)** time. - #[inline] - pub fn with_capacity_and_hasher(n: usize, hash_builder: S) -> Self { - Self(IndexMap::with_capacity_and_hasher(n, hash_builder)) - } - - /// Create a new map with `hash_builder`. - /// - /// This function is `const`, so it - /// can be called in `static` contexts. - pub const fn with_hasher(hash_builder: S) -> Self { - Self(IndexMap::with_hasher(hash_builder)) - } - - /// Computes in **O(1)** time. - pub fn capacity(&self) -> usize { - self.0.capacity() - } - - /// Return a reference to the map's `BuildHasher`. - pub fn hasher(&self) -> &S { - self.0.hasher() - } - - /// Return the number of key-value pairs in the map. - /// - /// Computes in **O(1)** time. - #[inline] - pub fn len(&self) -> usize { - self.0.len() - } - - /// Returns true if the map contains no elements. - /// - /// Computes in **O(1)** time. - #[inline] - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } - - /// Return an iterator over the key-value pairs of the map, in their order - pub fn iter(&self) -> Iter<'_, K, V> { - self.0.iter() - } - - /// Return an iterator over the key-value pairs of the map, in their order - pub fn iter_mut(&mut self) -> IterMut<'_, K, V> { - self.0.iter_mut() - } - - /// Return an iterator over the keys of the map, in their order - pub fn keys(&self) -> Keys<'_, K, V> { - self.0.keys() - } - - /// Return an owning iterator over the keys of the map, in their order - pub fn into_keys(self) -> IntoKeys { - self.0.into_keys() - } - - /// Return an iterator over the values of the map, in their order - pub fn values(&self) -> Values<'_, K, V> { - self.0.values() - } - - /// Return an iterator over mutable references to the values of the map, - /// in their order - pub fn values_mut(&mut self) -> ValuesMut<'_, K, V> { - self.0.values_mut() - } - - /// Return an owning iterator over the values of the map, in their order - pub fn into_values(self) -> IntoValues { - self.0.into_values() - } - - /// Remove all key-value pairs in the map, while preserving its capacity. - /// - /// Computes in **O(n)** time. - pub fn clear(&mut self) { - self.0.clear(); - } - - /// Shortens the map, keeping the first `len` elements and dropping the rest. - /// - /// If `len` is greater than the map's current length, this has no effect. - pub fn truncate(&mut self, len: usize) { - self.0.truncate(len); - } - - /// Clears the `IndexMap` in the given index range, returning those - /// key-value pairs as a drain iterator. - /// - /// The range may be any type that implements `RangeBounds`, - /// including all of the `std::ops::Range*` types, or even a tuple pair of - /// `Bound` start and end values. To drain the map entirely, use `RangeFull` - /// like `map.drain(..)`. - /// - /// This shifts down all entries following the drained range to fill the - /// gap, and keeps the allocated memory for reuse. - /// - /// ***Panics*** if the starting point is greater than the end point or if - /// the end point is greater than the length of the map. 
- pub fn drain(&mut self, range: R) -> Drain<'_, K, V> - where - R: RangeBounds, - { - self.0.drain(range) - } - - /// Splits the collection into two at the given index. - /// - /// Returns a newly allocated map containing the elements in the range - /// `[at, len)`. After the call, the original map will be left containing - /// the elements `[0, at)` with its previous capacity unchanged. - /// - /// ***Panics*** if `at > len`. - pub fn split_off(&mut self, at: usize) -> Self - where - S: Clone, - { - Self(self.0.split_off(at)) - } -} - -impl OrderMap -where - K: Hash + Eq, - S: BuildHasher, -{ - /// Reserve capacity for `additional` more key-value pairs. - /// - /// Computes in **O(n)** time. - pub fn reserve(&mut self, additional: usize) { - self.0.reserve(additional) - } - - /// Shrink the capacity of the map as much as possible. - /// - /// Computes in **O(n)** time. - pub fn shrink_to_fit(&mut self) { - self.0.shrink_to_fit() - } - - /// Insert a key-value pair in the map. - /// - /// If an equivalent key already exists in the map: the key remains and - /// retains in its place in the order, its corresponding value is updated - /// with `value` and the older value is returned inside `Some(_)`. - /// - /// If no equivalent key existed in the map: the new key-value pair is - /// inserted, last in order, and `None` is returned. - /// - /// Computes in **O(1)** time (amortized average). - /// - /// See also [`entry`](#method.entry) if you you want to insert *or* modify - /// or if you need to get the index of the corresponding key-value pair. - pub fn insert(&mut self, key: K, value: V) -> Option { - self.0.insert(key, value) - } - - /// Insert a key-value pair in the map, and get their index. - /// - /// If an equivalent key already exists in the map: the key remains and - /// retains in its place in the order, its corresponding value is updated - /// with `value` and the older value is returned inside `(index, Some(_))`. - /// - /// If no equivalent key existed in the map: the new key-value pair is - /// inserted, last in order, and `(index, None)` is returned. - /// - /// Computes in **O(1)** time (amortized average). - /// - /// See also [`entry`](#method.entry) if you you want to insert *or* modify - /// or if you need to get the index of the corresponding key-value pair. - pub fn insert_full(&mut self, key: K, value: V) -> (usize, Option) { - self.0.insert_full(key, value) - } - - /// Get the given key’s corresponding entry in the map for insertion and/or - /// in-place manipulation. - /// - /// Computes in **O(1)** time (amortized average). - pub fn entry(&mut self, key: K) -> Entry<'_, K, V> { - self.0.entry(key) - } - - /// Return `true` if an equivalent to `key` exists in the map. - /// - /// Computes in **O(1)** time (average). - pub fn contains_key(&self, key: &Q) -> bool - where - Q: Hash + Equivalent, - { - self.0.contains_key(key) - } - - /// Return a reference to the value stored for `key`, if it is present, - /// else `None`. - /// - /// Computes in **O(1)** time (average). - pub fn get(&self, key: &Q) -> Option<&V> - where - Q: Hash + Equivalent, - { - self.0.get(key) - } - - /// Return references to the key-value pair stored for `key`, - /// if it is present, else `None`. - /// - /// Computes in **O(1)** time (average). 
- pub fn get_key_value(&self, key: &Q) -> Option<(&K, &V)> - where - Q: Hash + Equivalent, - { - self.0.get_key_value(key) - } - - /// Return item index, key and value - pub fn get_full(&self, key: &Q) -> Option<(usize, &K, &V)> - where - Q: Hash + Equivalent, - { - self.0.get_full(key) - } - - /// Return item index, if it exists in the map - /// - /// Computes in **O(1)** time (average). - pub fn get_index_of(&self, key: &Q) -> Option - where - Q: Hash + Equivalent, - { - self.0.get_index_of(key) - } - - /// Return a mutable reference to the element pointed at by `key`, if it exists. - pub fn get_mut(&mut self, key: &Q) -> Option<&mut V> - where - Q: Hash + Equivalent, - { - self.0.get_mut(key) - } - - /// Return a mutable reference to the element pointed at by `key`, if it exists. - /// It also returns the element's index and its key. - pub fn get_full_mut(&mut self, key: &Q) -> Option<(usize, &K, &mut V)> - where - Q: Hash + Equivalent, - { - self.0.get_full_mut(key) - } - - /// Remove the key-value pair equivalent to `key` and return - /// its value. - /// - /// Like `Vec::remove`, the pair is removed by shifting all of the - /// elements that follow it, preserving their relative order. - /// **This perturbs the index of all of those elements!** - /// - /// Return `None` if `key` is not in map. - /// - /// Computes in **O(n)** time (average). - pub fn shift_remove(&mut self, key: &Q) -> Option - where - Q: Hash + Equivalent, - { - self.0.shift_remove(key) - } - - /// Remove and return the key-value pair equivalent to `key`. - /// - /// Like `Vec::remove`, the pair is removed by shifting all of the - /// elements that follow it, preserving their relative order. - /// **This perturbs the index of all of those elements!** - /// - /// Return `None` if `key` is not in map. - /// - /// Computes in **O(n)** time (average). - pub fn shift_remove_entry(&mut self, key: &Q) -> Option<(K, V)> - where - Q: Hash + Equivalent, - { - self.0.shift_remove_entry(key) - } - - /// Remove the key-value pair equivalent to `key` and return it and - /// the index it had. - /// - /// Like `Vec::remove`, the pair is removed by shifting all of the - /// elements that follow it, preserving their relative order. - /// **This perturbs the index of all of those elements!** - /// - /// Return `None` if `key` is not in map. - /// - /// Computes in **O(n)** time (average). - pub fn shift_remove_full(&mut self, key: &Q) -> Option<(usize, K, V)> - where - Q: Hash + Equivalent, - { - self.0.shift_remove_full(key) - } - - /// Remove the last key-value pair - /// - /// This preserves the order of the remaining elements. - /// - /// Computes in **O(1)** time (average). - pub fn pop(&mut self) -> Option<(K, V)> { - self.0.pop() - } - - /// Scan through each key-value pair in the map and keep those where the - /// closure `keep` returns `true`. - /// - /// The elements are visited in order, and remaining elements keep their - /// order. - /// - /// Computes in **O(n)** time (average). - pub fn retain(&mut self, keep: F) - where - F: FnMut(&K, &mut V) -> bool, - { - self.0.retain(keep); - } -} - -impl OrderMap { - /// Get a key-value pair by index - /// - /// Valid indices are *0 <= index < self.len()* - /// - /// Computes in **O(1)** time. - pub fn get_index(&self, index: usize) -> Option<(&K, &V)> { - self.0.get_index(index) - } - - /// Get a key-value pair by index - /// - /// Valid indices are *0 <= index < self.len()* - /// - /// Computes in **O(1)** time. 
- pub fn get_index_mut(&mut self, index: usize) -> Option<(&K, &mut V)> { - self.0.get_index_mut(index) - } - - /// Get the first key-value pair - /// - /// Computes in **O(1)** time. - pub fn first(&self) -> Option<(&K, &V)> { - self.0.first() - } - - /// Get the first key-value pair, with mutable access to the value - /// - /// Computes in **O(1)** time. - pub fn first_mut(&mut self) -> Option<(&K, &mut V)> { - self.0.first_mut() - } - - /// Get the last key-value pair - /// - /// Computes in **O(1)** time. - pub fn last(&self) -> Option<(&K, &V)> { - self.0.last() - } - - /// Get the last key-value pair, with mutable access to the value - /// - /// Computes in **O(1)** time. - pub fn last_mut(&mut self) -> Option<(&K, &mut V)> { - self.0.last_mut() - } - - /// Remove the key-value pair by index - /// - /// Valid indices are *0 <= index < self.len()* - /// - /// Like `Vec::remove`, the pair is removed by shifting all of the - /// elements that follow it, preserving their relative order. - /// **This perturbs the index of all of those elements!** - /// - /// Computes in **O(n)** time (average). - pub fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> { - self.0.shift_remove_index(index) - } -} - -impl<'a, K, V, S> IntoIterator for &'a OrderMap { - type Item = (&'a K, &'a V); - type IntoIter = Iter<'a, K, V>; - fn into_iter(self) -> Self::IntoIter { - self.0.iter() - } -} - -impl<'a, K, V, S> IntoIterator for &'a mut OrderMap { - type Item = (&'a K, &'a mut V); - type IntoIter = IterMut<'a, K, V>; - fn into_iter(self) -> Self::IntoIter { - self.0.iter_mut() - } -} - -impl IntoIterator for OrderMap { - type Item = (K, V); - type IntoIter = IntoIter; - fn into_iter(self) -> Self::IntoIter { - self.0.into_iter() - } -} - -/// Access `OrderMap` values corresponding to a key. -/// -/// Panics if the value is missing. -impl Index<&Q> for OrderMap -where - Q: Hash + Equivalent, - K: Hash + Eq, - S: BuildHasher, -{ - type Output = V; - - /// Returns a reference to the value corresponding to the supplied `key`. - /// - /// ***Panics*** if `key` is not present in the map. - fn index(&self, key: &Q) -> &V { - self.0.index(key) - } -} - -/// Access `Ordermap` values corresponding to a key. -/// -/// Mutable indexing allows changing / updating values of key-value -/// pairs that are already present. -/// -/// You can **not** insert new pairs with index syntax, use `.insert()`. -impl IndexMut<&Q> for OrderMap -where - Q: Hash + Equivalent, - K: Hash + Eq, - S: BuildHasher, -{ - /// Returns a mutable reference to the value corresponding to the supplied `key`. - /// - /// ***Panics*** if `key` is not present in the map. - fn index_mut(&mut self, key: &Q) -> &mut V { - self.0.index_mut(key) - } -} - -/// Access `IndexMap` values at indexed positions. -/// -/// It panics if the index is out of bounds. -impl Index for OrderMap { - type Output = V; - - /// Returns a reference to the value at the supplied `index`. - /// - /// ***Panics*** if `index` is out of bounds. - fn index(&self, index: usize) -> &V { - self.0.index(index) - } -} - -/// Access `IndexMap` values at indexed positions. -/// -/// Mutable indexing allows changing / updating indexed values -/// that are already present. -/// -/// You can **not** insert new values with index syntax, use `.insert()`. 
-/// -/// # Examples -/// -/// ``` -/// use indexmap::IndexMap; -/// -/// let mut map = IndexMap::new(); -/// for word in "Lorem ipsum dolor sit amet".split_whitespace() { -/// map.insert(word.to_lowercase(), word.to_string()); -/// } -/// let lorem = &mut map[0]; -/// assert_eq!(lorem, "Lorem"); -/// lorem.retain(char::is_lowercase); -/// assert_eq!(map["lorem"], "orem"); -/// ``` -/// -/// ```should_panic -/// use indexmap::IndexMap; -/// -/// let mut map = IndexMap::new(); -/// map.insert("foo", 1); -/// map[10] = 1; // panics! -/// ``` -impl IndexMut for OrderMap { - /// Returns a mutable reference to the value at the supplied `index`. - /// - /// ***Panics*** if `index` is out of bounds. - fn index_mut(&mut self, index: usize) -> &mut V { - self.0.index_mut(index) - } -} - -impl FromIterator<(K, V)> for OrderMap -where - K: Hash + Eq, - S: BuildHasher + Default, -{ - /// Create an `OrderMap` from the sequence of key-value pairs in the - /// iterable. - /// - /// `from_iter` uses the same logic as `extend`. See - /// [`extend`](#method.extend) for more details. - fn from_iter>(iterable: I) -> Self { - Self(IndexMap::from_iter(iterable)) - } -} - -// todo: uncomment when the MSRV bumps -// impl From<[(K, V); N]> for OrderMap -// where -// K: Hash + Eq, -// { -// fn from(arr: [(K, V); N]) -> Self { -// Self(IndexMap::from(arr)) -// } -// } - -impl Extend<(K, V)> for OrderMap -where - K: Hash + Eq, - S: BuildHasher, -{ - /// Extend the map with all key-value pairs in the iterable. - /// - /// This is equivalent to calling [`insert`](#method.insert) for each of - /// them in order, which means that for keys that already existed - /// in the map, their value is updated but it keeps the existing order. - /// - /// New keys are inserted in the order they appear in the sequence. If - /// equivalents of a key occur more than once, the last corresponding value - /// prevails. - fn extend>(&mut self, iterable: I) { - self.0.extend(iterable) - } -} - -impl<'a, K, V, S> Extend<(&'a K, &'a V)> for OrderMap -where - K: 'a + Hash + Eq + Copy, - V: 'a + Copy, - S: BuildHasher, -{ - /// Extend the map with all key-value pairs in the iterable. - /// - /// See the first extend method for more details. - fn extend>(&mut self, iterable: I) { - self.0.extend(iterable) - } -} - -impl Default for OrderMap -where - S: Default, -{ - /// Return an empty `OrderMap` - fn default() -> Self { - Self(IndexMap::default()) - } -} - -impl PartialEq> for OrderMap -where - K: Hash + Eq, - V1: PartialEq, - S1: BuildHasher, - S2: BuildHasher, -{ - fn eq(&self, other: &OrderMap) -> bool { - self.0.eq(&other.0) - } -} - -impl Eq for OrderMap -where - K: Eq + Hash, - V: Eq, - S: BuildHasher, -{ -} - -impl FromIterator for OrderMap -where - S: BuildHasher + Default, -{ - /// Create an `OrderMap` from the sequence of key-value pairs in the - /// iterable. - /// - /// `from_iter` uses the same logic as `extend`. See - /// [`extend`](#method.extend) for more details. - fn from_iter>(iterable: I) -> Self { - Self(IndexMap::from_iter( - iterable.into_iter().map(|kv| (kv.key, kv.value)), - )) - } -} - -// todo: uncomment below when bumping MSRV -// impl From<[KeyValue; N]> for OrderMap { -// fn from(arr: [KeyValue; N]) -> Self { -// let arr = arr.map(|kv| (kv.key, kv.value)); -// Self(IndexMap::from(arr)) -// } -// } - -impl Extend for OrderMap -where - S: BuildHasher, -{ - /// Extend the map with all key-value pairs in the iterable. 
-    ///
-    /// This is equivalent to calling [`insert`](#method.insert) for each of
-    /// them in order, which means that for keys that already existed
-    /// in the map, their value is updated but it keeps the existing order.
-    ///
-    /// New keys are inserted in the order they appear in the sequence. If
-    /// equivalents of a key occur more than once, the last corresponding value
-    /// prevails.
-    fn extend<I: IntoIterator<Item = KeyValue>>(&mut self, iterable: I) {
-        self.0
-            .extend(iterable.into_iter().map(|kv| (kv.key, kv.value)))
-    }
-}
diff --git a/opentelemetry/src/trace/mod.rs b/opentelemetry/src/trace/mod.rs
index 2cd44f3059..038064ca49 100644
--- a/opentelemetry/src/trace/mod.rs
+++ b/opentelemetry/src/trace/mod.rs
@@ -183,12 +183,8 @@ pub use self::{
     tracer_provider::TracerProvider,
 };
 use crate::{ExportError, KeyValue};
-use std::collections::hash_map::RandomState;
 use std::sync::PoisonError;

-/// re-export OrderMap to mitigate breaking change
-pub type OrderMap<K, V, S = RandomState> = crate::order_map::OrderMap<K, V, S>;
-
 /// Describe the result of operations in tracing API.
 pub type TraceResult<T> = Result<T, TraceError>;

From 747895423e113ee2d662ee9def1ae7d3cbb56edd Mon Sep 17 00:00:00 2001
From: Dirkjan Ochtman
Date: Thu, 9 Nov 2023 16:42:17 +0100
Subject: [PATCH 14/68] Move @djc to emeritus (#1359)

---
 CONTRIBUTING.md | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 9451f9e427..dbbd63547b 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -105,11 +105,11 @@ For a deeper discussion, see:
 https://github.com/open-telemetry/opentelemetry-specification/issues/165

 ### Error Handling
-Currently, the Opentelemetry Rust SDK has two ways to handle errors. In the situation where errors are not allowed to return. One should call global error handler to process the errors. Otherwise, one should return the errors. 
+Currently, the Opentelemetry Rust SDK has two ways to handle errors. In the situation where errors are not allowed to return. One should call global error handler to process the errors. Otherwise, one should return the errors.

-The Opentelemetry Rust SDK comes with an error type `openetelemetry::Error`. For different function, one error has been defined. All error returned by trace module MUST be wrapped in `opentelemetry::trace::TraceError`. All errors returned by metrics module MUST be wrapped in `opentelemetry::metrics::MetricsError`. 
+The Opentelemetry Rust SDK comes with an error type `openetelemetry::Error`. For different function, one error has been defined. All error returned by trace module MUST be wrapped in `opentelemetry::trace::TraceError`. All errors returned by metrics module MUST be wrapped in `opentelemetry::metrics::MetricsError`.

-For users that want to implement their own exporters. It's RECOMMENDED to wrap all errors from the exporter into a crate-level error type, and implement `ExporterError` trait. 
+For users that want to implement their own exporters. It's RECOMMENDED to wrap all errors from the exporter into a crate-level error type, and implement `ExporterError` trait.

 ### Priority of configurations
 OpenTelemetry supports multiple ways to configure the API, SDK and other components. The priority of configurations is as follows:
 - Environment variables
 - Compiling time configurations provided in the source code
-
-
 ## Style Guide
 * Run `cargo clippy --all` - this will catch common mistakes and improve
@@ -130,7 +130,7 @@ issues.
 * Run `cargo test --all` - this will execute code and doc tests for all
   projects in this workspace.
-* Run `cargo bench` - this will run benchmarks to show performance +* Run `cargo bench` - this will run benchmarks to show performance regressions ## Approvers and Maintainers @@ -139,7 +139,6 @@ For GitHub groups see the [code owners](CODEOWNERS) file. ### Maintainers -* [Dirkjan Ochtman](https://github.com/djc) * [Harold Dost](https://github.com/hdost) * [Julian Tescher](https://github.com/jtescher) * [Zhongyang Wu](https://github.com/TommyCpp) @@ -152,6 +151,7 @@ For GitHub groups see the [code owners](CODEOWNERS) file. ### Emeritus +* [Dirkjan Ochtman](https://github.com/djc) * [Jan Kühle](https://github.com/frigus02) * [Isobel Redelmeier](https://github.com/iredelmeier) @@ -166,4 +166,4 @@ repo](https://github.com/open-telemetry/community/blob/master/community-membersh ## FAQ ### Where should I put third party propagators/exporters, contrib or standalone crates? -As of now, the specification classify the propagators into three categories: Fully opened standards, platform-specific standards, proprietary headers. The conclusion is only the fully opened standards should live in SDK packages/repos. So here, only fully opened standards should live as independent crate. For more detail and discussion, see [this pr](https://github.com/open-telemetry/opentelemetry-specification/pull/1144). \ No newline at end of file +As of now, the specification classify the propagators into three categories: Fully opened standards, platform-specific standards, proprietary headers. The conclusion is only the fully opened standards should live in SDK packages/repos. So here, only fully opened standards should live as independent crate. For more detail and discussion, see [this pr](https://github.com/open-telemetry/opentelemetry-specification/pull/1144). From d7e46292fdc17252ec5353a09aa616e27ee6a55e Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Thu, 9 Nov 2023 07:48:47 -0800 Subject: [PATCH 15/68] Span events stored as Vector instead of EvictedQueue (#1350) --- .../src/exporter/model/mod.rs | 5 +- opentelemetry-jaeger/src/exporter/mod.rs | 12 ++-- opentelemetry-proto/src/transform/trace.rs | 4 +- opentelemetry-sdk/CHANGELOG.md | 18 +++-- .../benches/batch_span_processor.rs | 4 +- opentelemetry-sdk/src/export/trace.rs | 4 +- opentelemetry-sdk/src/testing/trace/mod.rs | 4 +- opentelemetry-sdk/src/trace/events.rs | 37 +++++++++++ opentelemetry-sdk/src/trace/mod.rs | 44 ++++++++++++- opentelemetry-sdk/src/trace/span.rs | 66 ++++++++++++++----- opentelemetry-sdk/src/trace/span_processor.rs | 4 +- opentelemetry-sdk/src/trace/tracer.rs | 21 ++++-- opentelemetry-stdout/src/trace/transform.rs | 2 +- .../src/exporter/model/span.rs | 6 +- 14 files changed, 176 insertions(+), 55 deletions(-) create mode 100644 opentelemetry-sdk/src/trace/events.rs diff --git a/opentelemetry-datadog/src/exporter/model/mod.rs b/opentelemetry-datadog/src/exporter/model/mod.rs index cecc080429..f0b626a3ec 100644 --- a/opentelemetry-datadog/src/exporter/model/mod.rs +++ b/opentelemetry-datadog/src/exporter/model/mod.rs @@ -195,7 +195,7 @@ pub(crate) mod tests { }; use opentelemetry_sdk::{ self, - trace::{EvictedQueue, SpanLinks}, + trace::{SpanEvents, SpanLinks}, InstrumentationLibrary, Resource, }; use std::borrow::Cow; @@ -217,9 +217,8 @@ pub(crate) mod tests { let start_time = SystemTime::UNIX_EPOCH; let end_time = start_time.checked_add(Duration::from_secs(1)).unwrap(); - let capacity = 3; let attributes = vec![KeyValue::new("span.type", "web")]; - let events = EvictedQueue::new(capacity); + let events = SpanEvents::default(); let 
links = SpanLinks::default();

        let resource = Resource::new(vec![KeyValue::new("host.name", "test")]);

diff --git a/opentelemetry-jaeger/src/exporter/mod.rs b/opentelemetry-jaeger/src/exporter/mod.rs
index 82bc7089a7..9bc1d91d3c 100644
--- a/opentelemetry-jaeger/src/exporter/mod.rs
+++ b/opentelemetry-jaeger/src/exporter/mod.rs
@@ -19,13 +19,11 @@ use opentelemetry::{
     trace::{Event, Link, SpanKind, Status},
     InstrumentationLibrary, Key, KeyValue,
 };
-use opentelemetry_sdk::{
-    export::{
-        trace::{ExportResult, SpanData, SpanExporter},
-        ExportError,
-    },
-    trace::EvictedQueue,
+use opentelemetry_sdk::export::{
+    trace::{ExportResult, SpanData, SpanExporter},
+    ExportError,
 };
+use opentelemetry_sdk::trace::SpanEvents;

 use crate::exporter::uploader::Uploader;

@@ -255,7 +253,7 @@ impl UserOverrides {
     }
 }

-fn events_to_logs(events: EvictedQueue<Event>) -> Option<Vec<jaeger::Log>> {
+fn events_to_logs(events: SpanEvents) -> Option<Vec<jaeger::Log>> {
     if events.is_empty() {
         None
     } else {
diff --git a/opentelemetry-proto/src/transform/trace.rs b/opentelemetry-proto/src/transform/trace.rs
index 1a16b070e9..616c531c75 100644
--- a/opentelemetry-proto/src/transform/trace.rs
+++ b/opentelemetry-proto/src/transform/trace.rs
@@ -82,7 +82,7 @@ pub mod tonic {
                 end_time_unix_nano: to_nanos(source_span.end_time),
                 dropped_attributes_count: source_span.dropped_attributes_count,
                 attributes: Attributes::from(source_span.attributes).0,
-                dropped_events_count: source_span.events.dropped_count(),
+                dropped_events_count: source_span.events.dropped_count,
                 events: source_span
                     .events
                     .into_iter()
@@ -193,7 +193,7 @@ pub mod grpcio {
                 end_time_unix_nano: to_nanos(source_span.end_time),
                 dropped_attributes_count: source_span.dropped_attributes_count,
                 attributes: Attributes::from(source_span.attributes).0,
-                dropped_events_count: source_span.events.dropped_count(),
+                dropped_events_count: source_span.events.dropped_count,
                 events: source_span
                     .events
                     .into_iter()
diff --git a/opentelemetry-sdk/CHANGELOG.md b/opentelemetry-sdk/CHANGELOG.md
index 2a6d9efd68..919467a998 100644
--- a/opentelemetry-sdk/CHANGELOG.md
+++ b/opentelemetry-sdk/CHANGELOG.md
@@ -5,19 +5,23 @@
 ### Changed

 - **Breaking**
-[#1313](https://github.com/open-telemetry/opentelemetry-rust/issues/1313)
-  Changes how Span links are stored to achieve performance gains. See below for
-  details:
-
-  *Behavior Change*: When enforcing `max_links_per_span` from `SpanLimits`,
-  links are kept in the first-come order. The previous "eviction" based approach
-  is no longer performed.
+[#1313](https://github.com/open-telemetry/opentelemetry-rust/pull/1313)
+[#1350](https://github.com/open-telemetry/opentelemetry-rust/pull/1350)
+  Changes how Span links/events are stored to achieve performance gains. See
+  below for details:
+
+  *Behavior Change*: When enforcing `max_links_per_span`, `max_events_per_span`
+  from `SpanLimits`, links/events are kept in the first-come order. The previous
+  "eviction" based approach is no longer performed.

   *Breaking Change Affecting Exporter authors*:

   `SpanData` now stores `links` as `SpanLinks` instead of `EvictedQueue` where
   `SpanLinks` is a struct with a `Vec` of links and `dropped_count`.

+  `SpanData` now stores `events` as `SpanEvents` instead of `EvictedQueue` where
+  `SpanEvents` is a struct with a `Vec` of events and `dropped_count`.
+
 ## v0.21.0

 ### Added
diff --git a/opentelemetry-sdk/benches/batch_span_processor.rs b/opentelemetry-sdk/benches/batch_span_processor.rs
index e5bea6dcd6..4e2301e203 100644
--- a/opentelemetry-sdk/benches/batch_span_processor.rs
+++ b/opentelemetry-sdk/benches/batch_span_processor.rs
@@ -5,7 +5,7 @@ use opentelemetry::trace::{
 use opentelemetry_sdk::export::trace::SpanData;
 use opentelemetry_sdk::runtime::Tokio;
 use opentelemetry_sdk::testing::trace::NoopSpanExporter;
-use opentelemetry_sdk::trace::{BatchSpanProcessor, EvictedQueue, SpanLinks, SpanProcessor};
+use opentelemetry_sdk::trace::{BatchSpanProcessor, SpanEvents, SpanLinks, SpanProcessor};
 use opentelemetry_sdk::Resource;
 use std::borrow::Cow;
 use std::sync::Arc;
@@ -29,7 +29,7 @@ fn get_span_data() -> Vec<SpanData> {
             end_time: SystemTime::now(),
             attributes: Vec::new(),
             dropped_attributes_count: 0,
-            events: EvictedQueue::new(12),
+            events: SpanEvents::default(),
             links: SpanLinks::default(),
             status: Status::Unset,
             resource: Cow::Owned(Resource::empty()),
diff --git a/opentelemetry-sdk/src/export/trace.rs b/opentelemetry-sdk/src/export/trace.rs
index c75a0dd94a..b3d99c9a13 100644
--- a/opentelemetry-sdk/src/export/trace.rs
+++ b/opentelemetry-sdk/src/export/trace.rs
@@ -1,7 +1,7 @@
 //! Trace exporters
 use crate::Resource;
 use futures_util::future::BoxFuture;
-use opentelemetry::trace::{Event, SpanContext, SpanId, SpanKind, Status, TraceError};
+use opentelemetry::trace::{SpanContext, SpanId, SpanKind, Status, TraceError};
 use opentelemetry::KeyValue;
 use std::borrow::Cow;
 use std::fmt::Debug;
@@ -87,7 +87,7 @@ pub struct SpanData {
     /// dropped.
     pub dropped_attributes_count: u32,
     /// Span events
-    pub events: crate::trace::EvictedQueue<Event>,
+    pub events: crate::trace::SpanEvents,
     /// Span Links
     pub links: crate::trace::SpanLinks,
     /// Span status
diff --git a/opentelemetry-sdk/src/testing/trace/mod.rs b/opentelemetry-sdk/src/testing/trace/mod.rs
index 98b8ef3c86..7605101d86 100644
--- a/opentelemetry-sdk/src/testing/trace/mod.rs
+++ b/opentelemetry-sdk/src/testing/trace/mod.rs
@@ -7,7 +7,7 @@ use crate::{
         trace::{ExportResult, SpanData, SpanExporter},
         ExportError,
     },
-    trace::{Config, EvictedQueue, SpanLinks},
+    trace::{Config, SpanEvents, SpanLinks},
     InstrumentationLibrary,
 };
 use async_trait::async_trait;
@@ -36,7 +36,7 @@ pub fn new_test_export_span_data() -> SpanData {
         end_time: opentelemetry::time::now(),
         attributes: Vec::new(),
         dropped_attributes_count: 0,
-        events: EvictedQueue::new(config.span_limits.max_events_per_span),
+        events: SpanEvents::default(),
         links: SpanLinks::default(),
         status: Status::Unset,
         resource: config.resource,
diff --git a/opentelemetry-sdk/src/trace/events.rs b/opentelemetry-sdk/src/trace/events.rs
new file mode 100644
index 0000000000..fbf2bcb928
--- /dev/null
+++ b/opentelemetry-sdk/src/trace/events.rs
@@ -0,0 +1,37 @@
+//! # Span Events
+
+use std::ops::Deref;
+
+use opentelemetry::trace::Event;
+/// Stores span events along with dropped count.
+#[derive(Clone, Debug, Default, PartialEq)]
+#[non_exhaustive]
+pub struct SpanEvents {
+    /// The events stored as a vector. Could be empty if there are no events.
+    pub events: Vec<Event>,
+    /// The number of Events dropped from the span.
+    pub dropped_count: u32,
+}
+
+impl Deref for SpanEvents {
+    type Target = [Event];
+
+    fn deref(&self) -> &Self::Target {
+        &self.events
+    }
+}
+
+impl IntoIterator for SpanEvents {
+    type Item = Event;
+    type IntoIter = std::vec::IntoIter<Event>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.events.into_iter()
+    }
+}
+
+impl SpanEvents {
+    pub(crate) fn add_event(&mut self, event: Event) {
+        self.events.push(event);
+    }
+}
diff --git a/opentelemetry-sdk/src/trace/mod.rs b/opentelemetry-sdk/src/trace/mod.rs
index e064e85e4a..d031cc177f 100644
--- a/opentelemetry-sdk/src/trace/mod.rs
+++ b/opentelemetry-sdk/src/trace/mod.rs
@@ -7,6 +7,7 @@
 //! current operation execution.
 //! * The [`TracerProvider`] struct which configures and produces [`Tracer`]s.
 mod config;
+mod events;
 mod evicted_hash_map;
 mod evicted_queue;
 mod id_generator;
@@ -19,6 +20,7 @@ mod span_processor;
 mod tracer;

 pub use config::{config, Config};
+pub use events::SpanEvents;
 pub use evicted_hash_map::EvictedHashMap;
 pub use evicted_queue::EvictedQueue;
 pub use id_generator::{aws::XrayIdGenerator, IdGenerator, RandomIdGenerator};
@@ -42,11 +44,12 @@ mod runtime_tests;
 mod tests {
     use super::*;
     use crate::{
-        testing::trace::InMemorySpanExporterBuilder, trace::span_limit::DEFAULT_MAX_LINKS_PER_SPAN,
+        testing::trace::InMemorySpanExporterBuilder,
+        trace::span_limit::{DEFAULT_MAX_EVENT_PER_SPAN, DEFAULT_MAX_LINKS_PER_SPAN},
     };
     use opentelemetry::{
         trace::{
-            Link, Span, SpanBuilder, SpanContext, SpanId, TraceFlags, TraceId, Tracer,
+            Event, Link, Span, SpanBuilder, SpanContext, SpanId, TraceFlags, TraceId, Tracer,
             TracerProvider as _,
         },
         KeyValue,
@@ -140,4 +143,41 @@ mod tests {
         assert_eq!(span.name, "span_name");
         assert_eq!(span.links.len(), DEFAULT_MAX_LINKS_PER_SPAN as usize);
     }
+
+    #[test]
+    fn exceed_span_events_limit() {
+        // Arrange
+        let exporter = InMemorySpanExporterBuilder::new().build();
+        let provider = TracerProvider::builder()
+            .with_span_processor(SimpleSpanProcessor::new(Box::new(exporter.clone())))
+            .build();
+
+        // Act
+        let tracer = provider.tracer("test_tracer");
+
+        let mut events = Vec::new();
+        for _i in 0..(DEFAULT_MAX_EVENT_PER_SPAN * 2) {
+            events.push(Event::with_name("test event"))
+        }
+
+        // add events via span builder
+        let span_builder = SpanBuilder::from_name("span_name").with_events(events);
+        let mut span = tracer.build(span_builder);
+
+        // add events using span api after building the span
+        span.add_event("test event again, after span builder", Vec::new());
+        span.add_event("test event once again, after span builder", Vec::new());
+        span.end();
+        provider.force_flush();
+
+        // Assert
+        let exported_spans = exporter
+            .get_finished_spans()
+            .expect("Spans are expected to be exported.");
+        assert_eq!(exported_spans.len(), 1);
+        let span = &exported_spans[0];
+        assert_eq!(span.name, "span_name");
+        assert_eq!(span.events.len(), DEFAULT_MAX_EVENT_PER_SPAN as usize);
+        assert_eq!(span.events.dropped_count, DEFAULT_MAX_EVENT_PER_SPAN + 2);
+    }
 }
diff --git a/opentelemetry-sdk/src/trace/span.rs b/opentelemetry-sdk/src/trace/span.rs
index 2839c4114e..0d58061b09 100644
--- a/opentelemetry-sdk/src/trace/span.rs
+++ b/opentelemetry-sdk/src/trace/span.rs
@@ -11,7 +11,7 @@ use crate::trace::SpanLimits;
 use crate::Resource;

 use opentelemetry::trace::{Event, SpanContext, SpanId, SpanKind, Status};
-use opentelemetry::{trace, KeyValue};
+use opentelemetry::KeyValue;
 use std::borrow::Cow;
 use std::time::SystemTime;

@@ -42,7 +42,7 @@ pub(crate) struct SpanData {
     /// dropped.
     pub(crate) dropped_attributes_count: u32,
     /// Span events
-    pub(crate) events: crate::trace::EvictedQueue<Event>,
+    pub(crate) events: crate::trace::SpanEvents,
     /// Span Links
     pub(crate) links: crate::trace::SpanLinks,
     /// Span status
@@ -99,17 +99,23 @@ impl opentelemetry::trace::Span for Span {
     ) where
         T: Into<Cow<'static, str>>,
     {
+        let span_events_limit = self.span_limits.max_events_per_span as usize;
         let event_attributes_limit = self.span_limits.max_attributes_per_event as usize;
         self.with_data(|data| {
-            let dropped_attributes_count = attributes.len().saturating_sub(event_attributes_limit);
-            attributes.truncate(event_attributes_limit);
-
-            data.events.push_back(Event::new(
-                name,
-                timestamp,
-                attributes,
-                dropped_attributes_count as u32,
-            ))
+            if data.events.len() < span_events_limit {
+                let dropped_attributes_count =
+                    attributes.len().saturating_sub(event_attributes_limit);
+                attributes.truncate(event_attributes_limit);
+
+                data.events.add_event(Event::new(
+                    name,
+                    timestamp,
+                    attributes,
+                    dropped_attributes_count as u32,
+                ));
+            } else {
+                data.events.dropped_count += 1;
+            }
         });
     }

@@ -252,17 +258,16 @@ mod tests {
     use crate::testing::trace::NoopSpanExporter;
     use crate::trace::span_limit::{
         DEFAULT_MAX_ATTRIBUTES_PER_EVENT, DEFAULT_MAX_ATTRIBUTES_PER_LINK,
-        DEFAULT_MAX_ATTRIBUTES_PER_SPAN, DEFAULT_MAX_LINKS_PER_SPAN,
+        DEFAULT_MAX_ATTRIBUTES_PER_SPAN, DEFAULT_MAX_EVENT_PER_SPAN, DEFAULT_MAX_LINKS_PER_SPAN,
     };
-    use crate::trace::SpanLinks;
-    use opentelemetry::trace::{Link, SpanBuilder, TraceFlags, TraceId, Tracer};
+    use crate::trace::{SpanEvents, SpanLinks};
+    use opentelemetry::trace::{self, Link, SpanBuilder, TraceFlags, TraceId, Tracer};
     use opentelemetry::{trace::Span as _, trace::TracerProvider, KeyValue};
     use std::time::Duration;
    use std::vec;

     fn init() -> (crate::trace::Tracer, SpanData) {
         let provider = crate::trace::TracerProvider::default();
-        let config = provider.config();
         let tracer = provider.tracer("opentelemetry");
         let data = SpanData {
             parent_span_id: SpanId::from_u64(0),
@@ -272,7 +277,7 @@ mod tests {
             end_time: opentelemetry::time::now(),
             attributes: Vec::new(),
             dropped_attributes_count: 0,
-            events: crate::trace::EvictedQueue::new(config.span_limits.max_events_per_span),
+            events: SpanEvents::default(),
             links: SpanLinks::default(),
             status: Status::Unset,
         };
@@ -649,6 +654,35 @@ mod tests {
         assert_eq!(link_vec.len(), DEFAULT_MAX_LINKS_PER_SPAN as usize);
     }

+    #[test]
+    fn exceed_span_events_limit() {
+        let exporter = NoopSpanExporter::new();
+        let provider_builder =
+            crate::trace::TracerProvider::builder().with_simple_exporter(exporter);
+        let provider = provider_builder.build();
+        let tracer = provider.tracer("opentelemetry-test");
+
+        let mut events = Vec::new();
+        for _i in 0..(DEFAULT_MAX_EVENT_PER_SPAN * 2) {
+            events.push(Event::with_name("test event"))
+        }
+
+        // add events via span builder
+        let span_builder = tracer.span_builder("test").with_events(events);
+        let mut span = tracer.build(span_builder);
+
+        // add events using span api after building the span
+        span.add_event("test event again, after span builder", Vec::new());
+        span.add_event("test event once again, after span builder", Vec::new());
+        let span_events = span
+            .data
+            .clone()
+            .expect("span data should not be empty as we already set it before")
+            .events;
+        let event_vec: Vec<_> = span_events.events;
+        assert_eq!(event_vec.len(), DEFAULT_MAX_EVENT_PER_SPAN as usize);
+    }
+
     #[test]
     fn test_span_exported_data() {
         let provider = crate::trace::TracerProvider::builder()
diff --git
a/opentelemetry-sdk/src/trace/span_processor.rs b/opentelemetry-sdk/src/trace/span_processor.rs index 395cdd73be..0f6811e6e0 100644 --- a/opentelemetry-sdk/src/trace/span_processor.rs +++ b/opentelemetry-sdk/src/trace/span_processor.rs @@ -721,7 +721,7 @@ mod tests { use crate::testing::trace::{ new_test_export_span_data, new_test_exporter, new_tokio_test_exporter, }; - use crate::trace::{BatchConfig, EvictedQueue, SpanLinks}; + use crate::trace::{BatchConfig, SpanEvents, SpanLinks}; use async_trait::async_trait; use opentelemetry::trace::{SpanContext, SpanId, SpanKind, Status}; use std::fmt::Debug; @@ -750,7 +750,7 @@ mod tests { end_time: opentelemetry::time::now(), attributes: Vec::new(), dropped_attributes_count: 0, - events: EvictedQueue::new(0), + events: SpanEvents::default(), links: SpanLinks::default(), status: Status::Unset, resource: Default::default(), diff --git a/opentelemetry-sdk/src/trace/tracer.rs b/opentelemetry-sdk/src/trace/tracer.rs index 4b918a2fe4..cc5497b846 100644 --- a/opentelemetry-sdk/src/trace/tracer.rs +++ b/opentelemetry-sdk/src/trace/tracer.rs @@ -11,7 +11,7 @@ use crate::{ trace::{ provider::{TracerProvider, TracerProviderInner}, span::{Span, SpanData}, - Config, EvictedQueue, SpanLimits, SpanLinks, + Config, SpanLimits, SpanLinks, }, InstrumentationLibrary, }; @@ -25,6 +25,8 @@ use opentelemetry::{ use std::fmt; use std::sync::{Arc, Weak}; +use super::SpanEvents; + /// `Tracer` implementation to create and manage spans #[derive(Clone)] pub struct Tracer { @@ -237,8 +239,10 @@ impl opentelemetry::trace::Tracer for Tracer { let start_time = start_time.unwrap_or_else(opentelemetry::time::now); let end_time = end_time.unwrap_or(start_time); - let mut events_queue = EvictedQueue::new(span_limits.max_events_per_span); - if let Some(mut events) = events { + let spans_events_limit = span_limits.max_events_per_span as usize; + let span_events: SpanEvents = if let Some(mut events) = events { + let dropped_count = events.len().saturating_sub(spans_events_limit); + events.truncate(spans_events_limit); let event_attributes_limit = span_limits.max_attributes_per_event as usize; for event in events.iter_mut() { let dropped_attributes_count = event @@ -248,8 +252,13 @@ impl opentelemetry::trace::Tracer for Tracer { event.attributes.truncate(event_attributes_limit); event.dropped_attributes_count = dropped_attributes_count as u32; } - events_queue.append_vec(&mut events); - } + SpanEvents { + events, + dropped_count: dropped_count as u32, + } + } else { + SpanEvents::default() + }; let span_context = SpanContext::new(trace_id, span_id, flags, false, trace_state); Span::new( @@ -262,7 +271,7 @@ impl opentelemetry::trace::Tracer for Tracer { end_time, attributes: attribute_options, dropped_attributes_count, - events: events_queue, + events: span_events, links: span_links, status, }), diff --git a/opentelemetry-stdout/src/trace/transform.rs b/opentelemetry-stdout/src/trace/transform.rs index ac3e34a50c..877c3190e8 100644 --- a/opentelemetry-stdout/src/trace/transform.rs +++ b/opentelemetry-stdout/src/trace/transform.rs @@ -107,7 +107,7 @@ impl From for Span { end_time: value.end_time, dropped_attributes_count: value.dropped_attributes_count, attributes: value.attributes.into_iter().map(Into::into).collect(), - dropped_events_count: value.events.dropped_count(), + dropped_events_count: value.events.dropped_count, events: value.events.into_iter().map(Into::into).collect(), dropped_links_count: value.links.dropped_count, links: value.links.iter().map(Into::into).collect(), diff 
--git a/opentelemetry-zipkin/src/exporter/model/span.rs b/opentelemetry-zipkin/src/exporter/model/span.rs index 1b054a1db2..6e21b52588 100644 --- a/opentelemetry-zipkin/src/exporter/model/span.rs +++ b/opentelemetry-zipkin/src/exporter/model/span.rs @@ -60,8 +60,8 @@ mod tests { use crate::exporter::model::span::{Kind, Span}; use crate::exporter::model::{into_zipkin_span, OTEL_ERROR_DESCRIPTION, OTEL_STATUS_CODE}; use opentelemetry::trace::{SpanContext, SpanId, SpanKind, Status, TraceFlags, TraceId}; - use opentelemetry_sdk::trace::SpanLinks; - use opentelemetry_sdk::{export::trace::SpanData, trace::EvictedQueue, Resource}; + use opentelemetry_sdk::trace::{SpanEvents, SpanLinks}; + use opentelemetry_sdk::{export::trace::SpanData, Resource}; use std::borrow::Cow; use std::collections::HashMap; use std::net::Ipv4Addr; @@ -163,7 +163,7 @@ mod tests { end_time: SystemTime::now(), attributes: Vec::new(), dropped_attributes_count: 0, - events: EvictedQueue::new(20), + events: SpanEvents::default(), links: SpanLinks::default(), status, resource: Cow::Owned(Resource::default()), From 4f1c3a26ff83507f339c2d29b2a67b9d3ccb09a2 Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Thu, 9 Nov 2023 12:26:21 -0800 Subject: [PATCH 16/68] Improve contributing guidelines (#1354) --- CONTRIBUTING.md | 70 +++++++++++++++++++++++++++++++++---------------- 1 file changed, 48 insertions(+), 22 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index dbbd63547b..15b3ca2a5c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,16 +1,22 @@ # Contributing to opentelemetry-rust -The Rust special interest group (SIG) meets regularly. See the -OpenTelemetry -[community](https://github.com/open-telemetry/community#implementation-sigs) -repo for information on this and other language SIGs. - -See the [public meeting -notes](https://docs.google.com/document/d/1tGKuCsSnyT2McDncVJrMgg74_z8V06riWZa0Sr79I_4/edit) -for a summary description of past meetings. To request edit access, -join the meeting or get in touch on +The Rust special interest group (SIG) meets weekly on Tuesdays at 8 AM Pacific +Time (16:00 UTC). The meeting is subject to change depending on contributors' +availability. Check the [OpenTelemetry community +calendar](https://calendar.google.com/calendar/embed?src=google.com_b79e3e90j7bbsa2n2p5an5lf60%40group.calendar.google.com) +for specific dates and for Zoom meeting links. "OTel Rust SIG" is the name of +meeting for this group. + +Meeting notes are available as a public [Google +doc](https://docs.google.com/document/d/1tGKuCsSnyT2McDncVJrMgg74_z8V06riWZa0Sr79I_4/edit). +If you have trouble accessing the doc, please get in touch on [Slack](https://cloud-native.slack.com/archives/C03GDP0H023). +The meeting is open for all to join. We invite everyone to join our meeting, +regardless of your experience level. Whether you're a seasoned OpenTelemetry +developer, just starting your journey, or simply curious about the work we do, +you're more than welcome to participate! + ## Pull Requests ### Prerequisites @@ -64,20 +70,32 @@ the repo to catch any issues locally. ### How to Receive Comments -* If the PR is not ready for review, please put `[WIP]` in the title, - tag it as `work-in-progress`, or mark it as - [`draft`](https://github.blog/2019-02-14-introducing-draft-pull-requests/). -* Make sure CLA is signed and CI is clear. +* If the PR is not ready for review, please put `[WIP]` in the title or mark it + as [`draft`](https://github.blog/2019-02-14-introducing-draft-pull-requests/). 
+* Make sure CLA is signed and all required CI checks are clear. +* Submit small, focused PRs addressing a single concern/issue. +* Make sure the PR title reflects the contribution. +* Write a summary that helps understand the change. +* Include usage examples in the summary, where applicable. +* Include benchmarks (before/after) in the summary, for contributions that are + performance enhancements. ### How to Get PRs Merged A PR is considered to be **ready to merge** when: -* It has received approval from Collaborators/Maintainers. -* Major feedback is resolved. +* It has received approval from + [Approvers](https://github.com/open-telemetry/community/blob/main/community-membership.md#approver). + / + [Maintainers](https://github.com/open-telemetry/community/blob/main/community-membership.md#maintainer). +* Major feedbacks are resolved. -Any Collaborator/Maintainer can merge the PR once it is **ready to -merge**. +Any Maintainer can merge the PR once it is **ready to merge**. Note, that some +PRs may not be merged immediately if the repo is in the process of a release and +the maintainers decided to defer the PR to the next release train. Also, +maintainers may decide to wait for more than one approval for certain PRs, +particularly ones that are affecting multiple areas, or topics that may warrant +more discussion. ## Design Choices @@ -102,23 +120,23 @@ language rather than conform to specific API names or argument patterns in the spec. For a deeper discussion, see: -https://github.com/open-telemetry/opentelemetry-specification/issues/165 + ### Error Handling -Currently, the Opentelemetry Rust SDK has two ways to handle errors. In the situation where errors are not allowed to return. One should call global error handler to process the errors. Otherwise, one should return the errors. + +Currently, the Opentelemetry Rust SDK has two ways to handle errors. In the situation where errors are not allowed to return. One should call global error handler to process the errors. Otherwise, one should return the errors. The Opentelemetry Rust SDK comes with an error type `openetelemetry::Error`. For different function, one error has been defined. All error returned by trace module MUST be wrapped in `opentelemetry::trace::TraceError`. All errors returned by metrics module MUST be wrapped in `opentelemetry::metrics::MetricsError`. For users that want to implement their own exporters. It's RECOMMENDED to wrap all errors from the exporter into a crate-level error type, and implement `ExporterError` trait. ### Priority of configurations + OpenTelemetry supports multiple ways to configure the API, SDK and other components. The priority of configurations is as follows: - Environment variables - Compiling time configurations provided in the source code - - ## Style Guide * Run `cargo clippy --all` - this will catch common mistakes and improve @@ -131,6 +149,7 @@ issues. * Run `cargo test --all` - this will execute code and doc tests for all projects in this workspace. * Run `cargo bench` - this will run benchmarks to show performance +* Run `cargo bench` - this will run benchmarks to show performance regressions ## Approvers and Maintainers @@ -165,5 +184,12 @@ repo](https://github.com/open-telemetry/community/blob/master/community-membersh [![contributors](https://contributors-img.web.app/image?repo=open-telemetry/opentelemetry-rust)](https://github.com/open-telemetry/opentelemetry-rust/graphs/contributors) ## FAQ + ### Where should I put third party propagators/exporters, contrib or standalone crates? 
-As of now, the specification classify the propagators into three categories: Fully opened standards, platform-specific standards, proprietary headers. The conclusion is only the fully opened standards should live in SDK packages/repos. So here, only fully opened standards should live as independent crate. For more detail and discussion, see [this pr](https://github.com/open-telemetry/opentelemetry-specification/pull/1144). + +As of now, the specification classify the propagators into three categories: +Fully opened standards, platform-specific standards, proprietary headers. The +conclusion is only the fully opened standards should live in SDK packages/repos. +So here, only fully opened standards should live as independent crate. For more +detail and discussion, see [this +pr](https://github.com/open-telemetry/opentelemetry-specification/pull/1144). From 5527a11b69342dbe54906941859ab898abf7020e Mon Sep 17 00:00:00 2001 From: Julian Tescher Date: Thu, 9 Nov 2023 18:24:28 -0500 Subject: [PATCH 17/68] Fix metric export data for unused gauges (#1363) --- opentelemetry-sdk/CHANGELOG.md | 8 ++++++-- opentelemetry-sdk/src/metrics/internal/last_value.rs | 6 +++++- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/opentelemetry-sdk/CHANGELOG.md b/opentelemetry-sdk/CHANGELOG.md index 919467a998..4633447021 100644 --- a/opentelemetry-sdk/CHANGELOG.md +++ b/opentelemetry-sdk/CHANGELOG.md @@ -13,15 +13,19 @@ *Behavior Change*: When enforcing `max_links_per_span`, `max_events_per_span` from `SpanLimits`, links/events are kept in the first-come order. The previous "eviction" based approach is no longer performed. - + *Breaking Change Affecting Exporter authors*: - + `SpanData` now stores `links` as `SpanLinks` instead of `EvictedQueue` where `SpanLinks` is a struct with a `Vec` of links and `dropped_count`. `SpanData` now stores `events` as `SpanEvents` instead of `EvictedQueue` where `SpanEvents` is a struct with a `Vec` of events and `dropped_count`. +### Fixed + +- Fix metric export corruption if gauges have not received a last value. (#1363) + ## v0.21.0 ### Added diff --git a/opentelemetry-sdk/src/metrics/internal/last_value.rs b/opentelemetry-sdk/src/metrics/internal/last_value.rs index 28e9a0a107..57f35fc166 100644 --- a/opentelemetry-sdk/src/metrics/internal/last_value.rs +++ b/opentelemetry-sdk/src/metrics/internal/last_value.rs @@ -55,8 +55,12 @@ impl> LastValue { pub(crate) fn compute_aggregation(&self, dest: &mut Vec>) { let mut values = match self.values.lock() { Ok(guard) if !guard.is_empty() => guard, - _ => return, + _ => { + dest.clear(); // poisoned or no values recorded yet + return; + } }; + let n = values.len(); if n > dest.capacity() { dest.reserve(n - dest.capacity()); From ddfe9ffa2b194b392369809591597d5beb617636 Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Thu, 9 Nov 2023 15:32:28 -0800 Subject: [PATCH 18/68] Requesting to move Cijo Thomas from Approver to Maintainer (#1355) --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 15b3ca2a5c..4753b2b333 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -158,13 +158,13 @@ For GitHub groups see the [code owners](CODEOWNERS) file. 
### Maintainers +* [Cijo Thomas](https://github.com/cijothomas) * [Harold Dost](https://github.com/hdost) * [Julian Tescher](https://github.com/jtescher) * [Zhongyang Wu](https://github.com/TommyCpp) ### Approvers -* [Cijo Thomas](https://github.com/cijothomas) * [Lalit Kumar Bhasin](https://github.com/lalitb) * [Shaun Cox](https://github.com/shaun-cox) From 500fdfca0966eb7e18b6826a4f0ddb38b85fd8e8 Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Fri, 10 Nov 2023 07:50:22 -0800 Subject: [PATCH 19/68] Nit fixes to md file to reduce warnings (#1364) --- .cspell.json | 20 ++++++++++++++++ .github/workflows/ci.yml | 2 ++ CODEOWNERS | 10 ++++---- CONTRIBUTING.md | 42 ++++++++++++++++----------------- README.md | 34 +++++++++++++------------- opentelemetry-jaeger/src/lib.rs | 4 ++-- 6 files changed, 67 insertions(+), 45 deletions(-) diff --git a/.cspell.json b/.cspell.json index 48738acf68..da552a5265 100644 --- a/.cspell.json +++ b/.cspell.json @@ -25,13 +25,33 @@ // these are words that are always correct and can be thought of as our // workspace dictionary. "words": [ + "actix", + "appender", + "appenders", + "Bhasin", + "Cijo", + "codecov", "deque", + "Dirkjan", "hasher", + "isahc", + "Isobel", + "jaegertracing", + "Kühle", + "Kumar", + "Lalit", "msrv", + "Ochtman", + "openetelemetry", "opentelemetry", "OTLP", + "protoc", "quantile", + "Redelmeier", + "reqwest", "rustc", + "Tescher", + "Zhongyang", "zipkin" ], "enabledLanguageIds": [ diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 354afc089d..af39186680 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -6,6 +6,8 @@ on: push: branches: - main + paths-ignore: + - '**.md' jobs: test: strategy: diff --git a/CODEOWNERS b/CODEOWNERS index d6962a905a..7931a166ff 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,5 +1,7 @@ -# Code owners file. -# This file controls who is tagged for review for any given pull request. +# Code owners file -# For anything not explicitly taken by someone else: -* @open-telemetry/rust-approvers +## This file controls who is tagged for review for any given pull request. + +## For anything not explicitly taken by someone else: + +* @open-telemetry/rust-approvers diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4753b2b333..e831d0df42 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -21,7 +21,7 @@ you're more than welcome to participate! ### Prerequisites -Crate `opentelemetry-otlp` uses gRPC + Protocol Buffers.
+Crate `opentelemetry-otlp` uses gRPC + Protocol Buffers. You can provide the protocol compiler protoc path programmatically (only works with tonic) or build it from source ```sh @@ -40,13 +40,13 @@ Everyone is welcome to contribute code to `opentelemetry-rust` via GitHub pull requests (PRs). ```sh -$ git clone --recurse-submodule https://github.com/open-telemetry/opentelemetry-rust +git clone --recurse-submodule https://github.com/open-telemetry/opentelemetry-rust ``` Enter the newly created directory and add your fork as a new remote: ```sh -$ git remote add git@github.com:/opentelemetry-rust +git remote add git@github.com:/opentelemetry-rust ``` Check out a new branch, make modifications, run linters and tests, and @@ -70,25 +70,25 @@ the repo to catch any issues locally. ### How to Receive Comments -* If the PR is not ready for review, please put `[WIP]` in the title or mark it +- If the PR is not ready for review, please put `[WIP]` in the title or mark it as [`draft`](https://github.blog/2019-02-14-introducing-draft-pull-requests/). -* Make sure CLA is signed and all required CI checks are clear. -* Submit small, focused PRs addressing a single concern/issue. -* Make sure the PR title reflects the contribution. -* Write a summary that helps understand the change. -* Include usage examples in the summary, where applicable. -* Include benchmarks (before/after) in the summary, for contributions that are +- Make sure CLA is signed and all required CI checks are clear. +- Submit small, focused PRs addressing a single concern/issue. +- Make sure the PR title reflects the contribution. +- Write a summary that helps understand the change. +- Include usage examples in the summary, where applicable. +- Include benchmarks (before/after) in the summary, for contributions that are performance enhancements. ### How to Get PRs Merged A PR is considered to be **ready to merge** when: -* It has received approval from +- It has received approval from [Approvers](https://github.com/open-telemetry/community/blob/main/community-membership.md#approver). / [Maintainers](https://github.com/open-telemetry/community/blob/main/community-membership.md#maintainer). -* Major feedbacks are resolved. +- Major feedbacks are resolved. Any Maintainer can merge the PR once it is **ready to merge**. Note, that some PRs may not be merged immediately if the repo is in the process of a release and @@ -124,7 +124,7 @@ For a deeper discussion, see: ### Error Handling -Currently, the Opentelemetry Rust SDK has two ways to handle errors. In the situation where errors are not allowed to return. One should call global error handler to process the errors. Otherwise, one should return the errors. +Currently, the Opentelemetry Rust SDK has two ways to handle errors. In the situation where errors are not allowed to return. One should call global error handler to process the errors. Otherwise, one should return the errors. The Opentelemetry Rust SDK comes with an error type `openetelemetry::Error`. For different function, one error has been defined. All error returned by trace module MUST be wrapped in `opentelemetry::trace::TraceError`. All errors returned by metrics module MUST be wrapped in `opentelemetry::metrics::MetricsError`. 
@@ -139,17 +139,17 @@ OpenTelemetry supports multiple ways to configure the API, SDK and other compone ## Style Guide -* Run `cargo clippy --all` - this will catch common mistakes and improve +- Run `cargo clippy --all` - this will catch common mistakes and improve your Rust code -* Run `cargo fmt` - this will find and fix code formatting +- Run `cargo fmt` - this will find and fix code formatting issues. ## Testing and Benchmarking -* Run `cargo test --all` - this will execute code and doc tests for all +- Run `cargo test --all` - this will execute code and doc tests for all projects in this workspace. -* Run `cargo bench` - this will run benchmarks to show performance -* Run `cargo bench` - this will run benchmarks to show performance +- Run `cargo bench` - this will run benchmarks to show performance +- Run `cargo bench` - this will run benchmarks to show performance regressions ## Approvers and Maintainers @@ -170,9 +170,9 @@ For GitHub groups see the [code owners](CODEOWNERS) file. ### Emeritus -* [Dirkjan Ochtman](https://github.com/djc) -* [Jan Kühle](https://github.com/frigus02) -* [Isobel Redelmeier](https://github.com/iredelmeier) +- [Dirkjan Ochtman](https://github.com/djc) +- [Jan Kühle](https://github.com/frigus02) +- [Isobel Redelmeier](https://github.com/iredelmeier) ### Become an Approver or a Maintainer diff --git a/README.md b/README.md index 9068a7064f..ea6227b176 100644 --- a/README.md +++ b/README.md @@ -31,7 +31,6 @@ observability tools. [Jaeger]: https://www.jaegertracing.io [msrv]: #supported-rust-versions - ## Project Status | Signal | Status | @@ -101,36 +100,36 @@ adapter crates to assist in propagating state and instrumenting applications. In particular, the following crates are likely to be of interest: -- [`opentelemetry-aws`] provides unofficial propagators for AWS X-ray. -- [`opentelemetry-datadog`] provides additional exporters to [`Datadog`]. -- [`opentelemetry-dynatrace`] provides additional exporters to Dynatrace. -- [`opentelemetry-contrib`] provides additional exporters and propagators that +* [`opentelemetry-aws`] provides unofficial propagators for AWS X-ray. +* [`opentelemetry-datadog`] provides additional exporters to [`Datadog`]. +* [`opentelemetry-dynatrace`] provides additional exporters to Dynatrace. +* [`opentelemetry-contrib`] provides additional exporters and propagators that are experimental. -- [`opentelemetry-http`] provides an interface for injecting and extracting +* [`opentelemetry-http`] provides an interface for injecting and extracting trace information from [`http`] headers. -- [`opentelemetry-jaeger`] provides a pipeline and exporter for sending trace +* [`opentelemetry-jaeger`] provides a pipeline and exporter for sending trace information to [`Jaeger`]. -- [`opentelemetry-otlp`] exporter for sending trace and metric data in the OTLP +* [`opentelemetry-otlp`] exporter for sending trace and metric data in the OTLP format to the OpenTelemetry collector. -- [`opentelemetry-prometheus`] provides a pipeline and exporter for sending +* [`opentelemetry-prometheus`] provides a pipeline and exporter for sending metrics information to [`Prometheus`]. -- [`opentelemetry-semantic-conventions`] provides standard names and semantic +* [`opentelemetry-semantic-conventions`] provides standard names and semantic otel conventions. -- [`opentelemetry-stackdriver`] provides an exporter for Google's [Cloud Trace] +* [`opentelemetry-stackdriver`] provides an exporter for Google's [Cloud Trace] (which used to be called StackDriver). 
-- [`opentelemetry-zipkin`] provides a pipeline and exporter for sending trace +* [`opentelemetry-zipkin`] provides a pipeline and exporter for sending trace information to [`Zipkin`]. Additionally, there are also several third-party crates which are not maintained by the `opentelemetry` project. These include: -- [`tracing-opentelemetry`] provides integration for applications instrumented +* [`tracing-opentelemetry`] provides integration for applications instrumented using the [`tracing`] API and ecosystem. -- [`actix-web-opentelemetry`] provides integration for the [`actix-web`] web +* [`actix-web-opentelemetry`] provides integration for the [`actix-web`] web server and ecosystem. -- [`opentelemetry-application-insights`] provides an unofficial [Azure +* [`opentelemetry-application-insights`] provides an unofficial [Azure Application Insights] exporter. -- [`opentelemetry-tide`] provides integration for the [`Tide`] web server and +* [`opentelemetry-tide`] provides integration for the [`Tide`] web server and ecosystem. If you're the maintainer of an `opentelemetry` ecosystem crate not listed @@ -149,7 +148,6 @@ above, please let us know! We'd love to add your project to the list! [`opentelemetry-contrib`]: https://crates.io/crates/opentelemetry-contrib [`Datadog`]: https://www.datadoghq.com [`opentelemetry-datadog`]: https://crates.io/crates/opentelemetry-datadog -[`Dynatrace`]: https://www.dynatrace.com/ [`opentelemetry-dynatrace`]: https://crates.io/crates/opentelemetry-dynatrace [`opentelemetry-semantic-conventions`]: https://crates.io/crates/opentelemetry-semantic-conventions [`http`]: https://crates.io/crates/http @@ -182,7 +180,7 @@ this policy. See the [contributing file](CONTRIBUTING.md). -The Rust special interest group (SIG) meets weekly on Tuesdays at 9 AM Pacific +The Rust special interest group (SIG) meets weekly on Tuesdays at 8 AM Pacific Time (16:00 UTC). The meeting is subject to change depending on contributors' availability. Check the [OpenTelemetry community calendar](https://calendar.google.com/calendar/embed?src=google.com_b79e3e90j7bbsa2n2p5an5lf60%40group.calendar.google.com) diff --git a/opentelemetry-jaeger/src/lib.rs b/opentelemetry-jaeger/src/lib.rs index bd4865a212..3febd3a0de 100644 --- a/opentelemetry-jaeger/src/lib.rs +++ b/opentelemetry-jaeger/src/lib.rs @@ -151,7 +151,7 @@ //! You can set it using one of the following methods from highest priority to lowest priority. //! 1. [`with_service_name`]. //! 2. include a `service.name` key value pairs when configure resource using [`with_trace_config`]. -//! 3. set the service name as `OTEL_SERVCE_NAME` environment variable. +//! 3. set the service name as `OTEL_SERVICE_NAME` environment variable. //! 4. set the `service.name` attributes in `OTEL_RESOURCE_ATTRIBUTES`. //! 5. if the service name is not provided by the above method. `unknown_service` will be used. //! @@ -207,7 +207,7 @@ //! ``` //! //! ### Export to collectors -//! Note that this example requires `collecotr_client` and `isahc_collector_client` feature. +//! Note that this example requires `collector_client` and `isahc_collector_client` feature. //! ```ignore //! use opentelemetry::{global, KeyValue, trace::{Tracer, TraceError}}; //! 
use opentelemetry_sdk::{trace::{config, RandomIdGenerator, Sampler}, Resource}; From a74ecd1316f41e231dea0b9d36fdcd8f093056df Mon Sep 17 00:00:00 2001 From: Lalit Kumar Bhasin Date: Fri, 10 Nov 2023 09:39:06 -0800 Subject: [PATCH 20/68] [Tracing Appender] Propagate event name to exporters (#1346) --- opentelemetry-appender-tracing/Cargo.toml | 2 +- opentelemetry-appender-tracing/examples/basic.rs | 2 +- opentelemetry-appender-tracing/src/layer.rs | 5 +++++ opentelemetry-user-events-logs/Cargo.toml | 2 +- opentelemetry-user-events-logs/examples/basic.rs | 2 +- .../src/logs/exporter.rs | 16 ++++++++++++---- 6 files changed, 21 insertions(+), 8 deletions(-) diff --git a/opentelemetry-appender-tracing/Cargo.toml b/opentelemetry-appender-tracing/Cargo.toml index c2eba3c4d7..7c2e389ebc 100644 --- a/opentelemetry-appender-tracing/Cargo.toml +++ b/opentelemetry-appender-tracing/Cargo.toml @@ -13,7 +13,7 @@ rust-version = "1.65" [dependencies] opentelemetry = { version = "0.21", path = "../opentelemetry", features = ["logs"] } opentelemetry_sdk = { version = "0.21", path = "../opentelemetry-sdk", features = ["logs"] } -tracing = {version = "0.1.37", default-features = false, features = ["std"]} +tracing = {version = "0.1", default-features = false, features = ["std"]} tracing-core = "0.1.31" tracing-subscriber = { version = "0.3.0", default-features = false, features = ["registry", "std"] } once_cell = "1.13.0" diff --git a/opentelemetry-appender-tracing/examples/basic.rs b/opentelemetry-appender-tracing/examples/basic.rs index 32040ab48f..9ca33150f0 100644 --- a/opentelemetry-appender-tracing/examples/basic.rs +++ b/opentelemetry-appender-tracing/examples/basic.rs @@ -23,6 +23,6 @@ fn main() { let layer = layer::OpenTelemetryTracingBridge::new(&provider); tracing_subscriber::registry().with(layer).init(); - error!(target: "my-system", event_id = 20, event_name = "my-event_name", user_name = "otel", user_email = "otel@opentelemetry.io"); + error!(name: "my-event-name", target: "my-system", event_id = 20, user_name = "otel", user_email = "otel@opentelemetry.io"); drop(provider); } diff --git a/opentelemetry-appender-tracing/src/layer.rs b/opentelemetry-appender-tracing/src/layer.rs index e61648184e..840e4a6ae2 100644 --- a/opentelemetry-appender-tracing/src/layer.rs +++ b/opentelemetry-appender-tracing/src/layer.rs @@ -103,6 +103,11 @@ where log_record.severity_number = Some(map_severity_to_otel_severity(meta.level().as_str())); log_record.severity_text = Some(meta.level().to_string().into()); + // add the `name` metadata to attributes + // TBD - Propose this to be part of log_record metadata. + let vec = vec![("name", meta.name())]; + log_record.attributes = Some(vec.into_iter().map(|(k, v)| (k.into(), v.into())).collect()); + // Not populating ObservedTimestamp, instead relying on OpenTelemetry // API to populate it with current time. 
diff --git a/opentelemetry-user-events-logs/Cargo.toml b/opentelemetry-user-events-logs/Cargo.toml index 708ab85855..e0d545f230 100644 --- a/opentelemetry-user-events-logs/Cargo.toml +++ b/opentelemetry-user-events-logs/Cargo.toml @@ -21,7 +21,7 @@ chrono = { version="0.4", default-features = false, features=["std"] } [dev-dependencies] opentelemetry-appender-tracing = { path = "../opentelemetry-appender-tracing" } -tracing = { version = "0.1.37", default-features = false, features = ["std"] } +tracing = { version = "0.1", default-features = false, features = ["std"] } tracing-core = "0.1.31" tracing-subscriber = { version = "0.3.0", default-features = false, features = ["registry", "std"] } microbench = "0.5" diff --git a/opentelemetry-user-events-logs/examples/basic.rs b/opentelemetry-user-events-logs/examples/basic.rs index c022b37f95..67977cd912 100644 --- a/opentelemetry-user-events-logs/examples/basic.rs +++ b/opentelemetry-user-events-logs/examples/basic.rs @@ -30,7 +30,7 @@ fn main() { // event_id is also passed as an attribute now, there is nothing in metadata where a // numeric id can be stored. error!( - event_name = "my-event-name", + name: "my-event-name", event_id = 20, user_name = "otel user", user_email = "otel@opentelemetry.io" diff --git a/opentelemetry-user-events-logs/src/logs/exporter.rs b/opentelemetry-user-events-logs/src/logs/exporter.rs index c937543b20..3825af496d 100644 --- a/opentelemetry-user-events-logs/src/logs/exporter.rs +++ b/opentelemetry-user-events-logs/src/logs/exporter.rs @@ -52,7 +52,8 @@ pub(crate) struct UserEventsExporter { } const EVENT_ID: &str = "event_id"; -const EVENT_NAME: &str = "event_name"; +const EVENT_NAME_PRIMARY: &str = "event_name"; +const EVENT_NAME_SECONDARY: &str = "name"; //TBD - How to configure provider name and provider group impl UserEventsExporter { @@ -222,11 +223,17 @@ impl UserEventsExporter { event_id = *value; continue; } - (EVENT_NAME, AnyValue::String(value)) => { + (EVENT_NAME_PRIMARY, AnyValue::String(value)) => { is_event_name = true; event_name = value.as_str(); continue; } + (EVENT_NAME_SECONDARY, AnyValue::String(value)) => { + if !is_event_name { + event_name = value.as_str(); + } + continue; + } _ => { if !is_part_c_present { eb.add_struct_with_bookmark("PartC", 1, 0, &mut cs_c_bookmark); @@ -237,6 +244,7 @@ impl UserEventsExporter { } } } + if is_part_c_present { eb.set_struct_field_count(cs_c_bookmark, cs_c_count); } @@ -245,7 +253,7 @@ impl UserEventsExporter { let mut cs_b_bookmark: usize = 0; let mut cs_b_count = 0; eb.add_struct_with_bookmark("PartB", 1, 0, &mut cs_b_bookmark); - eb.add_str("_typename", "Logs", FieldFormat::Default, 0); + eb.add_str("_typeName", "Logs", FieldFormat::Default, 0); cs_b_count += 1; if log_data.record.body.is_some() { @@ -282,7 +290,7 @@ impl UserEventsExporter { eb.add_value("eventId", event_id, FieldFormat::SignedInt, 0); cs_b_count += 1; } - if is_event_name { + if !event_name.is_empty() { eb.add_str("name", event_name, FieldFormat::Default, 0); cs_b_count += 1; } From 7bb97d5e305c990ea844dac5c220d1157fecf7b3 Mon Sep 17 00:00:00 2001 From: Julian Tescher Date: Fri, 10 Nov 2023 15:10:58 -0500 Subject: [PATCH 21/68] Return consistent `Meter` for a given `MeterProvider` (#1351) --- opentelemetry-sdk/CHANGELOG.md | 1 + .../src/metrics/meter_provider.rs | 33 ++++++++++++------- 2 files changed, 22 insertions(+), 12 deletions(-) diff --git a/opentelemetry-sdk/CHANGELOG.md b/opentelemetry-sdk/CHANGELOG.md index 4633447021..a5fadd3a1c 100644 --- a/opentelemetry-sdk/CHANGELOG.md 
+++ b/opentelemetry-sdk/CHANGELOG.md
@@ -25,6 +25,7 @@
 ### Fixed

 - Fix metric export corruption if gauges have not received a last value. (#1363)
+- Return consistent `Meter` for a given scope from `MeterProvider`. (#1351)

 ## v0.21.0

diff --git a/opentelemetry-sdk/src/metrics/meter_provider.rs b/opentelemetry-sdk/src/metrics/meter_provider.rs
index 6f3178083c..53128de1d8 100644
--- a/opentelemetry-sdk/src/metrics/meter_provider.rs
+++ b/opentelemetry-sdk/src/metrics/meter_provider.rs
@@ -1,16 +1,15 @@
 use core::fmt;
 use std::{
     borrow::Cow,
+    collections::HashMap,
     sync::{
         atomic::{AtomicBool, Ordering},
-        Arc,
+        Arc, Mutex,
     },
 };

 use opentelemetry::{
-    metrics::{
-        noop::NoopMeterCore, InstrumentProvider, Meter, MeterProvider, MetricsError, Result,
-    },
+    metrics::{noop::NoopMeterCore, Meter, MeterProvider, MetricsError, Result},
     KeyValue,
 };

@@ -28,6 +27,7 @@ use super::{meter::SdkMeter, pipeline::Pipelines, reader::MetricReader, view::Vi
 #[derive(Clone, Debug)]
 pub struct SdkMeterProvider {
     pipes: Arc<Pipelines>,
+    meters: Arc<Mutex<HashMap<Scope, Arc<SdkMeter>>>>,
     is_shutdown: Arc<AtomicBool>,
 }

@@ -123,15 +123,23 @@ impl MeterProvider for SdkMeterProvider {
         schema_url: Option<impl Into<Cow<'static, str>>>,
         attributes: Option<Vec<KeyValue>>,
     ) -> Meter {
-        let inst_provider: Arc<dyn InstrumentProvider + Send + Sync> =
-            if !self.is_shutdown.load(Ordering::Relaxed) {
-                let scope = Scope::new(name, version, schema_url, attributes);
-                Arc::new(SdkMeter::new(scope, self.pipes.clone()))
-            } else {
-                Arc::new(NoopMeterCore::new())
-            };
+        if self.is_shutdown.load(Ordering::Relaxed) {
+            return Meter::new(Arc::new(NoopMeterCore::new()));
+        }
+
+        let scope = Scope::new(name, version, schema_url, attributes);

-        Meter::new(inst_provider)
+        if let Ok(mut meters) = self.meters.lock() {
+            let meter = meters
+                .entry(scope)
+                .or_insert_with_key(|scope| {
+                    Arc::new(SdkMeter::new(scope.clone(), self.pipes.clone()))
+                })
+                .clone();
+            Meter::new(meter)
+        } else {
+            Meter::new(Arc::new(NoopMeterCore::new()))
+        }
     }
 }

@@ -186,6 +194,7 @@ impl MeterProviderBuilder {
                 self.readers,
                 self.views,
             )),
+            meters: Default::default(),
             is_shutdown: Arc::new(AtomicBool::new(false)),
         }
     }

From 5fc4101826b4641cd40676cd2310ee65b1e3f365 Mon Sep 17 00:00:00 2001
From: Harold Dost
Date: Sun, 12 Nov 2023 16:48:42 +0100
Subject: [PATCH 22/68] Use faster Rng in RandomIdGenerator (0%-6% performance
 improvement) (#1106)

Move to SmallRng from ThreadRng

SmallRng provides 0-6% improvement in Traces.
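A minimal sketch of the per-thread setup this patch adopts (assuming `rand` 0.8
with its `small_rng` feature enabled; the `random_u64` helper below is
illustrative only and not part of the SDK's API):

    use rand::{rngs::SmallRng, Rng, SeedableRng};
    use std::cell::RefCell;

    thread_local! {
        // One cheap, non-cryptographic RNG per thread, seeded once from OS entropy.
        static CURRENT_RNG: RefCell<SmallRng> = RefCell::new(SmallRng::from_entropy());
    }

    // Draw random bits for an id without locking or re-seeding per call.
    fn random_u64() -> u64 {
        CURRENT_RNG.with(|rng| rng.borrow_mut().gen())
    }

SmallRng trades cryptographic strength for speed, which is acceptable here
because trace/span ids only need to be unlikely to collide, not unpredictable.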
Relates #808

---
 opentelemetry-sdk/Cargo.toml                    | 3 ++-
 opentelemetry-sdk/src/trace/id_generator/mod.rs | 4 ++--
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/opentelemetry-sdk/Cargo.toml b/opentelemetry-sdk/Cargo.toml
index a755a75734..ec7c73be5b 100644
--- a/opentelemetry-sdk/Cargo.toml
+++ b/opentelemetry-sdk/Cargo.toml
@@ -21,7 +21,7 @@ futures-util = { version = "0.3.17", default-features = false, features = ["std"
 once_cell = "1.10"
 ordered-float = "4.0"
 percent-encoding = { version = "2.0", optional = true }
-rand = { version = "0.8", default-features = false, features = ["std", "std_rng"], optional = true }
+rand = { version = "0.8", default-features = false, features = ["std", "std_rng","small_rng"], optional = true }
 glob = {version = "0.3.1", optional =true}
 serde = { version = "1.0", features = ["derive", "rc"], optional = true }
 serde_json = { version = "1", optional = true }
@@ -68,6 +68,7 @@ harness = false
 [[bench]]
 name = "trace"
 harness = false
+required-features = ["testing"]

 [[bench]]
 name = "batch_span_processor"
diff --git a/opentelemetry-sdk/src/trace/id_generator/mod.rs b/opentelemetry-sdk/src/trace/id_generator/mod.rs
index b0c73e6a1c..27cea4cb19 100644
--- a/opentelemetry-sdk/src/trace/id_generator/mod.rs
+++ b/opentelemetry-sdk/src/trace/id_generator/mod.rs
@@ -2,7 +2,7 @@
 pub(super) mod aws;

 use opentelemetry::trace::{SpanId, TraceId};
-use rand::{rngs, Rng};
+use rand::{rngs, Rng, SeedableRng};
 use std::cell::RefCell;
 use std::fmt;

@@ -35,5 +35,5 @@ impl IdGenerator for RandomIdGenerator {

 thread_local! {
     /// Store random number generator for each thread
-    static CURRENT_RNG: RefCell<rngs::ThreadRng> = RefCell::new(rngs::ThreadRng::default());
+    static CURRENT_RNG: RefCell<rngs::SmallRng> = RefCell::new(rngs::SmallRng::from_entropy());
 }

From dd03fdae1e1109d7e05a523363549de2b99e1e3f Mon Sep 17 00:00:00 2001
From: Kristopher Wuollett
Date: Sun, 12 Nov 2023 12:37:04 -0600
Subject: [PATCH 23/68] Add generated modules that output const &str for
 `tracing` compatibility (#1334)

* feat: add generated modules that output const &str for tracing compatibility

* fix: add tracing as a dev-dependency for doc examples

* fix: remove unused code

* fix: remove tracing examples from semconv docs

* docs: remove extra whitespace that was added previously
---
 opentelemetry-datadog/src/exporter/mod.rs       |   4 +-
 .../src/exporter/config/mod.rs                  |   4 +-
 .../examples/basic-otlp-http/src/main.rs        |   4 +-
 .../examples/basic-otlp/src/main.rs             |   4 +-
 .../src/main.rs                                 |   6 +-
 .../tests/integration_test.rs                   |  12 +-
 .../CHANGELOG.md                                |   1 +
 .../scripts/templates/header_resource.rs        |   5 +-
 .../scripts/templates/header_trace.rs           |   7 +-
 .../templates/semantic_attributes.rs.j2         |   4 +-
 .../src/resource.rs                             | 216 ++++-----
 .../src/trace.rs                                | 459 ++++++++----------
 opentelemetry-stackdriver/src/lib.rs            |  80 +--
 opentelemetry-zipkin/src/exporter/mod.rs        |   4 +-
 14 files changed, 396 insertions(+), 414 deletions(-)

diff --git a/opentelemetry-datadog/src/exporter/mod.rs b/opentelemetry-datadog/src/exporter/mod.rs
index 9b07183390..576b5cb2e8 100644
--- a/opentelemetry-datadog/src/exporter/mod.rs
+++ b/opentelemetry-datadog/src/exporter/mod.rs
@@ -212,7 +212,7 @@ impl DatadogPipelineBuilder {
             cfg.resource = Cow::Owned(Resource::new(
                 cfg.resource
                     .iter()
-                    .filter(|(k, _v)| **k != semcov::resource::SERVICE_NAME)
+                    .filter(|(k, _v)| k.as_str() != semcov::resource::SERVICE_NAME)
                     .map(|(k, v)| KeyValue::new(k.clone(), v.clone())),
             ));
             cfg
@@ -226,7 +226,7 @@ impl DatadogPipelineBuilder {
         } else {
             let service_name = SdkProvidedResourceDetector
.detect(Duration::from_secs(0)) - .get(semcov::resource::SERVICE_NAME) + .get(semcov::resource::SERVICE_NAME.into()) .unwrap() .to_string(); ( diff --git a/opentelemetry-jaeger/src/exporter/config/mod.rs b/opentelemetry-jaeger/src/exporter/config/mod.rs index ea598b417d..2967fd1879 100644 --- a/opentelemetry-jaeger/src/exporter/config/mod.rs +++ b/opentelemetry-jaeger/src/exporter/config/mod.rs @@ -61,7 +61,7 @@ fn build_config_and_process( let service_name = service_name_opt.unwrap_or_else(|| { config .resource - .get(semcov::resource::SERVICE_NAME) + .get(semcov::resource::SERVICE_NAME.into()) .map(|v| v.to_string()) .unwrap_or_else(|| "unknown_service".to_string()) }); @@ -70,7 +70,7 @@ fn build_config_and_process( let mut tags = config .resource .iter() - .filter(|(key, _)| **key != semcov::resource::SERVICE_NAME) + .filter(|(key, _)| key.as_str() != semcov::resource::SERVICE_NAME) .map(|(key, value)| KeyValue::new(key.clone(), value.clone())) .collect::<Vec<_>>(); diff --git a/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs b/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs index 36b4fdae1f..0f320dcb8d 100644 --- a/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs +++ b/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs @@ -91,11 +91,11 @@ async fn main() -> Result<(), Box> { "Nice operation!".to_string(), vec![Key::new("bogons").i64(100)], ); - span.set_attribute(ANOTHER_KEY.string("yes")); + span.set_attribute(KeyValue::new(ANOTHER_KEY, "yes")); tracer.in_span("Sub operation...", |cx| { let span = cx.span(); - span.set_attribute(LEMONS_KEY.string("five")); + span.set_attribute(KeyValue::new(LEMONS_KEY, "five")); span.add_event("Sub span event", vec![]); }); diff --git a/opentelemetry-otlp/examples/basic-otlp/src/main.rs b/opentelemetry-otlp/examples/basic-otlp/src/main.rs index 7892151176..1bf5f7e421 100644 --- a/opentelemetry-otlp/examples/basic-otlp/src/main.rs +++ b/opentelemetry-otlp/examples/basic-otlp/src/main.rs @@ -121,13 +121,13 @@ async fn main() -> Result<(), Box> { "Nice operation!".to_string(), vec![Key::new("bogons").i64(100)], ); - span.set_attribute(ANOTHER_KEY.string("yes")); + span.set_attribute(KeyValue::new(ANOTHER_KEY, "yes")); info!(target: "my-target", "hello from {}. My price is {}.
I am also inside a Span!", "banana", 2.99); tracer.in_span("Sub operation...", |cx| { let span = cx.span(); - span.set_attribute(LEMONS_KEY.string("five")); + span.set_attribute(KeyValue::new(LEMONS_KEY, "five")); span.add_event("Sub span event", vec![]); diff --git a/opentelemetry-otlp/examples/external-otlp-grpcio-async-std/src/main.rs b/opentelemetry-otlp/examples/external-otlp-grpcio-async-std/src/main.rs index 72c5bf5f9b..1555eab935 100644 --- a/opentelemetry-otlp/examples/external-otlp-grpcio-async-std/src/main.rs +++ b/opentelemetry-otlp/examples/external-otlp-grpcio-async-std/src/main.rs @@ -11,7 +11,7 @@ use opentelemetry::{ global::{shutdown_tracer_provider, tracer}, trace::TraceError, trace::{TraceContextExt, Tracer}, - Key, + Key, KeyValue, }; use opentelemetry_otlp::WithExportConfig; use std::{ @@ -85,11 +85,11 @@ async fn main() -> Result<(), Box> { "Nice operation!".to_string(), vec![Key::new("bogons").i64(100)], ); - span.set_attribute(ANOTHER_KEY.string("yes")); + span.set_attribute(KeyValue::new(ANOTHER_KEY, "yes")); tracer.in_span("Sub operation...", |cx| { let span = cx.span(); - span.set_attribute(LEMONS_KEY.string("five")); + span.set_attribute(KeyValue::new(LEMONS_KEY, "five")); span.add_event("Sub span event", vec![]); }); diff --git a/opentelemetry-prometheus/tests/integration_test.rs b/opentelemetry-prometheus/tests/integration_test.rs index 20dabd4076..0deb0d3105 100644 --- a/opentelemetry-prometheus/tests/integration_test.rs +++ b/opentelemetry-prometheus/tests/integration_test.rs @@ -327,9 +327,9 @@ fn prometheus_exporter_integration() { .merge(&mut Resource::new( vec![ // always specify service.name because the default depends on the running OS - SERVICE_NAME.string("prometheus_test"), + KeyValue::new(SERVICE_NAME, "prometheus_test"), // Overwrite the semconv.TelemetrySDKVersionKey value so we don't need to update every version - TELEMETRY_SDK_VERSION.string("latest"), + KeyValue::new(TELEMETRY_SDK_VERSION, "latest"), ] .into_iter() .chain(tc.custom_resource_attrs.into_iter()), @@ -390,9 +390,9 @@ fn multiple_scopes() { ) .merge(&mut Resource::new(vec![ // always specify service.name because the default depends on the running OS - SERVICE_NAME.string("prometheus_test"), + KeyValue::new(SERVICE_NAME, "prometheus_test"), // Overwrite the semconv.TelemetrySDKVersionKey value so we don't need to update every version - TELEMETRY_SDK_VERSION.string("latest"), + KeyValue::new(TELEMETRY_SDK_VERSION, "latest"), ])); let provider = SdkMeterProvider::builder() @@ -722,9 +722,9 @@ fn duplicate_metrics() { .merge(&mut Resource::new( vec![ // always specify service.name because the default depends on the running OS - SERVICE_NAME.string("prometheus_test"), + KeyValue::new(SERVICE_NAME, "prometheus_test"), // Overwrite the semconv.TelemetrySDKVersionKey value so we don't need to update every version - TELEMETRY_SDK_VERSION.string("latest"), + KeyValue::new(TELEMETRY_SDK_VERSION, "latest"), ] .into_iter() .chain(tc.custom_resource_attrs.into_iter()), diff --git a/opentelemetry-semantic-conventions/CHANGELOG.md b/opentelemetry-semantic-conventions/CHANGELOG.md index 6002c93754..70fa0c5629 100644 --- a/opentelemetry-semantic-conventions/CHANGELOG.md +++ b/opentelemetry-semantic-conventions/CHANGELOG.md @@ -6,6 +6,7 @@ ### Changed +- Replaced Key constants with &'static str [#1320] - Bump MSRV to 1.65 [#1318](https://github.com/open-telemetry/opentelemetry-rust/pull/1318) - Bump MSRV to 1.64 [#1203](https://github.com/open-telemetry/opentelemetry-rust/pull/1203) diff --git 
a/opentelemetry-semantic-conventions/scripts/templates/header_resource.rs b/opentelemetry-semantic-conventions/scripts/templates/header_resource.rs index af7c416c1e..f816eafc3a 100644 --- a/opentelemetry-semantic-conventions/scripts/templates/header_resource.rs +++ b/opentelemetry-semantic-conventions/scripts/templates/header_resource.rs @@ -8,13 +8,14 @@ //! ## Usage //! //! ``` +//! use opentelemetry::KeyValue; //! use opentelemetry_sdk::{trace::{config, TracerProvider}, Resource}; //! use opentelemetry_semantic_conventions as semconv; //! //! let _tracer = TracerProvider::builder() //! .with_config(config().with_resource(Resource::new(vec![ -//! semconv::resource::SERVICE_NAME.string("my-service"), -//! semconv::resource::SERVICE_NAMESPACE.string("my-namespace"), +//! KeyValue::new(semconv::resource::SERVICE_NAME, "my-service"), +//! KeyValue::new(semconv::resource::SERVICE_NAMESPACE, "my-namespace"), //! ]))) //! .build(); //! ``` diff --git a/opentelemetry-semantic-conventions/scripts/templates/header_trace.rs b/opentelemetry-semantic-conventions/scripts/templates/header_trace.rs index 7487abb82b..ca8a1b2f12 100644 --- a/opentelemetry-semantic-conventions/scripts/templates/header_trace.rs +++ b/opentelemetry-semantic-conventions/scripts/templates/header_trace.rs @@ -8,15 +8,16 @@ //! ## Usage //! //! ``` +//! use opentelemetry::KeyValue; //! use opentelemetry::{global, trace::Tracer as _}; -//! use opentelemetry_semantic_conventions as semcov; +//! use opentelemetry_semantic_conventions as semconv; //! //! let tracer = global::tracer("my-component"); //! let _span = tracer //! .span_builder("span-name") //! .with_attributes(vec![ -//! semcov::trace::NET_PEER_NAME.string("example.org"), -//! semcov::trace::NET_PEER_PORT.i64(80), +//! KeyValue::new(semconv::trace::NET_PEER_NAME, "example.org"), +//! KeyValue::new(semconv::trace::NET_PEER_PORT, 80i64), //! ]) //! .start(&tracer); //! ``` diff --git a/opentelemetry-semantic-conventions/scripts/templates/semantic_attributes.rs.j2 b/opentelemetry-semantic-conventions/scripts/templates/semantic_attributes.rs.j2 index fcbb1bc2e1..604d6233cf 100644 --- a/opentelemetry-semantic-conventions/scripts/templates/semantic_attributes.rs.j2 +++ b/opentelemetry-semantic-conventions/scripts/templates/semantic_attributes.rs.j2 @@ -6,8 +6,6 @@ {% include 'header_' + conventions + '.rs' %} -use opentelemetry::Key; - {%- for attribute in attributes if attribute.is_local and not attribute.ref %} /// {% filter escape %}{{attribute.brief | to_doc_brief}}.{% endfilter %} @@ -28,5 +26,5 @@ use opentelemetry::Key; {%- if (attribute.stability | string()) == "StabilityLevel.DEPRECATED" %} #[deprecated] {%- endif %} -pub const {{attribute.fqn | to_const_name}}: Key = Key::from_static_str("{{attribute.fqn}}"); +pub const {{attribute.fqn | to_const_name}}: &str = "{{attribute.fqn}}"; {%- endfor %} diff --git a/opentelemetry-semantic-conventions/src/resource.rs b/opentelemetry-semantic-conventions/src/resource.rs index 6632bc0d18..d84510b46d 100644 --- a/opentelemetry-semantic-conventions/src/resource.rs +++ b/opentelemetry-semantic-conventions/src/resource.rs @@ -14,19 +14,18 @@ //! ## Usage //! //! ``` +//! use opentelemetry::KeyValue; //! use opentelemetry_sdk::{trace::{config, TracerProvider}, Resource}; //! use opentelemetry_semantic_conventions as semconv; //! //! let _tracer = TracerProvider::builder() //! .with_config(config().with_resource(Resource::new(vec![ -//! semconv::resource::SERVICE_NAME.string("my-service"), -//! 
semconv::resource::SERVICE_NAMESPACE.string("my-namespace"), +//! KeyValue::new(semconv::resource::SERVICE_NAME, "my-service"), +//! KeyValue::new(semconv::resource::SERVICE_NAMESPACE, "my-namespace"), //! ]))) //! .build(); //! ``` -use opentelemetry::Key; - /// Array of brand name and version separated by a space. /// /// This value is intended to be taken from the [UA client hints API](https://wicg.github.io/ua-client-hints/#interface) (`navigator.userAgentData.brands`). @@ -36,7 +35,7 @@ use opentelemetry::Key; /// - ` Not A;Brand 99` /// - `Chromium 99` /// - `Chrome 99` -pub const BROWSER_BRANDS: Key = Key::from_static_str("browser.brands"); +pub const BROWSER_BRANDS: &str = "browser.brands"; /// The platform on which the browser is running. /// @@ -48,12 +47,12 @@ pub const BROWSER_BRANDS: Key = Key::from_static_str("browser.brands"); /// - `Windows` /// - `macOS` /// - `Android` -pub const BROWSER_PLATFORM: Key = Key::from_static_str("browser.platform"); +pub const BROWSER_PLATFORM: &str = "browser.platform"; /// A boolean that is true if the browser is running on a mobile device. /// /// This value is intended to be taken from the [UA client hints API](https://wicg.github.io/ua-client-hints/#interface) (`navigator.userAgentData.mobile`). If unavailable, this attribute SHOULD be left unset. -pub const BROWSER_MOBILE: Key = Key::from_static_str("browser.mobile"); +pub const BROWSER_MOBILE: &str = "browser.mobile"; /// Preferred language of the user using the browser. /// @@ -65,10 +64,10 @@ pub const BROWSER_MOBILE: Key = Key::from_static_str("browser.mobile"); /// - `en-US` /// - `fr` /// - `fr-FR` -pub const BROWSER_LANGUAGE: Key = Key::from_static_str("browser.language"); +pub const BROWSER_LANGUAGE: &str = "browser.language"; /// Name of the cloud provider. -pub const CLOUD_PROVIDER: Key = Key::from_static_str("cloud.provider"); +pub const CLOUD_PROVIDER: &str = "cloud.provider"; /// The cloud account ID the resource is assigned to. /// @@ -76,7 +75,7 @@ pub const CLOUD_PROVIDER: Key = Key::from_static_str("cloud.provider"); /// /// - `111111111111` /// - `opentelemetry` -pub const CLOUD_ACCOUNT_ID: Key = Key::from_static_str("cloud.account.id"); +pub const CLOUD_ACCOUNT_ID: &str = "cloud.account.id"; /// The geographical region the resource is running. /// @@ -86,7 +85,7 @@ pub const CLOUD_ACCOUNT_ID: Key = Key::from_static_str("cloud.account.id"); /// /// - `us-central1` /// - `us-east-1` -pub const CLOUD_REGION: Key = Key::from_static_str("cloud.region"); +pub const CLOUD_REGION: &str = "cloud.region"; /// Cloud provider-specific native identifier of the monitored cloud resource (e.g. an [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) on AWS, a [fully qualified resource ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id) on Azure, a [full resource name](https://cloud.google.com/apis/design/resource_names#full_resource_name) on GCP). /// @@ -113,7 +112,7 @@ pub const CLOUD_REGION: Key = Key::from_static_str("cloud.region"); /// - `arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function` /// - `//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID` /// - `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/` -pub const CLOUD_RESOURCE_ID: Key = Key::from_static_str("cloud.resource_id"); +pub const CLOUD_RESOURCE_ID: &str = "cloud.resource_id"; /// Cloud regions often have multiple, isolated locations known as zones to increase availability. 
Availability zone represents the zone where the resource is running. /// @@ -122,43 +121,43 @@ pub const CLOUD_RESOURCE_ID: Key = Key::from_static_str("cloud.resource_id"); /// # Examples /// /// - `us-east-1c` -pub const CLOUD_AVAILABILITY_ZONE: Key = Key::from_static_str("cloud.availability_zone"); +pub const CLOUD_AVAILABILITY_ZONE: &str = "cloud.availability_zone"; /// The cloud platform in use. /// /// The prefix of the service SHOULD match the one specified in `cloud.provider`. -pub const CLOUD_PLATFORM: Key = Key::from_static_str("cloud.platform"); +pub const CLOUD_PLATFORM: &str = "cloud.platform"; /// The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). /// /// # Examples /// /// - `arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9` -pub const AWS_ECS_CONTAINER_ARN: Key = Key::from_static_str("aws.ecs.container.arn"); +pub const AWS_ECS_CONTAINER_ARN: &str = "aws.ecs.container.arn"; /// The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). /// /// # Examples /// /// - `arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster` -pub const AWS_ECS_CLUSTER_ARN: Key = Key::from_static_str("aws.ecs.cluster.arn"); +pub const AWS_ECS_CLUSTER_ARN: &str = "aws.ecs.cluster.arn"; /// The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) for an ECS task. -pub const AWS_ECS_LAUNCHTYPE: Key = Key::from_static_str("aws.ecs.launchtype"); +pub const AWS_ECS_LAUNCHTYPE: &str = "aws.ecs.launchtype"; /// The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). /// /// # Examples /// /// - `arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b` -pub const AWS_ECS_TASK_ARN: Key = Key::from_static_str("aws.ecs.task.arn"); +pub const AWS_ECS_TASK_ARN: &str = "aws.ecs.task.arn"; /// The task definition family this task definition is a member of. /// /// # Examples /// /// - `opentelemetry-family` -pub const AWS_ECS_TASK_FAMILY: Key = Key::from_static_str("aws.ecs.task.family"); +pub const AWS_ECS_TASK_FAMILY: &str = "aws.ecs.task.family"; /// The revision for this task definition. /// @@ -166,14 +165,14 @@ pub const AWS_ECS_TASK_FAMILY: Key = Key::from_static_str("aws.ecs.task.family") /// /// - `8` /// - `26` -pub const AWS_ECS_TASK_REVISION: Key = Key::from_static_str("aws.ecs.task.revision"); +pub const AWS_ECS_TASK_REVISION: &str = "aws.ecs.task.revision"; /// The ARN of an EKS cluster. /// /// # Examples /// /// - `arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster` -pub const AWS_EKS_CLUSTER_ARN: Key = Key::from_static_str("aws.eks.cluster.arn"); +pub const AWS_EKS_CLUSTER_ARN: &str = "aws.eks.cluster.arn"; /// The name(s) of the AWS log group(s) an application is writing to. /// @@ -183,7 +182,7 @@ pub const AWS_EKS_CLUSTER_ARN: Key = Key::from_static_str("aws.eks.cluster.arn") /// /// - `/aws/lambda/my-function` /// - `opentelemetry-service` -pub const AWS_LOG_GROUP_NAMES: Key = Key::from_static_str("aws.log.group.names"); +pub const AWS_LOG_GROUP_NAMES: &str = "aws.log.group.names"; /// The Amazon Resource Name(s) (ARN) of the AWS log group(s). 
/// @@ -192,14 +191,14 @@ pub const AWS_LOG_GROUP_NAMES: Key = Key::from_static_str("aws.log.group.names") /// # Examples /// /// - `arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*` -pub const AWS_LOG_GROUP_ARNS: Key = Key::from_static_str("aws.log.group.arns"); +pub const AWS_LOG_GROUP_ARNS: &str = "aws.log.group.arns"; /// The name(s) of the AWS log stream(s) an application is writing to. /// /// # Examples /// /// - `logs/main/10838bed-421f-43ef-870a-f43feacbbb5b` -pub const AWS_LOG_STREAM_NAMES: Key = Key::from_static_str("aws.log.stream.names"); +pub const AWS_LOG_STREAM_NAMES: &str = "aws.log.stream.names"; /// The ARN(s) of the AWS log stream(s). /// @@ -208,7 +207,7 @@ pub const AWS_LOG_STREAM_NAMES: Key = Key::from_static_str("aws.log.stream.names /// # Examples /// /// - `arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b` -pub const AWS_LOG_STREAM_ARNS: Key = Key::from_static_str("aws.log.stream.arns"); +pub const AWS_LOG_STREAM_ARNS: &str = "aws.log.stream.arns"; /// The name of the Cloud Run [execution](https://cloud.google.com/run/docs/managing/job-executions) being run for the Job, as set by the [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) environment variable. /// @@ -216,7 +215,7 @@ pub const AWS_LOG_STREAM_ARNS: Key = Key::from_static_str("aws.log.stream.arns") /// /// - `job-name-xxxx` /// - `sample-job-mdw84` -pub const GCP_CLOUD_RUN_JOB_EXECUTION: Key = Key::from_static_str("gcp.cloud_run.job.execution"); +pub const GCP_CLOUD_RUN_JOB_EXECUTION: &str = "gcp.cloud_run.job.execution"; /// The index for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) environment variable. /// @@ -224,7 +223,7 @@ pub const GCP_CLOUD_RUN_JOB_EXECUTION: Key = Key::from_static_str("gcp.cloud_run /// /// - `0` /// - `1` -pub const GCP_CLOUD_RUN_JOB_TASK_INDEX: Key = Key::from_static_str("gcp.cloud_run.job.task_index"); +pub const GCP_CLOUD_RUN_JOB_TASK_INDEX: &str = "gcp.cloud_run.job.task_index"; /// The instance name of a GCE instance. This is the value provided by `host.name`, the visible name of the instance in the Cloud Console UI, and the prefix for the default hostname of the instance as defined by the [default internal DNS name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). /// @@ -232,7 +231,7 @@ pub const GCP_CLOUD_RUN_JOB_TASK_INDEX: Key = Key::from_static_str("gcp.cloud_ru /// /// - `instance-1` /// - `my-vm-name` -pub const GCP_GCE_INSTANCE_NAME: Key = Key::from_static_str("gcp.gce.instance.name"); +pub const GCP_GCE_INSTANCE_NAME: &str = "gcp.gce.instance.name"; /// The hostname of a GCE instance. This is the full value of the default or [custom hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). /// @@ -240,43 +239,42 @@ pub const GCP_GCE_INSTANCE_NAME: Key = Key::from_static_str("gcp.gce.instance.na /// /// - `my-host1234.example.com` /// - `sample-vm.us-west1-b.c.my-project.internal` -pub const GCP_GCE_INSTANCE_HOSTNAME: Key = Key::from_static_str("gcp.gce.instance.hostname"); +pub const GCP_GCE_INSTANCE_HOSTNAME: &str = "gcp.gce.instance.hostname"; /// Time and date the release was created. 
/// /// # Examples /// /// - `2022-10-23T18:00:42Z` -pub const HEROKU_RELEASE_CREATION_TIMESTAMP: Key = - Key::from_static_str("heroku.release.creation_timestamp"); +pub const HEROKU_RELEASE_CREATION_TIMESTAMP: &str = "heroku.release.creation_timestamp"; /// Commit hash for the current release. /// /// # Examples /// /// - `e6134959463efd8966b20e75b913cafe3f5ec` -pub const HEROKU_RELEASE_COMMIT: Key = Key::from_static_str("heroku.release.commit"); +pub const HEROKU_RELEASE_COMMIT: &str = "heroku.release.commit"; /// Unique identifier for the application. /// /// # Examples /// /// - `2daa2797-e42b-4624-9322-ec3f968df4da` -pub const HEROKU_APP_ID: Key = Key::from_static_str("heroku.app.id"); +pub const HEROKU_APP_ID: &str = "heroku.app.id"; /// Container name used by container runtime. /// /// # Examples /// /// - `opentelemetry-autoconf` -pub const CONTAINER_NAME: Key = Key::from_static_str("container.name"); +pub const CONTAINER_NAME: &str = "container.name"; /// Container ID. Usually a UUID, as for example used to [identify Docker containers](https://docs.docker.com/engine/reference/run/#container-identification). The UUID might be abbreviated. /// /// # Examples /// /// - `a3bf90e006b2` -pub const CONTAINER_ID: Key = Key::from_static_str("container.id"); +pub const CONTAINER_ID: &str = "container.id"; /// The container runtime managing this container. /// @@ -285,21 +283,21 @@ pub const CONTAINER_ID: Key = Key::from_static_str("container.id"); /// - `docker` /// - `containerd` /// - `rkt` -pub const CONTAINER_RUNTIME: Key = Key::from_static_str("container.runtime"); +pub const CONTAINER_RUNTIME: &str = "container.runtime"; /// Name of the image the container was built on. /// /// # Examples /// /// - `gcr.io/opentelemetry/operator` -pub const CONTAINER_IMAGE_NAME: Key = Key::from_static_str("container.image.name"); +pub const CONTAINER_IMAGE_NAME: &str = "container.image.name"; /// Container image tag. /// /// # Examples /// /// - `0.1` -pub const CONTAINER_IMAGE_TAG: Key = Key::from_static_str("container.image.tag"); +pub const CONTAINER_IMAGE_TAG: &str = "container.image.tag"; /// Runtime specific image identifier. Usually a hash algorithm followed by a UUID. /// @@ -310,7 +308,7 @@ pub const CONTAINER_IMAGE_TAG: Key = Key::from_static_str("container.image.tag") /// # Examples /// /// - `sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f` -pub const CONTAINER_IMAGE_ID: Key = Key::from_static_str("container.image.id"); +pub const CONTAINER_IMAGE_ID: &str = "container.image.id"; /// The command used to run the container (i.e. the command name). /// @@ -319,21 +317,21 @@ pub const CONTAINER_IMAGE_ID: Key = Key::from_static_str("container.image.id"); /// # Examples /// /// - `otelcontribcol` -pub const CONTAINER_COMMAND: Key = Key::from_static_str("container.command"); +pub const CONTAINER_COMMAND: &str = "container.command"; /// The full command run by the container as a single string representing the full command. /// /// # Examples /// /// - `otelcontribcol --config config.yaml` -pub const CONTAINER_COMMAND_LINE: Key = Key::from_static_str("container.command_line"); +pub const CONTAINER_COMMAND_LINE: &str = "container.command_line"; /// All the command arguments (including the command/executable itself) run by the container. 
/// /// # Examples /// /// - `otelcontribcol, --config, config.yaml` -pub const CONTAINER_COMMAND_ARGS: Key = Key::from_static_str("container.command_args"); +pub const CONTAINER_COMMAND_ARGS: &str = "container.command_args"; /// Name of the [deployment environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka deployment tier). /// @@ -341,7 +339,7 @@ pub const CONTAINER_COMMAND_ARGS: Key = Key::from_static_str("container.command_ /// /// - `staging` /// - `production` -pub const DEPLOYMENT_ENVIRONMENT: Key = Key::from_static_str("deployment.environment"); +pub const DEPLOYMENT_ENVIRONMENT: &str = "deployment.environment"; /// A unique identifier representing the device. /// @@ -350,7 +348,7 @@ pub const DEPLOYMENT_ENVIRONMENT: Key = Key::from_static_str("deployment.environ /// # Examples /// /// - `2ab2916d-a51f-4ac8-80ee-45ac31a28092` -pub const DEVICE_ID: Key = Key::from_static_str("device.id"); +pub const DEVICE_ID: &str = "device.id"; /// The model identifier for the device. /// @@ -360,7 +358,7 @@ pub const DEVICE_ID: Key = Key::from_static_str("device.id"); /// /// - `iPhone3,4` /// - `SM-G920F` -pub const DEVICE_MODEL_IDENTIFIER: Key = Key::from_static_str("device.model.identifier"); +pub const DEVICE_MODEL_IDENTIFIER: &str = "device.model.identifier"; /// The marketing name for the device model. /// @@ -370,7 +368,7 @@ pub const DEVICE_MODEL_IDENTIFIER: Key = Key::from_static_str("device.model.iden /// /// - `iPhone 6s Plus` /// - `Samsung Galaxy S6` -pub const DEVICE_MODEL_NAME: Key = Key::from_static_str("device.model.name"); +pub const DEVICE_MODEL_NAME: &str = "device.model.name"; /// The name of the device manufacturer. /// @@ -380,7 +378,7 @@ pub const DEVICE_MODEL_NAME: Key = Key::from_static_str("device.model.name"); /// /// - `Apple` /// - `Samsung` -pub const DEVICE_MANUFACTURER: Key = Key::from_static_str("device.manufacturer"); +pub const DEVICE_MANUFACTURER: &str = "device.manufacturer"; /// The name of the single function that this runtime instance executes. /// @@ -405,7 +403,7 @@ pub const DEVICE_MANUFACTURER: Key = Key::from_static_str("device.manufacturer") /// /// - `my-function` /// - `myazurefunctionapp/some-function-name` -pub const FAAS_NAME: Key = Key::from_static_str("faas.name"); +pub const FAAS_NAME: &str = "faas.name"; /// The immutable version of the function being executed. /// @@ -423,7 +421,7 @@ pub const FAAS_NAME: Key = Key::from_static_str("faas.name"); /// /// - `26` /// - `pinkfroid-00002` -pub const FAAS_VERSION: Key = Key::from_static_str("faas.version"); +pub const FAAS_VERSION: &str = "faas.version"; /// The execution environment ID as a string, that will be potentially reused for other invocations to the same function/function version. /// @@ -432,7 +430,7 @@ pub const FAAS_VERSION: Key = Key::from_static_str("faas.version"); /// # Examples /// /// - `2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de` -pub const FAAS_INSTANCE: Key = Key::from_static_str("faas.instance"); +pub const FAAS_INSTANCE: &str = "faas.instance"; /// The amount of memory available to the serverless function converted to Bytes. /// @@ -441,31 +439,31 @@ pub const FAAS_INSTANCE: Key = Key::from_static_str("faas.instance"); /// # Examples /// /// - `134217728` -pub const FAAS_MAX_MEMORY: Key = Key::from_static_str("faas.max_memory"); +pub const FAAS_MAX_MEMORY: &str = "faas.max_memory"; /// Unique host ID. For Cloud, this must be the instance_id assigned by the cloud provider. For non-containerized systems, this should be the `machine-id`. 
See the table below for the sources to use to determine the `machine-id` based on operating system. /// /// # Examples /// /// - `fdbf79e8af94cb7f9e8df36789187052` -pub const HOST_ID: Key = Key::from_static_str("host.id"); +pub const HOST_ID: &str = "host.id"; /// Name of the host. On Unix systems, it may contain what the hostname command returns, or the fully qualified hostname, or another name specified by the user. /// /// # Examples /// /// - `opentelemetry-test` -pub const HOST_NAME: Key = Key::from_static_str("host.name"); +pub const HOST_NAME: &str = "host.name"; /// Type of host. For Cloud, this must be the machine type. /// /// # Examples /// /// - `n1-standard-1` -pub const HOST_TYPE: Key = Key::from_static_str("host.type"); +pub const HOST_TYPE: &str = "host.type"; /// The CPU architecture the host system is running on. -pub const HOST_ARCH: Key = Key::from_static_str("host.arch"); +pub const HOST_ARCH: &str = "host.arch"; /// Name of the VM image or OS install the host was instantiated from. /// @@ -473,28 +471,28 @@ pub const HOST_ARCH: Key = Key::from_static_str("host.arch"); /// /// - `infra-ami-eks-worker-node-7d4ec78312` /// - `CentOS-8-x86_64-1905` -pub const HOST_IMAGE_NAME: Key = Key::from_static_str("host.image.name"); +pub const HOST_IMAGE_NAME: &str = "host.image.name"; /// VM image ID or host OS image ID. For Cloud, this value is from the provider. /// /// # Examples /// /// - `ami-07b06b442921831e5` -pub const HOST_IMAGE_ID: Key = Key::from_static_str("host.image.id"); +pub const HOST_IMAGE_ID: &str = "host.image.id"; /// The version string of the VM image or host OS as defined in [Version Attributes](README.md#version-attributes). /// /// # Examples /// /// - `0.1` -pub const HOST_IMAGE_VERSION: Key = Key::from_static_str("host.image.version"); +pub const HOST_IMAGE_VERSION: &str = "host.image.version"; /// The name of the cluster. /// /// # Examples /// /// - `opentelemetry-cluster` -pub const K8S_CLUSTER_NAME: Key = Key::from_static_str("k8s.cluster.name"); +pub const K8S_CLUSTER_NAME: &str = "k8s.cluster.name"; /// A pseudo-ID for the cluster, set to the UID of the `kube-system` namespace. /// @@ -524,49 +522,49 @@ pub const K8S_CLUSTER_NAME: Key = Key::from_static_str("k8s.cluster.name"); /// # Examples /// /// - `218fc5a9-a5f1-4b54-aa05-46717d0ab26d` -pub const K8S_CLUSTER_UID: Key = Key::from_static_str("k8s.cluster.uid"); +pub const K8S_CLUSTER_UID: &str = "k8s.cluster.uid"; /// The name of the Node. /// /// # Examples /// /// - `node-1` -pub const K8S_NODE_NAME: Key = Key::from_static_str("k8s.node.name"); +pub const K8S_NODE_NAME: &str = "k8s.node.name"; /// The UID of the Node. /// /// # Examples /// /// - `1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2` -pub const K8S_NODE_UID: Key = Key::from_static_str("k8s.node.uid"); +pub const K8S_NODE_UID: &str = "k8s.node.uid"; /// The name of the namespace that the pod is running in. /// /// # Examples /// /// - `default` -pub const K8S_NAMESPACE_NAME: Key = Key::from_static_str("k8s.namespace.name"); +pub const K8S_NAMESPACE_NAME: &str = "k8s.namespace.name"; /// The UID of the Pod. /// /// # Examples /// /// - `275ecb36-5aa8-4c2a-9c47-d8bb681b9aff` -pub const K8S_POD_UID: Key = Key::from_static_str("k8s.pod.uid"); +pub const K8S_POD_UID: &str = "k8s.pod.uid"; /// The name of the Pod. 
/// /// # Examples /// /// - `opentelemetry-pod-autoconf` -pub const K8S_POD_NAME: Key = Key::from_static_str("k8s.pod.name"); +pub const K8S_POD_NAME: &str = "k8s.pod.name"; /// The name of the Container from Pod specification, must be unique within a Pod. Container runtime usually uses different globally unique name (`container.name`). /// /// # Examples /// /// - `redis` -pub const K8S_CONTAINER_NAME: Key = Key::from_static_str("k8s.container.name"); +pub const K8S_CONTAINER_NAME: &str = "k8s.container.name"; /// Number of times the container was restarted. This attribute can be used to identify a particular container (running or stopped) within a container spec. /// @@ -574,94 +572,94 @@ pub const K8S_CONTAINER_NAME: Key = Key::from_static_str("k8s.container.name"); /// /// - `0` /// - `2` -pub const K8S_CONTAINER_RESTART_COUNT: Key = Key::from_static_str("k8s.container.restart_count"); +pub const K8S_CONTAINER_RESTART_COUNT: &str = "k8s.container.restart_count"; /// The UID of the ReplicaSet. /// /// # Examples /// /// - `275ecb36-5aa8-4c2a-9c47-d8bb681b9aff` -pub const K8S_REPLICASET_UID: Key = Key::from_static_str("k8s.replicaset.uid"); +pub const K8S_REPLICASET_UID: &str = "k8s.replicaset.uid"; /// The name of the ReplicaSet. /// /// # Examples /// /// - `opentelemetry` -pub const K8S_REPLICASET_NAME: Key = Key::from_static_str("k8s.replicaset.name"); +pub const K8S_REPLICASET_NAME: &str = "k8s.replicaset.name"; /// The UID of the Deployment. /// /// # Examples /// /// - `275ecb36-5aa8-4c2a-9c47-d8bb681b9aff` -pub const K8S_DEPLOYMENT_UID: Key = Key::from_static_str("k8s.deployment.uid"); +pub const K8S_DEPLOYMENT_UID: &str = "k8s.deployment.uid"; /// The name of the Deployment. /// /// # Examples /// /// - `opentelemetry` -pub const K8S_DEPLOYMENT_NAME: Key = Key::from_static_str("k8s.deployment.name"); +pub const K8S_DEPLOYMENT_NAME: &str = "k8s.deployment.name"; /// The UID of the StatefulSet. /// /// # Examples /// /// - `275ecb36-5aa8-4c2a-9c47-d8bb681b9aff` -pub const K8S_STATEFULSET_UID: Key = Key::from_static_str("k8s.statefulset.uid"); +pub const K8S_STATEFULSET_UID: &str = "k8s.statefulset.uid"; /// The name of the StatefulSet. /// /// # Examples /// /// - `opentelemetry` -pub const K8S_STATEFULSET_NAME: Key = Key::from_static_str("k8s.statefulset.name"); +pub const K8S_STATEFULSET_NAME: &str = "k8s.statefulset.name"; /// The UID of the DaemonSet. /// /// # Examples /// /// - `275ecb36-5aa8-4c2a-9c47-d8bb681b9aff` -pub const K8S_DAEMONSET_UID: Key = Key::from_static_str("k8s.daemonset.uid"); +pub const K8S_DAEMONSET_UID: &str = "k8s.daemonset.uid"; /// The name of the DaemonSet. /// /// # Examples /// /// - `opentelemetry` -pub const K8S_DAEMONSET_NAME: Key = Key::from_static_str("k8s.daemonset.name"); +pub const K8S_DAEMONSET_NAME: &str = "k8s.daemonset.name"; /// The UID of the Job. /// /// # Examples /// /// - `275ecb36-5aa8-4c2a-9c47-d8bb681b9aff` -pub const K8S_JOB_UID: Key = Key::from_static_str("k8s.job.uid"); +pub const K8S_JOB_UID: &str = "k8s.job.uid"; /// The name of the Job. /// /// # Examples /// /// - `opentelemetry` -pub const K8S_JOB_NAME: Key = Key::from_static_str("k8s.job.name"); +pub const K8S_JOB_NAME: &str = "k8s.job.name"; /// The UID of the CronJob. /// /// # Examples /// /// - `275ecb36-5aa8-4c2a-9c47-d8bb681b9aff` -pub const K8S_CRONJOB_UID: Key = Key::from_static_str("k8s.cronjob.uid"); +pub const K8S_CRONJOB_UID: &str = "k8s.cronjob.uid"; /// The name of the CronJob. 
/// /// # Examples /// /// - `opentelemetry` -pub const K8S_CRONJOB_NAME: Key = Key::from_static_str("k8s.cronjob.name"); +pub const K8S_CRONJOB_NAME: &str = "k8s.cronjob.name"; /// The operating system type. -pub const OS_TYPE: Key = Key::from_static_str("os.type"); +pub const OS_TYPE: &str = "os.type"; /// Human readable (not intended to be parsed) OS version information, like e.g. reported by `ver` or `lsb_release -a` commands. /// @@ -669,7 +667,7 @@ pub const OS_TYPE: Key = Key::from_static_str("os.type"); /// /// - `Microsoft Windows [Version 10.0.18363.778]` /// - `Ubuntu 18.04.1 LTS` -pub const OS_DESCRIPTION: Key = Key::from_static_str("os.description"); +pub const OS_DESCRIPTION: &str = "os.description"; /// Human readable operating system name. /// @@ -678,7 +676,7 @@ pub const OS_DESCRIPTION: Key = Key::from_static_str("os.description"); /// - `iOS` /// - `Android` /// - `Ubuntu` -pub const OS_NAME: Key = Key::from_static_str("os.name"); +pub const OS_NAME: &str = "os.name"; /// The version string of the operating system as defined in [Version Attributes](/docs/resource/README.md#version-attributes). /// @@ -686,49 +684,49 @@ pub const OS_NAME: Key = Key::from_static_str("os.name"); /// /// - `14.2.1` /// - `18.04.1` -pub const OS_VERSION: Key = Key::from_static_str("os.version"); +pub const OS_VERSION: &str = "os.version"; /// Process identifier (PID). /// /// # Examples /// /// - `1234` -pub const PROCESS_PID: Key = Key::from_static_str("process.pid"); +pub const PROCESS_PID: &str = "process.pid"; /// Parent Process identifier (PID). /// /// # Examples /// /// - `111` -pub const PROCESS_PARENT_PID: Key = Key::from_static_str("process.parent_pid"); +pub const PROCESS_PARENT_PID: &str = "process.parent_pid"; /// The name of the process executable. On Linux based systems, can be set to the `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of `GetProcessImageFileNameW`. /// /// # Examples /// /// - `otelcol` -pub const PROCESS_EXECUTABLE_NAME: Key = Key::from_static_str("process.executable.name"); +pub const PROCESS_EXECUTABLE_NAME: &str = "process.executable.name"; /// The full path to the process executable. On Linux based systems, can be set to the target of `proc/[pid]/exe`. On Windows, can be set to the result of `GetProcessImageFileNameW`. /// /// # Examples /// /// - `/usr/bin/cmd/otelcol` -pub const PROCESS_EXECUTABLE_PATH: Key = Key::from_static_str("process.executable.path"); +pub const PROCESS_EXECUTABLE_PATH: &str = "process.executable.path"; /// The command used to launch the process (i.e. the command name). On Linux based systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to the first parameter extracted from `GetCommandLineW`. /// /// # Examples /// /// - `cmd/otelcol` -pub const PROCESS_COMMAND: Key = Key::from_static_str("process.command"); +pub const PROCESS_COMMAND: &str = "process.command"; /// The full command used to launch the process as a single string representing the full command. On Windows, can be set to the result of `GetCommandLineW`. Do not set this if you have to assemble it just for monitoring; use `process.command_args` instead. /// /// # Examples /// /// - `C:\cmd\otecol --config="my directory\config.yaml"` -pub const PROCESS_COMMAND_LINE: Key = Key::from_static_str("process.command_line"); +pub const PROCESS_COMMAND_LINE: &str = "process.command_line"; /// All the command arguments (including the command/executable itself) as received by the process. 
On Linux-based systems (and some other Unixoid systems supporting procfs), can be set according to the list of null-delimited strings extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be the full argv vector passed to `main`. /// @@ -736,35 +734,35 @@ pub const PROCESS_COMMAND_LINE: Key = Key::from_static_str("process.command_line /// /// - `cmd/otecol` /// - `--config=config.yaml` -pub const PROCESS_COMMAND_ARGS: Key = Key::from_static_str("process.command_args"); +pub const PROCESS_COMMAND_ARGS: &str = "process.command_args"; /// The username of the user that owns the process. /// /// # Examples /// /// - `root` -pub const PROCESS_OWNER: Key = Key::from_static_str("process.owner"); +pub const PROCESS_OWNER: &str = "process.owner"; /// The name of the runtime of this process. For compiled native binaries, this SHOULD be the name of the compiler. /// /// # Examples /// /// - `OpenJDK Runtime Environment` -pub const PROCESS_RUNTIME_NAME: Key = Key::from_static_str("process.runtime.name"); +pub const PROCESS_RUNTIME_NAME: &str = "process.runtime.name"; /// The version of the runtime of this process, as returned by the runtime without modification. /// /// # Examples /// /// - `14.0.2` -pub const PROCESS_RUNTIME_VERSION: Key = Key::from_static_str("process.runtime.version"); +pub const PROCESS_RUNTIME_VERSION: &str = "process.runtime.version"; /// An additional description about the runtime of the process, for example a specific vendor customization of the runtime environment. /// /// # Examples /// /// - `Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0` -pub const PROCESS_RUNTIME_DESCRIPTION: Key = Key::from_static_str("process.runtime.description"); +pub const PROCESS_RUNTIME_DESCRIPTION: &str = "process.runtime.description"; /// Logical name of the service. /// @@ -773,7 +771,7 @@ pub const PROCESS_RUNTIME_DESCRIPTION: Key = Key::from_static_str("process.runti /// # Examples /// /// - `shoppingcart` -pub const SERVICE_NAME: Key = Key::from_static_str("service.name"); +pub const SERVICE_NAME: &str = "service.name"; /// The version string of the service API or implementation. The format is not defined by these conventions. /// @@ -781,7 +779,7 @@ pub const SERVICE_NAME: Key = Key::from_static_str("service.name"); /// /// - `2.0.0` /// - `a01dbef8a` -pub const SERVICE_VERSION: Key = Key::from_static_str("service.version"); +pub const SERVICE_VERSION: &str = "service.version"; /// A namespace for `service.name`. /// @@ -790,7 +788,7 @@ pub const SERVICE_VERSION: Key = Key::from_static_str("service.version"); /// # Examples /// /// - `Shop` -pub const SERVICE_NAMESPACE: Key = Key::from_static_str("service.namespace"); +pub const SERVICE_NAMESPACE: &str = "service.namespace"; /// The string ID of the service instance. /// @@ -800,7 +798,7 @@ pub const SERVICE_NAMESPACE: Key = Key::from_static_str("service.namespace"); /// /// - `my-k8s-pod-deployment-1` /// - `627cc493-f310-47de-96bd-71410b7dec09` -pub const SERVICE_INSTANCE_ID: Key = Key::from_static_str("service.instance.id"); +pub const SERVICE_INSTANCE_ID: &str = "service.instance.id"; /// The name of the telemetry SDK as defined above. /// @@ -814,59 +812,59 @@ pub const SERVICE_INSTANCE_ID: Key = Key::from_static_str("service.instance.id") /// # Examples /// /// - `opentelemetry` -pub const TELEMETRY_SDK_NAME: Key = Key::from_static_str("telemetry.sdk.name"); +pub const TELEMETRY_SDK_NAME: &str = "telemetry.sdk.name"; /// The language of the telemetry SDK. 
-pub const TELEMETRY_SDK_LANGUAGE: Key = Key::from_static_str("telemetry.sdk.language"); +pub const TELEMETRY_SDK_LANGUAGE: &str = "telemetry.sdk.language"; /// The version string of the telemetry SDK. /// /// # Examples /// /// - `1.2.3` -pub const TELEMETRY_SDK_VERSION: Key = Key::from_static_str("telemetry.sdk.version"); +pub const TELEMETRY_SDK_VERSION: &str = "telemetry.sdk.version"; /// The version string of the auto instrumentation agent, if used. /// /// # Examples /// /// - `1.2.3` -pub const TELEMETRY_AUTO_VERSION: Key = Key::from_static_str("telemetry.auto.version"); +pub const TELEMETRY_AUTO_VERSION: &str = "telemetry.auto.version"; /// The name of the web engine. /// /// # Examples /// /// - `WildFly` -pub const WEBENGINE_NAME: Key = Key::from_static_str("webengine.name"); +pub const WEBENGINE_NAME: &str = "webengine.name"; /// The version of the web engine. /// /// # Examples /// /// - `21.0.0` -pub const WEBENGINE_VERSION: Key = Key::from_static_str("webengine.version"); +pub const WEBENGINE_VERSION: &str = "webengine.version"; /// Additional description of the web engine (e.g. detailed version and edition information). /// /// # Examples /// /// - `WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final` -pub const WEBENGINE_DESCRIPTION: Key = Key::from_static_str("webengine.description"); +pub const WEBENGINE_DESCRIPTION: &str = "webengine.description"; /// The name of the instrumentation scope - (`InstrumentationScope.Name` in OTLP). /// /// # Examples /// /// - `io.opentelemetry.contrib.mongodb` -pub const OTEL_SCOPE_NAME: Key = Key::from_static_str("otel.scope.name"); +pub const OTEL_SCOPE_NAME: &str = "otel.scope.name"; /// The version of the instrumentation scope - (`InstrumentationScope.Version` in OTLP). /// /// # Examples /// /// - `1.0.0` -pub const OTEL_SCOPE_VERSION: Key = Key::from_static_str("otel.scope.version"); +pub const OTEL_SCOPE_VERSION: &str = "otel.scope.version"; /// Deprecated, use the `otel.scope.name` attribute. /// @@ -874,7 +872,7 @@ pub const OTEL_SCOPE_VERSION: Key = Key::from_static_str("otel.scope.version"); /// /// - `io.opentelemetry.contrib.mongodb` #[deprecated] -pub const OTEL_LIBRARY_NAME: Key = Key::from_static_str("otel.library.name"); +pub const OTEL_LIBRARY_NAME: &str = "otel.library.name"; /// Deprecated, use the `otel.scope.version` attribute. /// @@ -882,4 +880,4 @@ pub const OTEL_LIBRARY_NAME: Key = Key::from_static_str("otel.library.name"); /// /// - `1.0.0` #[deprecated] -pub const OTEL_LIBRARY_VERSION: Key = Key::from_static_str("otel.library.version"); +pub const OTEL_LIBRARY_VERSION: &str = "otel.library.version"; diff --git a/opentelemetry-semantic-conventions/src/trace.rs b/opentelemetry-semantic-conventions/src/trace.rs index 17eb2a1a4b..5612cd1a86 100644 --- a/opentelemetry-semantic-conventions/src/trace.rs +++ b/opentelemetry-semantic-conventions/src/trace.rs @@ -14,21 +14,20 @@ //! ## Usage //! //! ``` +//! use opentelemetry::KeyValue; //! use opentelemetry::{global, trace::Tracer as _}; -//! use opentelemetry_semantic_conventions as semcov; +//! use opentelemetry_semantic_conventions as semconv; //! //! let tracer = global::tracer("my-component"); //! let _span = tracer //! .span_builder("span-name") //! .with_attributes(vec![ -//! semcov::trace::NET_PEER_NAME.string("example.org"), -//! semcov::trace::NET_PEER_PORT.i64(80), +//! KeyValue::new(semconv::trace::NET_PEER_NAME, "example.org"), +//! KeyValue::new(semconv::trace::NET_PEER_PORT, 80i64), //! ]) //! .start(&tracer); //! 
``` -use opentelemetry::Key; - /// Client address - unix domain socket name, IPv4 or IPv6 address. /// /// When observed from the server side, and when communicating through an intermediary, `client.address` SHOULD represent client address behind any intermediaries (e.g. proxies) if it's available. @@ -37,7 +36,7 @@ use opentelemetry::Key; /// /// - `/tmp/my.sock` /// - `10.1.2.80` -pub const CLIENT_ADDRESS: Key = Key::from_static_str("client.address"); +pub const CLIENT_ADDRESS: &str = "client.address"; /// Client port number. /// @@ -46,7 +45,7 @@ pub const CLIENT_ADDRESS: Key = Key::from_static_str("client.address"); /// # Examples /// /// - `65123` -pub const CLIENT_PORT: Key = Key::from_static_str("client.port"); +pub const CLIENT_PORT: &str = "client.port"; /// Immediate client peer address - unix domain socket name, IPv4 or IPv6 address. /// @@ -54,14 +53,14 @@ pub const CLIENT_PORT: Key = Key::from_static_str("client.port"); /// /// - `/tmp/my.sock` /// - `127.0.0.1` -pub const CLIENT_SOCKET_ADDRESS: Key = Key::from_static_str("client.socket.address"); +pub const CLIENT_SOCKET_ADDRESS: &str = "client.socket.address"; /// Immediate client peer port number. /// /// # Examples /// /// - `35555` -pub const CLIENT_SOCKET_PORT: Key = Key::from_static_str("client.socket.port"); +pub const CLIENT_SOCKET_PORT: &str = "client.socket.port"; /// Deprecated, use `http.request.method` instead. /// @@ -71,7 +70,7 @@ pub const CLIENT_SOCKET_PORT: Key = Key::from_static_str("client.socket.port"); /// - `POST` /// - `HEAD` #[deprecated] -pub const HTTP_METHOD: Key = Key::from_static_str("http.method"); +pub const HTTP_METHOD: &str = "http.method"; /// Deprecated, use `http.response.status_code` instead. /// @@ -79,7 +78,7 @@ pub const HTTP_METHOD: Key = Key::from_static_str("http.method"); /// /// - `200` #[deprecated] -pub const HTTP_STATUS_CODE: Key = Key::from_static_str("http.status_code"); +pub const HTTP_STATUS_CODE: &str = "http.status_code"; /// Deprecated, use `url.scheme` instead. /// @@ -88,7 +87,7 @@ pub const HTTP_STATUS_CODE: Key = Key::from_static_str("http.status_code"); /// - `http` /// - `https` #[deprecated] -pub const HTTP_SCHEME: Key = Key::from_static_str("http.scheme"); +pub const HTTP_SCHEME: &str = "http.scheme"; /// Deprecated, use `url.full` instead. /// @@ -96,7 +95,7 @@ pub const HTTP_SCHEME: Key = Key::from_static_str("http.scheme"); /// /// - `https://www.foo.bar/search?q=OpenTelemetry#SemConv` #[deprecated] -pub const HTTP_URL: Key = Key::from_static_str("http.url"); +pub const HTTP_URL: &str = "http.url"; /// Deprecated, use `url.path` and `url.query` instead. /// @@ -104,7 +103,7 @@ pub const HTTP_URL: Key = Key::from_static_str("http.url"); /// /// - `/search?q=OpenTelemetry#SemConv` #[deprecated] -pub const HTTP_TARGET: Key = Key::from_static_str("http.target"); +pub const HTTP_TARGET: &str = "http.target"; /// Deprecated, use `http.request.body.size` instead. /// @@ -112,7 +111,7 @@ pub const HTTP_TARGET: Key = Key::from_static_str("http.target"); /// /// - `3495` #[deprecated] -pub const HTTP_REQUEST_CONTENT_LENGTH: Key = Key::from_static_str("http.request_content_length"); +pub const HTTP_REQUEST_CONTENT_LENGTH: &str = "http.request_content_length"; /// Deprecated, use `http.response.body.size` instead. 
/// @@ -120,7 +119,7 @@ pub const HTTP_REQUEST_CONTENT_LENGTH: Key = Key::from_static_str("http.request_ /// /// - `3495` #[deprecated] -pub const HTTP_RESPONSE_CONTENT_LENGTH: Key = Key::from_static_str("http.response_content_length"); +pub const HTTP_RESPONSE_CONTENT_LENGTH: &str = "http.response_content_length"; /// Deprecated, use `server.socket.domain` on client spans. /// @@ -128,7 +127,7 @@ pub const HTTP_RESPONSE_CONTENT_LENGTH: Key = Key::from_static_str("http.respons /// /// - `/var/my.sock` #[deprecated] -pub const NET_SOCK_PEER_NAME: Key = Key::from_static_str("net.sock.peer.name"); +pub const NET_SOCK_PEER_NAME: &str = "net.sock.peer.name"; /// Deprecated, use `server.socket.address` on client spans and `client.socket.address` on server spans. /// @@ -136,7 +135,7 @@ pub const NET_SOCK_PEER_NAME: Key = Key::from_static_str("net.sock.peer.name"); /// /// - `192.168.0.1` #[deprecated] -pub const NET_SOCK_PEER_ADDR: Key = Key::from_static_str("net.sock.peer.addr"); +pub const NET_SOCK_PEER_ADDR: &str = "net.sock.peer.addr"; /// Deprecated, use `server.socket.port` on client spans and `client.socket.port` on server spans. /// @@ -144,7 +143,7 @@ pub const NET_SOCK_PEER_ADDR: Key = Key::from_static_str("net.sock.peer.addr"); /// /// - `65531` #[deprecated] -pub const NET_SOCK_PEER_PORT: Key = Key::from_static_str("net.sock.peer.port"); +pub const NET_SOCK_PEER_PORT: &str = "net.sock.peer.port"; /// Deprecated, use `server.address` on client spans and `client.address` on server spans. /// @@ -152,7 +151,7 @@ pub const NET_SOCK_PEER_PORT: Key = Key::from_static_str("net.sock.peer.port"); /// /// - `example.com` #[deprecated] -pub const NET_PEER_NAME: Key = Key::from_static_str("net.peer.name"); +pub const NET_PEER_NAME: &str = "net.peer.name"; /// Deprecated, use `server.port` on client spans and `client.port` on server spans. /// @@ -160,7 +159,7 @@ pub const NET_PEER_NAME: Key = Key::from_static_str("net.peer.name"); /// /// - `8080` #[deprecated] -pub const NET_PEER_PORT: Key = Key::from_static_str("net.peer.port"); +pub const NET_PEER_PORT: &str = "net.peer.port"; /// Deprecated, use `server.address`. /// @@ -168,7 +167,7 @@ pub const NET_PEER_PORT: Key = Key::from_static_str("net.peer.port"); /// /// - `example.com` #[deprecated] -pub const NET_HOST_NAME: Key = Key::from_static_str("net.host.name"); +pub const NET_HOST_NAME: &str = "net.host.name"; /// Deprecated, use `server.port`. /// @@ -176,7 +175,7 @@ pub const NET_HOST_NAME: Key = Key::from_static_str("net.host.name"); /// /// - `8080` #[deprecated] -pub const NET_HOST_PORT: Key = Key::from_static_str("net.host.port"); +pub const NET_HOST_PORT: &str = "net.host.port"; /// Deprecated, use `server.socket.address`. /// @@ -184,7 +183,7 @@ pub const NET_HOST_PORT: Key = Key::from_static_str("net.host.port"); /// /// - `/var/my.sock` #[deprecated] -pub const NET_SOCK_HOST_ADDR: Key = Key::from_static_str("net.sock.host.addr"); +pub const NET_SOCK_HOST_ADDR: &str = "net.sock.host.addr"; /// Deprecated, use `server.socket.port`. /// @@ -192,11 +191,11 @@ pub const NET_SOCK_HOST_ADDR: Key = Key::from_static_str("net.sock.host.addr"); /// /// - `8080` #[deprecated] -pub const NET_SOCK_HOST_PORT: Key = Key::from_static_str("net.sock.host.port"); +pub const NET_SOCK_HOST_PORT: &str = "net.sock.host.port"; /// Deprecated, use `network.transport`. #[deprecated] -pub const NET_TRANSPORT: Key = Key::from_static_str("net.transport"); +pub const NET_TRANSPORT: &str = "net.transport"; /// Deprecated, use `network.protocol.name`. 
/// @@ -206,7 +205,7 @@ pub const NET_TRANSPORT: Key = Key::from_static_str("net.transport"); /// - `http` /// - `mqtt` #[deprecated] -pub const NET_PROTOCOL_NAME: Key = Key::from_static_str("net.protocol.name"); +pub const NET_PROTOCOL_NAME: &str = "net.protocol.name"; /// Deprecated, use `network.protocol.version`. /// @@ -214,11 +213,11 @@ pub const NET_PROTOCOL_NAME: Key = Key::from_static_str("net.protocol.name"); /// /// - `3.1.1` #[deprecated] -pub const NET_PROTOCOL_VERSION: Key = Key::from_static_str("net.protocol.version"); +pub const NET_PROTOCOL_VERSION: &str = "net.protocol.version"; /// Deprecated, use `network.transport` and `network.type`. #[deprecated] -pub const NET_SOCK_FAMILY: Key = Key::from_static_str("net.sock.family"); +pub const NET_SOCK_FAMILY: &str = "net.sock.family"; /// The domain name of the destination system. /// @@ -227,14 +226,14 @@ pub const NET_SOCK_FAMILY: Key = Key::from_static_str("net.sock.family"); /// # Examples /// /// - `foo.example.com` -pub const DESTINATION_DOMAIN: Key = Key::from_static_str("destination.domain"); +pub const DESTINATION_DOMAIN: &str = "destination.domain"; /// Peer address, for example IP address or UNIX socket name. /// /// # Examples /// /// - `10.5.3.2` -pub const DESTINATION_ADDRESS: Key = Key::from_static_str("destination.address"); +pub const DESTINATION_ADDRESS: &str = "destination.address"; /// Peer port number. /// @@ -242,7 +241,7 @@ pub const DESTINATION_ADDRESS: Key = Key::from_static_str("destination.address") /// /// - `3389` /// - `2888` -pub const DESTINATION_PORT: Key = Key::from_static_str("destination.port"); +pub const DESTINATION_PORT: &str = "destination.port"; /// The type of the exception (its fully-qualified class name, if applicable). The dynamic type of the exception should be preferred over the static type in languages that support it. /// @@ -250,7 +249,7 @@ pub const DESTINATION_PORT: Key = Key::from_static_str("destination.port"); /// /// - `java.net.ConnectException` /// - `OSError` -pub const EXCEPTION_TYPE: Key = Key::from_static_str("exception.type"); +pub const EXCEPTION_TYPE: &str = "exception.type"; /// The exception message. /// @@ -258,14 +257,14 @@ pub const EXCEPTION_TYPE: Key = Key::from_static_str("exception.type"); /// /// - `Division by zero` /// - `Can't convert 'int' object to str implicitly` -pub const EXCEPTION_MESSAGE: Key = Key::from_static_str("exception.message"); +pub const EXCEPTION_MESSAGE: &str = "exception.message"; /// A stacktrace as a string in the natural representation for the language runtime. The representation is to be determined and documented by each language SIG. /// /// # Examples /// /// - `Exception in thread "main" java.lang.RuntimeException: Test exception\n at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at com.example.GenerateTrace.main(GenerateTrace.java:5)` -pub const EXCEPTION_STACKTRACE: Key = Key::from_static_str("exception.stacktrace"); +pub const EXCEPTION_STACKTRACE: &str = "exception.stacktrace"; /// HTTP request method. /// @@ -290,14 +289,14 @@ pub const EXCEPTION_STACKTRACE: Key = Key::from_static_str("exception.stacktrace /// - `GET` /// - `POST` /// - `HEAD` -pub const HTTP_REQUEST_METHOD: Key = Key::from_static_str("http.request.method"); +pub const HTTP_REQUEST_METHOD: &str = "http.request.method"; /// [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6). 
/// /// # Examples /// /// - `200` -pub const HTTP_RESPONSE_STATUS_CODE: Key = Key::from_static_str("http.response.status_code"); +pub const HTTP_RESPONSE_STATUS_CODE: &str = "http.response.status_code"; /// The matched route (path template in the format used by the respective server framework). See note below. /// @@ -308,7 +307,7 @@ pub const HTTP_RESPONSE_STATUS_CODE: Key = Key::from_static_str("http.response.s /// /// - `/users/:userID?` /// - `{controller}/{action}/{id?}` -pub const HTTP_ROUTE: Key = Key::from_static_str("http.route"); +pub const HTTP_ROUTE: &str = "http.route"; /// The name identifies the event. /// @@ -316,13 +315,13 @@ pub const HTTP_ROUTE: Key = Key::from_static_str("http.route"); /// /// - `click` /// - `exception` -pub const EVENT_NAME: Key = Key::from_static_str("event.name"); +pub const EVENT_NAME: &str = "event.name"; /// The domain identifies the business context for the events. /// /// Events across different domains may have same `event.name`, yet be /// unrelated events. -pub const EVENT_DOMAIN: Key = Key::from_static_str("event.domain"); +pub const EVENT_DOMAIN: &str = "event.domain"; /// A unique identifier for the Log Record. /// @@ -332,38 +331,38 @@ pub const EVENT_DOMAIN: Key = Key::from_static_str("event.domain"); /// # Examples /// /// - `01ARZ3NDEKTSV4RRFFQ69G5FAV` -pub const LOG_RECORD_UID: Key = Key::from_static_str("log.record.uid"); +pub const LOG_RECORD_UID: &str = "log.record.uid"; /// The stream associated with the log. See below for a list of well-known values. -pub const LOG_IOSTREAM: Key = Key::from_static_str("log.iostream"); +pub const LOG_IOSTREAM: &str = "log.iostream"; /// The basename of the file. /// /// # Examples /// /// - `audit.log` -pub const LOG_FILE_NAME: Key = Key::from_static_str("log.file.name"); +pub const LOG_FILE_NAME: &str = "log.file.name"; /// The full path to the file. /// /// # Examples /// /// - `/var/log/mysql/audit.log` -pub const LOG_FILE_PATH: Key = Key::from_static_str("log.file.path"); +pub const LOG_FILE_PATH: &str = "log.file.path"; /// The basename of the file, with symlinks resolved. /// /// # Examples /// /// - `uuid.log` -pub const LOG_FILE_NAME_RESOLVED: Key = Key::from_static_str("log.file.name_resolved"); +pub const LOG_FILE_NAME_RESOLVED: &str = "log.file.name_resolved"; /// The full path to the file, with symlinks resolved. /// /// # Examples /// /// - `/var/lib/docker/uuid.log` -pub const LOG_FILE_PATH_RESOLVED: Key = Key::from_static_str("log.file.path_resolved"); +pub const LOG_FILE_PATH_RESOLVED: &str = "log.file.path_resolved"; /// The type of memory. /// @@ -371,7 +370,7 @@ pub const LOG_FILE_PATH_RESOLVED: Key = Key::from_static_str("log.file.path_reso /// /// - `heap` /// - `non_heap` -pub const TYPE: Key = Key::from_static_str("type"); +pub const TYPE: &str = "type"; /// Name of the memory pool. /// @@ -382,14 +381,14 @@ pub const TYPE: Key = Key::from_static_str("type"); /// - `G1 Old Gen` /// - `G1 Eden space` /// - `G1 Survivor Space` -pub const POOL: Key = Key::from_static_str("pool"); +pub const POOL: &str = "pool"; /// Logical server hostname, matches server FQDN if available, and IP or socket address if FQDN is not known. /// /// # Examples /// /// - `example.com` -pub const SERVER_ADDRESS: Key = Key::from_static_str("server.address"); +pub const SERVER_ADDRESS: &str = "server.address"; /// Logical server port number. 
/// @@ -398,7 +397,7 @@ pub const SERVER_ADDRESS: Key = Key::from_static_str("server.address"); /// - `80` /// - `8080` /// - `443` -pub const SERVER_PORT: Key = Key::from_static_str("server.port"); +pub const SERVER_PORT: &str = "server.port"; /// The domain name of an immediate peer. /// @@ -407,21 +406,21 @@ pub const SERVER_PORT: Key = Key::from_static_str("server.port"); /// # Examples /// /// - `proxy.example.com` -pub const SERVER_SOCKET_DOMAIN: Key = Key::from_static_str("server.socket.domain"); +pub const SERVER_SOCKET_DOMAIN: &str = "server.socket.domain"; /// Physical server IP address or Unix socket address. If set from the client, should simply use the socket's peer address, and not attempt to find any actual server IP (i.e., if set from client, this may represent some proxy server instead of the logical server). /// /// # Examples /// /// - `10.5.3.2` -pub const SERVER_SOCKET_ADDRESS: Key = Key::from_static_str("server.socket.address"); +pub const SERVER_SOCKET_ADDRESS: &str = "server.socket.address"; /// Physical server port. /// /// # Examples /// /// - `16456` -pub const SERVER_SOCKET_PORT: Key = Key::from_static_str("server.socket.port"); +pub const SERVER_SOCKET_PORT: &str = "server.socket.port"; /// The domain name of the source system. /// @@ -430,14 +429,14 @@ pub const SERVER_SOCKET_PORT: Key = Key::from_static_str("server.socket.port"); /// # Examples /// /// - `foo.example.com` -pub const SOURCE_DOMAIN: Key = Key::from_static_str("source.domain"); +pub const SOURCE_DOMAIN: &str = "source.domain"; /// Source address, for example IP address or Unix socket name. /// /// # Examples /// /// - `10.5.3.2` -pub const SOURCE_ADDRESS: Key = Key::from_static_str("source.address"); +pub const SOURCE_ADDRESS: &str = "source.address"; /// Source port number. /// @@ -445,7 +444,7 @@ pub const SOURCE_ADDRESS: Key = Key::from_static_str("source.address"); /// /// - `3389` /// - `2888` -pub const SOURCE_PORT: Key = Key::from_static_str("source.port"); +pub const SOURCE_PORT: &str = "source.port"; /// The full invoked ARN as provided on the `Context` passed to the function (`Lambda-Runtime-Invoked-Function-Arn` header on the `/runtime/invocation/next` applicable). /// @@ -454,7 +453,7 @@ pub const SOURCE_PORT: Key = Key::from_static_str("source.port"); /// # Examples /// /// - `arn:aws:lambda:us-east-1:123456:function:myfunction:myalias` -pub const AWS_LAMBDA_INVOKED_ARN: Key = Key::from_static_str("aws.lambda.invoked_arn"); +pub const AWS_LAMBDA_INVOKED_ARN: &str = "aws.lambda.invoked_arn"; /// The [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) uniquely identifies the event. /// @@ -462,7 +461,7 @@ pub const AWS_LAMBDA_INVOKED_ARN: Key = Key::from_static_str("aws.lambda.invoked /// /// - `123e4567-e89b-12d3-a456-426614174000` /// - `0001` -pub const CLOUDEVENTS_EVENT_ID: Key = Key::from_static_str("cloudevents.event_id"); +pub const CLOUDEVENTS_EVENT_ID: &str = "cloudevents.event_id"; /// The [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) identifies the context in which an event happened. 
/// @@ -471,15 +470,14 @@ pub const CLOUDEVENTS_EVENT_ID: Key = Key::from_static_str("cloudevents.event_id /// - `https://github.com/cloudevents` /// - `/cloudevents/spec/pull/123` /// - `my-service` -pub const CLOUDEVENTS_EVENT_SOURCE: Key = Key::from_static_str("cloudevents.event_source"); +pub const CLOUDEVENTS_EVENT_SOURCE: &str = "cloudevents.event_source"; /// The [version of the CloudEvents specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) which the event uses. /// /// # Examples /// /// - `1.0` -pub const CLOUDEVENTS_EVENT_SPEC_VERSION: Key = - Key::from_static_str("cloudevents.event_spec_version"); +pub const CLOUDEVENTS_EVENT_SPEC_VERSION: &str = "cloudevents.event_spec_version"; /// The [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) contains a value describing the type of event related to the originating occurrence. /// @@ -487,29 +485,29 @@ pub const CLOUDEVENTS_EVENT_SPEC_VERSION: Key = /// /// - `com.github.pull_request.opened` /// - `com.example.object.deleted.v2` -pub const CLOUDEVENTS_EVENT_TYPE: Key = Key::from_static_str("cloudevents.event_type"); +pub const CLOUDEVENTS_EVENT_TYPE: &str = "cloudevents.event_type"; /// The [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) of the event in the context of the event producer (identified by source). /// /// # Examples /// /// - `mynewfile.jpg` -pub const CLOUDEVENTS_EVENT_SUBJECT: Key = Key::from_static_str("cloudevents.event_subject"); +pub const CLOUDEVENTS_EVENT_SUBJECT: &str = "cloudevents.event_subject"; /// Parent-child Reference type. /// /// The causal relationship between a child Span and a parent Span. -pub const OPENTRACING_REF_TYPE: Key = Key::from_static_str("opentracing.ref_type"); +pub const OPENTRACING_REF_TYPE: &str = "opentracing.ref_type"; /// An identifier for the database management system (DBMS) product being used. See below for a list of well-known identifiers. -pub const DB_SYSTEM: Key = Key::from_static_str("db.system"); +pub const DB_SYSTEM: &str = "db.system"; /// The connection string used to connect to the database. It is recommended to remove embedded credentials. /// /// # Examples /// /// - `Server=(localdb)\v11.0;Integrated Security=true;` -pub const DB_CONNECTION_STRING: Key = Key::from_static_str("db.connection_string"); +pub const DB_CONNECTION_STRING: &str = "db.connection_string"; /// Username for accessing the database. /// @@ -517,7 +515,7 @@ pub const DB_CONNECTION_STRING: Key = Key::from_static_str("db.connection_string /// /// - `readonly_user` /// - `reporting_user` -pub const DB_USER: Key = Key::from_static_str("db.user"); +pub const DB_USER: &str = "db.user"; /// The fully-qualified class name of the [Java Database Connectivity (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver used to connect. /// @@ -525,7 +523,7 @@ pub const DB_USER: Key = Key::from_static_str("db.user"); /// /// - `org.postgresql.Driver` /// - `com.microsoft.sqlserver.jdbc.SQLServerDriver` -pub const DB_JDBC_DRIVER_CLASSNAME: Key = Key::from_static_str("db.jdbc.driver_classname"); +pub const DB_JDBC_DRIVER_CLASSNAME: &str = "db.jdbc.driver_classname"; /// This attribute is used to report the name of the database being accessed. For commands that switch the database, this should be set to the target database (even if the command fails). 
/// @@ -535,7 +533,7 @@ pub const DB_JDBC_DRIVER_CLASSNAME: Key = Key::from_static_str("db.jdbc.driver_c /// /// - `customers` /// - `main` -pub const DB_NAME: Key = Key::from_static_str("db.name"); +pub const DB_NAME: &str = "db.name"; /// The database statement being executed. /// @@ -543,7 +541,7 @@ pub const DB_NAME: Key = Key::from_static_str("db.name"); /// /// - `SELECT * FROM wuser_table` /// - `SET mykey "WuValue"` -pub const DB_STATEMENT: Key = Key::from_static_str("db.statement"); +pub const DB_STATEMENT: &str = "db.statement"; /// The name of the operation being executed, e.g. the [MongoDB command name](https://docs.mongodb.com/manual/reference/command/#database-operations) such as `findAndModify`, or the SQL keyword. /// @@ -554,7 +552,7 @@ pub const DB_STATEMENT: Key = Key::from_static_str("db.statement"); /// - `findAndModify` /// - `HMSET` /// - `SELECT` -pub const DB_OPERATION: Key = Key::from_static_str("db.operation"); +pub const DB_OPERATION: &str = "db.operation"; /// The Microsoft SQL Server [instance name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) connecting to. This name is used to determine the port of a named instance. /// @@ -563,18 +561,17 @@ pub const DB_OPERATION: Key = Key::from_static_str("db.operation"); /// # Examples /// /// - `MSSQLSERVER` -pub const DB_MSSQL_INSTANCE_NAME: Key = Key::from_static_str("db.mssql.instance_name"); +pub const DB_MSSQL_INSTANCE_NAME: &str = "db.mssql.instance_name"; /// The fetch size used for paging, i.e. how many rows will be returned at once. /// /// # Examples /// /// - `5000` -pub const DB_CASSANDRA_PAGE_SIZE: Key = Key::from_static_str("db.cassandra.page_size"); +pub const DB_CASSANDRA_PAGE_SIZE: &str = "db.cassandra.page_size"; /// The consistency level of the query. Based on consistency values from [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). -pub const DB_CASSANDRA_CONSISTENCY_LEVEL: Key = - Key::from_static_str("db.cassandra.consistency_level"); +pub const DB_CASSANDRA_CONSISTENCY_LEVEL: &str = "db.cassandra.consistency_level"; /// The name of the primary table that the operation is acting upon, including the keyspace name (if applicable). /// @@ -583,10 +580,10 @@ pub const DB_CASSANDRA_CONSISTENCY_LEVEL: Key = /// # Examples /// /// - `mytable` -pub const DB_CASSANDRA_TABLE: Key = Key::from_static_str("db.cassandra.table"); +pub const DB_CASSANDRA_TABLE: &str = "db.cassandra.table"; /// Whether or not the query is idempotent. -pub const DB_CASSANDRA_IDEMPOTENCE: Key = Key::from_static_str("db.cassandra.idempotence"); +pub const DB_CASSANDRA_IDEMPOTENCE: &str = "db.cassandra.idempotence"; /// The number of times a query was speculatively executed. Not set or `0` if the query was not executed speculatively. /// @@ -594,22 +591,22 @@ pub const DB_CASSANDRA_IDEMPOTENCE: Key = Key::from_static_str("db.cassandra.ide /// /// - `0` /// - `2` -pub const DB_CASSANDRA_SPECULATIVE_EXECUTION_COUNT: Key = - Key::from_static_str("db.cassandra.speculative_execution_count"); +pub const DB_CASSANDRA_SPECULATIVE_EXECUTION_COUNT: &str = + "db.cassandra.speculative_execution_count"; /// The ID of the coordinating node for a query. /// /// # Examples /// /// - `be13faa2-8574-4d71-926d-27f16cf8a7af` -pub const DB_CASSANDRA_COORDINATOR_ID: Key = Key::from_static_str("db.cassandra.coordinator.id"); +pub const DB_CASSANDRA_COORDINATOR_ID: &str = "db.cassandra.coordinator.id"; /// The data center of the coordinating node for a query. 
/// /// # Examples /// /// - `us-west-2` -pub const DB_CASSANDRA_COORDINATOR_DC: Key = Key::from_static_str("db.cassandra.coordinator.dc"); +pub const DB_CASSANDRA_COORDINATOR_DC: &str = "db.cassandra.coordinator.dc"; /// The index of the database being accessed as used in the [`SELECT` command](https://redis.io/commands/select), provided as an integer. To be used instead of the generic `db.name` attribute. /// @@ -618,7 +615,7 @@ pub const DB_CASSANDRA_COORDINATOR_DC: Key = Key::from_static_str("db.cassandra. /// - `0` /// - `1` /// - `15` -pub const DB_REDIS_DATABASE_INDEX: Key = Key::from_static_str("db.redis.database_index"); +pub const DB_REDIS_DATABASE_INDEX: &str = "db.redis.database_index"; /// The collection being accessed within the database stated in `db.name`. /// @@ -626,7 +623,7 @@ pub const DB_REDIS_DATABASE_INDEX: Key = Key::from_static_str("db.redis.database /// /// - `customers` /// - `products` -pub const DB_MONGODB_COLLECTION: Key = Key::from_static_str("db.mongodb.collection"); +pub const DB_MONGODB_COLLECTION: &str = "db.mongodb.collection"; /// The name of the primary table that the operation is acting upon, including the database name (if applicable). /// @@ -636,31 +633,30 @@ pub const DB_MONGODB_COLLECTION: Key = Key::from_static_str("db.mongodb.collecti /// /// - `public.users` /// - `customers` -pub const DB_SQL_TABLE: Key = Key::from_static_str("db.sql.table"); +pub const DB_SQL_TABLE: &str = "db.sql.table"; /// Unique Cosmos client instance id. /// /// # Examples /// /// - `3ba4827d-4422-483f-b59f-85b74211c11d` -pub const DB_COSMOSDB_CLIENT_ID: Key = Key::from_static_str("db.cosmosdb.client_id"); +pub const DB_COSMOSDB_CLIENT_ID: &str = "db.cosmosdb.client_id"; /// CosmosDB Operation Type. -pub const DB_COSMOSDB_OPERATION_TYPE: Key = Key::from_static_str("db.cosmosdb.operation_type"); +pub const DB_COSMOSDB_OPERATION_TYPE: &str = "db.cosmosdb.operation_type"; /// Cosmos client connection mode. -pub const DB_COSMOSDB_CONNECTION_MODE: Key = Key::from_static_str("db.cosmosdb.connection_mode"); +pub const DB_COSMOSDB_CONNECTION_MODE: &str = "db.cosmosdb.connection_mode"; /// Cosmos DB container name. /// /// # Examples /// /// - `anystring` -pub const DB_COSMOSDB_CONTAINER: Key = Key::from_static_str("db.cosmosdb.container"); +pub const DB_COSMOSDB_CONTAINER: &str = "db.cosmosdb.container"; /// Request payload size in bytes. -pub const DB_COSMOSDB_REQUEST_CONTENT_LENGTH: Key = - Key::from_static_str("db.cosmosdb.request_content_length"); +pub const DB_COSMOSDB_REQUEST_CONTENT_LENGTH: &str = "db.cosmosdb.request_content_length"; /// Cosmos DB status code. /// @@ -668,7 +664,7 @@ pub const DB_COSMOSDB_REQUEST_CONTENT_LENGTH: Key = /// /// - `200` /// - `201` -pub const DB_COSMOSDB_STATUS_CODE: Key = Key::from_static_str("db.cosmosdb.status_code"); +pub const DB_COSMOSDB_STATUS_CODE: &str = "db.cosmosdb.status_code"; /// Cosmos DB sub status code. /// @@ -676,7 +672,7 @@ pub const DB_COSMOSDB_STATUS_CODE: Key = Key::from_static_str("db.cosmosdb.statu /// /// - `1000` /// - `1002` -pub const DB_COSMOSDB_SUB_STATUS_CODE: Key = Key::from_static_str("db.cosmosdb.sub_status_code"); +pub const DB_COSMOSDB_SUB_STATUS_CODE: &str = "db.cosmosdb.sub_status_code"; /// RU consumed for that operation. 
/// @@ -684,17 +680,17 @@ pub const DB_COSMOSDB_SUB_STATUS_CODE: Key = Key::from_static_str("db.cosmosdb.s /// /// - `46.18` /// - `1.0` -pub const DB_COSMOSDB_REQUEST_CHARGE: Key = Key::from_static_str("db.cosmosdb.request_charge"); +pub const DB_COSMOSDB_REQUEST_CHARGE: &str = "db.cosmosdb.request_charge"; /// Name of the code, either "OK" or "ERROR". MUST NOT be set if the status code is UNSET. -pub const OTEL_STATUS_CODE: Key = Key::from_static_str("otel.status_code"); +pub const OTEL_STATUS_CODE: &str = "otel.status_code"; /// Description of the Status if it has a value, otherwise not set. /// /// # Examples /// /// - `resource not found` -pub const OTEL_STATUS_DESCRIPTION: Key = Key::from_static_str("otel.status_description"); +pub const OTEL_STATUS_DESCRIPTION: &str = "otel.status_description"; /// Type of the trigger which caused this function invocation. /// @@ -707,14 +703,14 @@ pub const OTEL_STATUS_DESCRIPTION: Key = Key::from_static_str("otel.status_descr /// trigger that corresponding incoming would have (i.e., this has /// nothing to do with the underlying transport used to make the API /// call to invoke the lambda, which is often HTTP). -pub const FAAS_TRIGGER: Key = Key::from_static_str("faas.trigger"); +pub const FAAS_TRIGGER: &str = "faas.trigger"; /// The invocation ID of the current function invocation. /// /// # Examples /// /// - `af9d5aa4-a685-4c5f-a22b-444f80b3cc28` -pub const FAAS_INVOCATION_ID: Key = Key::from_static_str("faas.invocation_id"); +pub const FAAS_INVOCATION_ID: &str = "faas.invocation_id"; /// The name of the source on which the triggering operation was performed. For example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the database name. /// @@ -722,17 +718,17 @@ pub const FAAS_INVOCATION_ID: Key = Key::from_static_str("faas.invocation_id"); /// /// - `myBucketName` /// - `myDbName` -pub const FAAS_DOCUMENT_COLLECTION: Key = Key::from_static_str("faas.document.collection"); +pub const FAAS_DOCUMENT_COLLECTION: &str = "faas.document.collection"; /// Describes the type of the operation that was performed on the data. -pub const FAAS_DOCUMENT_OPERATION: Key = Key::from_static_str("faas.document.operation"); +pub const FAAS_DOCUMENT_OPERATION: &str = "faas.document.operation"; /// A string containing the time when the data was accessed in the [ISO 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). /// /// # Examples /// /// - `2020-01-23T13:47:06Z` -pub const FAAS_DOCUMENT_TIME: Key = Key::from_static_str("faas.document.time"); +pub const FAAS_DOCUMENT_TIME: &str = "faas.document.time"; /// The document name/table subjected to the operation. For example, in Cloud Storage or S3 is the name of the file, and in Cosmos DB the table name. /// @@ -740,24 +736,24 @@ pub const FAAS_DOCUMENT_TIME: Key = Key::from_static_str("faas.document.time"); /// /// - `myFile.txt` /// - `myTableName` -pub const FAAS_DOCUMENT_NAME: Key = Key::from_static_str("faas.document.name"); +pub const FAAS_DOCUMENT_NAME: &str = "faas.document.name"; /// A string containing the function invocation time in the [ISO 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). 
/// /// # Examples /// /// - `2020-01-23T13:47:06Z` -pub const FAAS_TIME: Key = Key::from_static_str("faas.time"); +pub const FAAS_TIME: &str = "faas.time"; /// A string containing the schedule period as [Cron Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). /// /// # Examples /// /// - `0/5 * * * ? *` -pub const FAAS_CRON: Key = Key::from_static_str("faas.cron"); +pub const FAAS_CRON: &str = "faas.cron"; /// A boolean that is true if the serverless function is executed for the first time (aka cold-start). -pub const FAAS_COLDSTART: Key = Key::from_static_str("faas.coldstart"); +pub const FAAS_COLDSTART: &str = "faas.coldstart"; /// The name of the invoked function. /// @@ -766,12 +762,12 @@ pub const FAAS_COLDSTART: Key = Key::from_static_str("faas.coldstart"); /// # Examples /// /// - `my-function` -pub const FAAS_INVOKED_NAME: Key = Key::from_static_str("faas.invoked_name"); +pub const FAAS_INVOKED_NAME: &str = "faas.invoked_name"; /// The cloud provider of the invoked function. /// /// SHOULD be equal to the `cloud.provider` resource attribute of the invoked function. -pub const FAAS_INVOKED_PROVIDER: Key = Key::from_static_str("faas.invoked_provider"); +pub const FAAS_INVOKED_PROVIDER: &str = "faas.invoked_provider"; /// The cloud region of the invoked function. /// @@ -780,21 +776,21 @@ pub const FAAS_INVOKED_PROVIDER: Key = Key::from_static_str("faas.invoked_provid /// # Examples /// /// - `eu-central-1` -pub const FAAS_INVOKED_REGION: Key = Key::from_static_str("faas.invoked_region"); +pub const FAAS_INVOKED_REGION: &str = "faas.invoked_region"; /// The unique identifier of the feature flag. /// /// # Examples /// /// - `logo-color` -pub const FEATURE_FLAG_KEY: Key = Key::from_static_str("feature_flag.key"); +pub const FEATURE_FLAG_KEY: &str = "feature_flag.key"; /// The name of the service provider that performs the flag evaluation. /// /// # Examples /// /// - `Flag Manager` -pub const FEATURE_FLAG_PROVIDER_NAME: Key = Key::from_static_str("feature_flag.provider_name"); +pub const FEATURE_FLAG_PROVIDER_NAME: &str = "feature_flag.provider_name"; /// SHOULD be a semantic identifier for a value. If one is unavailable, a stringified version of the value can be used. /// @@ -812,7 +808,7 @@ pub const FEATURE_FLAG_PROVIDER_NAME: Key = Key::from_static_str("feature_flag.p /// - `red` /// - `true` /// - `on` -pub const FEATURE_FLAG_VARIANT: Key = Key::from_static_str("feature_flag.variant"); +pub const FEATURE_FLAG_VARIANT: &str = "feature_flag.variant"; /// [OSI Transport Layer](https://osi-model.com/transport-layer/) or [Inter-process Communication method](https://en.wikipedia.org/wiki/Inter-process_communication). The value SHOULD be normalized to lowercase. /// @@ -820,7 +816,7 @@ pub const FEATURE_FLAG_VARIANT: Key = Key::from_static_str("feature_flag.variant /// /// - `tcp` /// - `udp` -pub const NETWORK_TRANSPORT: Key = Key::from_static_str("network.transport"); +pub const NETWORK_TRANSPORT: &str = "network.transport"; /// [OSI Network Layer](https://osi-model.com/network-layer/) or non-OSI equivalent. The value SHOULD be normalized to lowercase. /// @@ -828,7 +824,7 @@ pub const NETWORK_TRANSPORT: Key = Key::from_static_str("network.transport"); /// /// - `ipv4` /// - `ipv6` -pub const NETWORK_TYPE: Key = Key::from_static_str("network.type"); +pub const NETWORK_TYPE: &str = "network.type"; /// [OSI Application Layer](https://osi-model.com/application-layer/) or non-OSI equivalent. The value SHOULD be normalized to lowercase. 
/// @@ -837,7 +833,7 @@ pub const NETWORK_TYPE: Key = Key::from_static_str("network.type"); /// - `amqp` /// - `http` /// - `mqtt` -pub const NETWORK_PROTOCOL_NAME: Key = Key::from_static_str("network.protocol.name"); +pub const NETWORK_PROTOCOL_NAME: &str = "network.protocol.name"; /// Version of the application layer protocol used. See note below. /// @@ -846,126 +842,126 @@ pub const NETWORK_PROTOCOL_NAME: Key = Key::from_static_str("network.protocol.na /// # Examples /// /// - `3.1.1` -pub const NETWORK_PROTOCOL_VERSION: Key = Key::from_static_str("network.protocol.version"); +pub const NETWORK_PROTOCOL_VERSION: &str = "network.protocol.version"; /// The internet connection type. /// /// # Examples /// /// - `wifi` -pub const NETWORK_CONNECTION_TYPE: Key = Key::from_static_str("network.connection.type"); +pub const NETWORK_CONNECTION_TYPE: &str = "network.connection.type"; /// This describes more details regarding the connection.type. It may be the type of cell technology connection, but it could be used for describing details about a wifi connection. /// /// # Examples /// /// - `LTE` -pub const NETWORK_CONNECTION_SUBTYPE: Key = Key::from_static_str("network.connection.subtype"); +pub const NETWORK_CONNECTION_SUBTYPE: &str = "network.connection.subtype"; /// The name of the mobile carrier. /// /// # Examples /// /// - `sprint` -pub const NETWORK_CARRIER_NAME: Key = Key::from_static_str("network.carrier.name"); +pub const NETWORK_CARRIER_NAME: &str = "network.carrier.name"; /// The mobile carrier country code. /// /// # Examples /// /// - `310` -pub const NETWORK_CARRIER_MCC: Key = Key::from_static_str("network.carrier.mcc"); +pub const NETWORK_CARRIER_MCC: &str = "network.carrier.mcc"; /// The mobile carrier network code. /// /// # Examples /// /// - `001` -pub const NETWORK_CARRIER_MNC: Key = Key::from_static_str("network.carrier.mnc"); +pub const NETWORK_CARRIER_MNC: &str = "network.carrier.mnc"; /// The ISO 3166-1 alpha-2 2-character country code associated with the mobile carrier network. /// /// # Examples /// /// - `DE` -pub const NETWORK_CARRIER_ICC: Key = Key::from_static_str("network.carrier.icc"); +pub const NETWORK_CARRIER_ICC: &str = "network.carrier.icc"; /// The [`service.name`](/docs/resource/README.md#service) of the remote service. SHOULD be equal to the actual `service.name` resource attribute of the remote service if any. /// /// # Examples /// /// - `AuthTokenCache` -pub const PEER_SERVICE: Key = Key::from_static_str("peer.service"); +pub const PEER_SERVICE: &str = "peer.service"; /// Username or client_id extracted from the access token or [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the inbound request from outside the system. /// /// # Examples /// /// - `username` -pub const ENDUSER_ID: Key = Key::from_static_str("enduser.id"); +pub const ENDUSER_ID: &str = "enduser.id"; /// Actual/assumed role the client is making the request under extracted from token or application security context. /// /// # Examples /// /// - `admin` -pub const ENDUSER_ROLE: Key = Key::from_static_str("enduser.role"); +pub const ENDUSER_ROLE: &str = "enduser.role"; /// Scopes or granted authorities the client currently possesses extracted from token or application security context. The value would come from the scope associated with an [OAuth 2.0 Access Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value in a [SAML 2.0 Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). 
/// /// # Examples /// /// - `read:message, write:files` -pub const ENDUSER_SCOPE: Key = Key::from_static_str("enduser.scope"); +pub const ENDUSER_SCOPE: &str = "enduser.scope"; /// Current "managed" thread ID (as opposed to OS thread ID). /// /// # Examples /// /// - `42` -pub const THREAD_ID: Key = Key::from_static_str("thread.id"); +pub const THREAD_ID: &str = "thread.id"; /// Current thread name. /// /// # Examples /// /// - `main` -pub const THREAD_NAME: Key = Key::from_static_str("thread.name"); +pub const THREAD_NAME: &str = "thread.name"; /// The method or function name, or equivalent (usually rightmost part of the code unit's name). /// /// # Examples /// /// - `serveRequest` -pub const CODE_FUNCTION: Key = Key::from_static_str("code.function"); +pub const CODE_FUNCTION: &str = "code.function"; /// The "namespace" within which `code.function` is defined. Usually the qualified class or module name, such that `code.namespace` + some separator + `code.function` form a unique identifier for the code unit. /// /// # Examples /// /// - `com.example.MyHttpService` -pub const CODE_NAMESPACE: Key = Key::from_static_str("code.namespace"); +pub const CODE_NAMESPACE: &str = "code.namespace"; /// The source code file name that identifies the code unit as uniquely as possible (preferably an absolute file path). /// /// # Examples /// /// - `/usr/local/MyApplication/content_root/app/index.php` -pub const CODE_FILEPATH: Key = Key::from_static_str("code.filepath"); +pub const CODE_FILEPATH: &str = "code.filepath"; /// The line number in `code.filepath` best representing the operation. It SHOULD point within the code unit named in `code.function`. /// /// # Examples /// /// - `42` -pub const CODE_LINENO: Key = Key::from_static_str("code.lineno"); +pub const CODE_LINENO: &str = "code.lineno"; /// The column number in `code.filepath` best representing the operation. It SHOULD point within the code unit named in `code.function`. /// /// # Examples /// /// - `16` -pub const CODE_COLUMN: Key = Key::from_static_str("code.column"); +pub const CODE_COLUMN: &str = "code.column"; /// Original HTTP method sent by the client in the request line. /// @@ -974,21 +970,21 @@ pub const CODE_COLUMN: Key = Key::from_static_str("code.column"); /// - `GeT` /// - `ACL` /// - `foo` -pub const HTTP_REQUEST_METHOD_ORIGINAL: Key = Key::from_static_str("http.request.method_original"); +pub const HTTP_REQUEST_METHOD_ORIGINAL: &str = "http.request.method_original"; /// The size of the request payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) header. For requests using transport encoding, this should be the compressed size. /// /// # Examples /// /// - `3495` -pub const HTTP_REQUEST_BODY_SIZE: Key = Key::from_static_str("http.request.body.size"); +pub const HTTP_REQUEST_BODY_SIZE: &str = "http.request.body.size"; /// The size of the response payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) header. For requests using transport encoding, this should be the compressed size. 
/// /// # Examples /// /// - `3495` -pub const HTTP_RESPONSE_BODY_SIZE: Key = Key::from_static_str("http.response.body.size"); +pub const HTTP_RESPONSE_BODY_SIZE: &str = "http.response.body.size"; /// The ordinal number of request resending attempt (for any reason, including redirects). /// @@ -997,7 +993,7 @@ pub const HTTP_RESPONSE_BODY_SIZE: Key = Key::from_static_str("http.response.bod /// # Examples /// /// - `3` -pub const HTTP_RESEND_COUNT: Key = Key::from_static_str("http.resend_count"); +pub const HTTP_RESEND_COUNT: &str = "http.resend_count"; /// The AWS request ID as returned in the response headers `x-amz-request-id` or `x-amz-requestid`. /// @@ -1005,7 +1001,7 @@ pub const HTTP_RESEND_COUNT: Key = Key::from_static_str("http.resend_count"); /// /// - `79b9da39-b7ae-508a-a6bc-864b2829c622` /// - `C9ER4AJX75574TDJ` -pub const AWS_REQUEST_ID: Key = Key::from_static_str("aws.request_id"); +pub const AWS_REQUEST_ID: &str = "aws.request_id"; /// The keys in the `RequestItems` object field. /// @@ -1013,23 +1009,21 @@ pub const AWS_REQUEST_ID: Key = Key::from_static_str("aws.request_id"); /// /// - `Users` /// - `Cats` -pub const AWS_DYNAMODB_TABLE_NAMES: Key = Key::from_static_str("aws.dynamodb.table_names"); +pub const AWS_DYNAMODB_TABLE_NAMES: &str = "aws.dynamodb.table_names"; /// The JSON-serialized value of each item in the `ConsumedCapacity` response field. /// /// # Examples /// /// - `{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": "string", "WriteCapacityUnits": number }` -pub const AWS_DYNAMODB_CONSUMED_CAPACITY: Key = - Key::from_static_str("aws.dynamodb.consumed_capacity"); +pub const AWS_DYNAMODB_CONSUMED_CAPACITY: &str = "aws.dynamodb.consumed_capacity"; /// The JSON-serialized value of the `ItemCollectionMetrics` response field. /// /// # Examples /// /// - `{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }` -pub const AWS_DYNAMODB_ITEM_COLLECTION_METRICS: Key = - Key::from_static_str("aws.dynamodb.item_collection_metrics"); +pub const AWS_DYNAMODB_ITEM_COLLECTION_METRICS: &str = "aws.dynamodb.item_collection_metrics"; /// The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. /// @@ -1037,8 +1031,7 @@ pub const AWS_DYNAMODB_ITEM_COLLECTION_METRICS: Key = /// /// - `1.0` /// - `2.0` -pub const AWS_DYNAMODB_PROVISIONED_READ_CAPACITY: Key = - Key::from_static_str("aws.dynamodb.provisioned_read_capacity"); +pub const AWS_DYNAMODB_PROVISIONED_READ_CAPACITY: &str = "aws.dynamodb.provisioned_read_capacity"; /// The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter. 
/// @@ -1046,11 +1039,10 @@ pub const AWS_DYNAMODB_PROVISIONED_READ_CAPACITY: Key = /// /// - `1.0` /// - `2.0` -pub const AWS_DYNAMODB_PROVISIONED_WRITE_CAPACITY: Key = - Key::from_static_str("aws.dynamodb.provisioned_write_capacity"); +pub const AWS_DYNAMODB_PROVISIONED_WRITE_CAPACITY: &str = "aws.dynamodb.provisioned_write_capacity"; /// The value of the `ConsistentRead` request parameter. -pub const AWS_DYNAMODB_CONSISTENT_READ: Key = Key::from_static_str("aws.dynamodb.consistent_read"); +pub const AWS_DYNAMODB_CONSISTENT_READ: &str = "aws.dynamodb.consistent_read"; /// The value of the `ProjectionExpression` request parameter. /// @@ -1059,14 +1051,14 @@ pub const AWS_DYNAMODB_CONSISTENT_READ: Key = Key::from_static_str("aws.dynamodb /// - `Title` /// - `Title, Price, Color` /// - `Title, Description, RelatedItems, ProductReviews` -pub const AWS_DYNAMODB_PROJECTION: Key = Key::from_static_str("aws.dynamodb.projection"); +pub const AWS_DYNAMODB_PROJECTION: &str = "aws.dynamodb.projection"; /// The value of the `Limit` request parameter. /// /// # Examples /// /// - `10` -pub const AWS_DYNAMODB_LIMIT: Key = Key::from_static_str("aws.dynamodb.limit"); +pub const AWS_DYNAMODB_LIMIT: &str = "aws.dynamodb.limit"; /// The value of the `AttributesToGet` request parameter. /// @@ -1074,15 +1066,14 @@ pub const AWS_DYNAMODB_LIMIT: Key = Key::from_static_str("aws.dynamodb.limit"); /// /// - `lives` /// - `id` -pub const AWS_DYNAMODB_ATTRIBUTES_TO_GET: Key = - Key::from_static_str("aws.dynamodb.attributes_to_get"); +pub const AWS_DYNAMODB_ATTRIBUTES_TO_GET: &str = "aws.dynamodb.attributes_to_get"; /// The value of the `IndexName` request parameter. /// /// # Examples /// /// - `name_to_group` -pub const AWS_DYNAMODB_INDEX_NAME: Key = Key::from_static_str("aws.dynamodb.index_name"); +pub const AWS_DYNAMODB_INDEX_NAME: &str = "aws.dynamodb.index_name"; /// The value of the `Select` request parameter. /// @@ -1090,23 +1081,21 @@ pub const AWS_DYNAMODB_INDEX_NAME: Key = Key::from_static_str("aws.dynamodb.inde /// /// - `ALL_ATTRIBUTES` /// - `COUNT` -pub const AWS_DYNAMODB_SELECT: Key = Key::from_static_str("aws.dynamodb.select"); +pub const AWS_DYNAMODB_SELECT: &str = "aws.dynamodb.select"; /// The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request field. /// /// # Examples /// /// - `{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": number } }` -pub const AWS_DYNAMODB_GLOBAL_SECONDARY_INDEXES: Key = - Key::from_static_str("aws.dynamodb.global_secondary_indexes"); +pub const AWS_DYNAMODB_GLOBAL_SECONDARY_INDEXES: &str = "aws.dynamodb.global_secondary_indexes"; /// The JSON-serialized value of each item of the `LocalSecondaryIndexes` request field. /// /// # Examples /// /// - `{ "IndexArn": "string", "IndexName": "string", "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }` -pub const AWS_DYNAMODB_LOCAL_SECONDARY_INDEXES: Key = - Key::from_static_str("aws.dynamodb.local_secondary_indexes"); +pub const AWS_DYNAMODB_LOCAL_SECONDARY_INDEXES: &str = "aws.dynamodb.local_secondary_indexes"; /// The value of the `ExclusiveStartTableName` request parameter. 
/// @@ -1114,62 +1103,60 @@ pub const AWS_DYNAMODB_LOCAL_SECONDARY_INDEXES: Key = /// /// - `Users` /// - `CatsTable` -pub const AWS_DYNAMODB_EXCLUSIVE_START_TABLE: Key = - Key::from_static_str("aws.dynamodb.exclusive_start_table"); +pub const AWS_DYNAMODB_EXCLUSIVE_START_TABLE: &str = "aws.dynamodb.exclusive_start_table"; /// The number of items in the `TableNames` response parameter. /// /// # Examples /// /// - `20` -pub const AWS_DYNAMODB_TABLE_COUNT: Key = Key::from_static_str("aws.dynamodb.table_count"); +pub const AWS_DYNAMODB_TABLE_COUNT: &str = "aws.dynamodb.table_count"; /// The value of the `ScanIndexForward` request parameter. -pub const AWS_DYNAMODB_SCAN_FORWARD: Key = Key::from_static_str("aws.dynamodb.scan_forward"); +pub const AWS_DYNAMODB_SCAN_FORWARD: &str = "aws.dynamodb.scan_forward"; /// The value of the `Segment` request parameter. /// /// # Examples /// /// - `10` -pub const AWS_DYNAMODB_SEGMENT: Key = Key::from_static_str("aws.dynamodb.segment"); +pub const AWS_DYNAMODB_SEGMENT: &str = "aws.dynamodb.segment"; /// The value of the `TotalSegments` request parameter. /// /// # Examples /// /// - `100` -pub const AWS_DYNAMODB_TOTAL_SEGMENTS: Key = Key::from_static_str("aws.dynamodb.total_segments"); +pub const AWS_DYNAMODB_TOTAL_SEGMENTS: &str = "aws.dynamodb.total_segments"; /// The value of the `Count` response parameter. /// /// # Examples /// /// - `10` -pub const AWS_DYNAMODB_COUNT: Key = Key::from_static_str("aws.dynamodb.count"); +pub const AWS_DYNAMODB_COUNT: &str = "aws.dynamodb.count"; /// The value of the `ScannedCount` response parameter. /// /// # Examples /// /// - `50` -pub const AWS_DYNAMODB_SCANNED_COUNT: Key = Key::from_static_str("aws.dynamodb.scanned_count"); +pub const AWS_DYNAMODB_SCANNED_COUNT: &str = "aws.dynamodb.scanned_count"; /// The JSON-serialized value of each item in the `AttributeDefinitions` request field. /// /// # Examples /// /// - `{ "AttributeName": "string", "AttributeType": "string" }` -pub const AWS_DYNAMODB_ATTRIBUTE_DEFINITIONS: Key = - Key::from_static_str("aws.dynamodb.attribute_definitions"); +pub const AWS_DYNAMODB_ATTRIBUTE_DEFINITIONS: &str = "aws.dynamodb.attribute_definitions"; /// The JSON-serialized value of each item in the `GlobalSecondaryIndexUpdates` request field. /// /// # Examples /// /// - `{ "Create": { "IndexName": "string", "KeySchema": [ { "AttributeName": "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": number } }` -pub const AWS_DYNAMODB_GLOBAL_SECONDARY_INDEX_UPDATES: Key = - Key::from_static_str("aws.dynamodb.global_secondary_index_updates"); +pub const AWS_DYNAMODB_GLOBAL_SECONDARY_INDEX_UPDATES: &str = + "aws.dynamodb.global_secondary_index_updates"; /// The S3 bucket name the request refers to. Corresponds to the `--bucket` parameter of the [S3 API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) operations. /// @@ -1179,7 +1166,7 @@ pub const AWS_DYNAMODB_GLOBAL_SECONDARY_INDEX_UPDATES: Key = /// # Examples /// /// - `some-bucket-name` -pub const AWS_S3_BUCKET: Key = Key::from_static_str("aws.s3.bucket"); +pub const AWS_S3_BUCKET: &str = "aws.s3.bucket"; /// The S3 object key the request refers to. Corresponds to the `--key` parameter of the [S3 API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) operations.
/// @@ -1203,7 +1190,7 @@ pub const AWS_S3_BUCKET: Key = Key::from_static_str("aws.s3.bucket"); /// # Examples /// /// - `someFile.yml` -pub const AWS_S3_KEY: Key = Key::from_static_str("aws.s3.key"); +pub const AWS_S3_KEY: &str = "aws.s3.key"; /// The source object (in the form `bucket`/`key`) for the copy operation. /// @@ -1217,7 +1204,7 @@ pub const AWS_S3_KEY: Key = Key::from_static_str("aws.s3.key"); /// # Examples /// /// - `someFile.yml` -pub const AWS_S3_COPY_SOURCE: Key = Key::from_static_str("aws.s3.copy_source"); +pub const AWS_S3_COPY_SOURCE: &str = "aws.s3.copy_source"; /// Upload ID that identifies the multipart upload. /// @@ -1234,7 +1221,7 @@ pub const AWS_S3_COPY_SOURCE: Key = Key::from_static_str("aws.s3.copy_source"); /// # Examples /// /// - `dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ` -pub const AWS_S3_UPLOAD_ID: Key = Key::from_static_str("aws.s3.upload_id"); +pub const AWS_S3_UPLOAD_ID: &str = "aws.s3.upload_id"; /// The delete request container that specifies the objects to be deleted. /// @@ -1245,7 +1232,7 @@ pub const AWS_S3_UPLOAD_ID: Key = Key::from_static_str("aws.s3.upload_id"); /// # Examples /// /// - `Objects=[{Key=string,VersionId=string},{Key=string,VersionId=string}],Quiet=boolean` -pub const AWS_S3_DELETE: Key = Key::from_static_str("aws.s3.delete"); +pub const AWS_S3_DELETE: &str = "aws.s3.delete"; /// The part number of the part being uploaded in a multipart-upload operation. This is a positive integer between 1 and 10,000. /// @@ -1257,14 +1244,14 @@ pub const AWS_S3_DELETE: Key = Key::from_static_str("aws.s3.delete"); /// # Examples /// /// - `3456` -pub const AWS_S3_PART_NUMBER: Key = Key::from_static_str("aws.s3.part_number"); +pub const AWS_S3_PART_NUMBER: &str = "aws.s3.part_number"; /// The name of the operation being executed. /// /// # Examples /// /// - `findBookById` -pub const GRAPHQL_OPERATION_NAME: Key = Key::from_static_str("graphql.operation.name"); +pub const GRAPHQL_OPERATION_NAME: &str = "graphql.operation.name"; /// The type of the operation being executed. /// @@ -1273,7 +1260,7 @@ pub const GRAPHQL_OPERATION_NAME: Key = Key::from_static_str("graphql.operation. /// - `query` /// - `mutation` /// - `subscription` -pub const GRAPHQL_OPERATION_TYPE: Key = Key::from_static_str("graphql.operation.type"); +pub const GRAPHQL_OPERATION_TYPE: &str = "graphql.operation.type"; /// The GraphQL document being executed. /// @@ -1282,38 +1269,36 @@ pub const GRAPHQL_OPERATION_TYPE: Key = Key::from_static_str("graphql.operation. /// # Examples /// /// - `query findBookById { bookById(id: ?) { name } }` -pub const GRAPHQL_DOCUMENT: Key = Key::from_static_str("graphql.document"); +pub const GRAPHQL_DOCUMENT: &str = "graphql.document"; /// A value used by the messaging system as an identifier for the message, represented as a string. /// /// # Examples /// /// - `452a7c7c7c7048c2f887f61572b18fc2` -pub const MESSAGING_MESSAGE_ID: Key = Key::from_static_str("messaging.message.id"); +pub const MESSAGING_MESSAGE_ID: &str = "messaging.message.id"; /// The [conversation ID](#conversations) identifying the conversation to which the message belongs, represented as a string. Sometimes called "Correlation ID". /// /// # Examples /// /// - `MyConversationId` -pub const MESSAGING_MESSAGE_CONVERSATION_ID: Key = - Key::from_static_str("messaging.message.conversation_id"); +pub const MESSAGING_MESSAGE_CONVERSATION_ID: &str = "messaging.message.conversation_id"; /// The (uncompressed) size of the message payload in bytes. 
Also use this attribute if it is unknown whether the compressed or uncompressed payload size is reported. /// /// # Examples /// /// - `2738` -pub const MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES: Key = - Key::from_static_str("messaging.message.payload_size_bytes"); +pub const MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES: &str = "messaging.message.payload_size_bytes"; /// The compressed size of the message payload in bytes. /// /// # Examples /// /// - `2048` -pub const MESSAGING_MESSAGE_PAYLOAD_COMPRESSED_SIZE_BYTES: Key = - Key::from_static_str("messaging.message.payload_compressed_size_bytes"); +pub const MESSAGING_MESSAGE_PAYLOAD_COMPRESSED_SIZE_BYTES: &str = + "messaging.message.payload_compressed_size_bytes"; /// The message destination name. /// @@ -1324,7 +1309,7 @@ pub const MESSAGING_MESSAGE_PAYLOAD_COMPRESSED_SIZE_BYTES: Key = /// /// - `MyQueue` /// - `MyTopic` -pub const MESSAGING_DESTINATION_NAME: Key = Key::from_static_str("messaging.destination.name"); +pub const MESSAGING_DESTINATION_NAME: &str = "messaging.destination.name"; /// Low cardinality representation of the messaging destination name. /// @@ -1333,16 +1318,13 @@ pub const MESSAGING_DESTINATION_NAME: Key = Key::from_static_str("messaging.dest /// # Examples /// /// - `/customers/{customerId}` -pub const MESSAGING_DESTINATION_TEMPLATE: Key = - Key::from_static_str("messaging.destination.template"); +pub const MESSAGING_DESTINATION_TEMPLATE: &str = "messaging.destination.template"; /// A boolean that is true if the message destination is temporary and might not exist anymore after messages are processed. -pub const MESSAGING_DESTINATION_TEMPORARY: Key = - Key::from_static_str("messaging.destination.temporary"); +pub const MESSAGING_DESTINATION_TEMPORARY: &str = "messaging.destination.temporary"; /// A boolean that is true if the message destination is anonymous (could be unnamed or have auto-generated name). -pub const MESSAGING_DESTINATION_ANONYMOUS: Key = - Key::from_static_str("messaging.destination.anonymous"); +pub const MESSAGING_DESTINATION_ANONYMOUS: &str = "messaging.destination.anonymous"; /// A string identifying the messaging system. /// @@ -1353,12 +1335,12 @@ pub const MESSAGING_DESTINATION_ANONYMOUS: Key = /// - `rocketmq` /// - `activemq` /// - `AmazonSQS` -pub const MESSAGING_SYSTEM: Key = Key::from_static_str("messaging.system"); +pub const MESSAGING_SYSTEM: &str = "messaging.system"; /// A string identifying the kind of messaging operation as defined in the [Operation names](#operation-names) section above. /// /// If a custom value is used, it MUST be of low cardinality. -pub const MESSAGING_OPERATION: Key = Key::from_static_str("messaging.operation"); +pub const MESSAGING_OPERATION: &str = "messaging.operation"; /// The number of messages sent, received, or processed in the scope of the batching operation. /// @@ -1369,8 +1351,7 @@ pub const MESSAGING_OPERATION: Key = Key::from_static_str("messaging.operation") /// - `0` /// - `1` /// - `2` -pub const MESSAGING_BATCH_MESSAGE_COUNT: Key = - Key::from_static_str("messaging.batch.message_count"); +pub const MESSAGING_BATCH_MESSAGE_COUNT: &str = "messaging.batch.message_count"; /// A unique identifier for the client that consumes or produces a message. /// @@ -1378,15 +1359,15 @@ pub const MESSAGING_BATCH_MESSAGE_COUNT: Key = /// /// - `client-5` /// - `myhost@8742@s8083jm` -pub const MESSAGING_CLIENT_ID: Key = Key::from_static_str("messaging.client_id"); +pub const MESSAGING_CLIENT_ID: &str = "messaging.client_id"; /// RabbitMQ message routing key. 
/// /// # Examples /// /// - `myKey` -pub const MESSAGING_RABBITMQ_DESTINATION_ROUTING_KEY: Key = - Key::from_static_str("messaging.rabbitmq.destination.routing_key"); +pub const MESSAGING_RABBITMQ_DESTINATION_ROUTING_KEY: &str = + "messaging.rabbitmq.destination.routing_key"; /// Message keys in Kafka are used for grouping alike messages to ensure they're processed on the same partition. They differ from `messaging.message.id` in that they're not unique. If the key is `null`, the attribute MUST NOT be set. /// @@ -1395,86 +1376,78 @@ pub const MESSAGING_RABBITMQ_DESTINATION_ROUTING_KEY: Key = /// # Examples /// /// - `myKey` -pub const MESSAGING_KAFKA_MESSAGE_KEY: Key = Key::from_static_str("messaging.kafka.message.key"); +pub const MESSAGING_KAFKA_MESSAGE_KEY: &str = "messaging.kafka.message.key"; /// Name of the Kafka Consumer Group that is handling the message. Only applies to consumers, not producers. /// /// # Examples /// /// - `my-group` -pub const MESSAGING_KAFKA_CONSUMER_GROUP: Key = - Key::from_static_str("messaging.kafka.consumer.group"); +pub const MESSAGING_KAFKA_CONSUMER_GROUP: &str = "messaging.kafka.consumer.group"; /// Partition the message is sent to. /// /// # Examples /// /// - `2` -pub const MESSAGING_KAFKA_DESTINATION_PARTITION: Key = - Key::from_static_str("messaging.kafka.destination.partition"); +pub const MESSAGING_KAFKA_DESTINATION_PARTITION: &str = "messaging.kafka.destination.partition"; /// The offset of a record in the corresponding Kafka partition. /// /// # Examples /// /// - `42` -pub const MESSAGING_KAFKA_MESSAGE_OFFSET: Key = - Key::from_static_str("messaging.kafka.message.offset"); +pub const MESSAGING_KAFKA_MESSAGE_OFFSET: &str = "messaging.kafka.message.offset"; /// A boolean that is true if the message is a tombstone. -pub const MESSAGING_KAFKA_MESSAGE_TOMBSTONE: Key = - Key::from_static_str("messaging.kafka.message.tombstone"); +pub const MESSAGING_KAFKA_MESSAGE_TOMBSTONE: &str = "messaging.kafka.message.tombstone"; /// Namespace of RocketMQ resources, resources in different namespaces are individual. /// /// # Examples /// /// - `myNamespace` -pub const MESSAGING_ROCKETMQ_NAMESPACE: Key = Key::from_static_str("messaging.rocketmq.namespace"); +pub const MESSAGING_ROCKETMQ_NAMESPACE: &str = "messaging.rocketmq.namespace"; /// Name of the RocketMQ producer/consumer group that is handling the message. The client type is identified by the SpanKind. /// /// # Examples /// /// - `myConsumerGroup` -pub const MESSAGING_ROCKETMQ_CLIENT_GROUP: Key = - Key::from_static_str("messaging.rocketmq.client_group"); +pub const MESSAGING_ROCKETMQ_CLIENT_GROUP: &str = "messaging.rocketmq.client_group"; /// The timestamp in milliseconds that the delay message is expected to be delivered to consumer. /// /// # Examples /// /// - `1665987217045` -pub const MESSAGING_ROCKETMQ_MESSAGE_DELIVERY_TIMESTAMP: Key = - Key::from_static_str("messaging.rocketmq.message.delivery_timestamp"); +pub const MESSAGING_ROCKETMQ_MESSAGE_DELIVERY_TIMESTAMP: &str = + "messaging.rocketmq.message.delivery_timestamp"; /// The delay time level for delay message, which determines the message delay time. /// /// # Examples /// /// - `3` -pub const MESSAGING_ROCKETMQ_MESSAGE_DELAY_TIME_LEVEL: Key = - Key::from_static_str("messaging.rocketmq.message.delay_time_level"); +pub const MESSAGING_ROCKETMQ_MESSAGE_DELAY_TIME_LEVEL: &str = + "messaging.rocketmq.message.delay_time_level"; /// It is essential for FIFO message. 
Messages that belong to the same message group are always processed one by one within the same consumer group. /// /// # Examples /// /// - `myMessageGroup` -pub const MESSAGING_ROCKETMQ_MESSAGE_GROUP: Key = - Key::from_static_str("messaging.rocketmq.message.group"); +pub const MESSAGING_ROCKETMQ_MESSAGE_GROUP: &str = "messaging.rocketmq.message.group"; /// Type of message. -pub const MESSAGING_ROCKETMQ_MESSAGE_TYPE: Key = - Key::from_static_str("messaging.rocketmq.message.type"); +pub const MESSAGING_ROCKETMQ_MESSAGE_TYPE: &str = "messaging.rocketmq.message.type"; /// The secondary classifier of message besides topic. /// /// # Examples /// /// - `tagA` -pub const MESSAGING_ROCKETMQ_MESSAGE_TAG: Key = - Key::from_static_str("messaging.rocketmq.message.tag"); +pub const MESSAGING_ROCKETMQ_MESSAGE_TAG: &str = "messaging.rocketmq.message.tag"; /// Key(s) of message, another way to mark message besides message id. /// @@ -1482,15 +1455,13 @@ pub const MESSAGING_ROCKETMQ_MESSAGE_TAG: Key = /// /// - `keyA` /// - `keyB` -pub const MESSAGING_ROCKETMQ_MESSAGE_KEYS: Key = - Key::from_static_str("messaging.rocketmq.message.keys"); +pub const MESSAGING_ROCKETMQ_MESSAGE_KEYS: &str = "messaging.rocketmq.message.keys"; /// Model of message consumption. This only applies to consumer spans. -pub const MESSAGING_ROCKETMQ_CONSUMPTION_MODEL: Key = - Key::from_static_str("messaging.rocketmq.consumption_model"); +pub const MESSAGING_ROCKETMQ_CONSUMPTION_MODEL: &str = "messaging.rocketmq.consumption_model"; /// A string identifying the remoting system. See below for a list of well-known identifiers. -pub const RPC_SYSTEM: Key = Key::from_static_str("rpc.system"); +pub const RPC_SYSTEM: &str = "rpc.system"; /// The full (logical) name of the service being called, including its package name, if applicable. /// @@ -1499,7 +1470,7 @@ pub const RPC_SYSTEM: Key = Key::from_static_str("rpc.system"); /// # Examples /// /// - `myservice.EchoService` -pub const RPC_SERVICE: Key = Key::from_static_str("rpc.service"); +pub const RPC_SERVICE: &str = "rpc.service"; /// The name of the (logical) method being called, must be equal to the $method part in the span name. /// @@ -1508,10 +1479,10 @@ pub const RPC_SERVICE: Key = Key::from_static_str("rpc.service"); /// # Examples /// /// - `exampleMethod` -pub const RPC_METHOD: Key = Key::from_static_str("rpc.method"); +pub const RPC_METHOD: &str = "rpc.method"; /// The [numeric status code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC request. -pub const RPC_GRPC_STATUS_CODE: Key = Key::from_static_str("rpc.grpc.status_code"); +pub const RPC_GRPC_STATUS_CODE: &str = "rpc.grpc.status_code"; /// Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 does not specify this, the value can be omitted. /// @@ -1519,7 +1490,7 @@ pub const RPC_GRPC_STATUS_CODE: Key = Key::from_static_str("rpc.grpc.status_code /// /// - `2.0` /// - `1.0` -pub const RPC_JSONRPC_VERSION: Key = Key::from_static_str("rpc.jsonrpc.version"); +pub const RPC_JSONRPC_VERSION: &str = "rpc.jsonrpc.version"; /// `id` property of request or response. Since protocol allows id to be int, string, `null` or missing (for notifications), value is expected to be cast to string for simplicity. Use empty string in case of `null` value. Omit entirely if this is a notification. 
/// @@ -1528,7 +1499,7 @@ pub const RPC_JSONRPC_VERSION: Key = Key::from_static_str("rpc.jsonrpc.version") /// - `10` /// - `request-7` /// - `` -pub const RPC_JSONRPC_REQUEST_ID: Key = Key::from_static_str("rpc.jsonrpc.request_id"); +pub const RPC_JSONRPC_REQUEST_ID: &str = "rpc.jsonrpc.request_id"; /// `error.code` property of response if it is an error response. /// @@ -1536,7 +1507,7 @@ pub const RPC_JSONRPC_REQUEST_ID: Key = Key::from_static_str("rpc.jsonrpc.reques /// /// - `-32700` /// - `100` -pub const RPC_JSONRPC_ERROR_CODE: Key = Key::from_static_str("rpc.jsonrpc.error_code"); +pub const RPC_JSONRPC_ERROR_CODE: &str = "rpc.jsonrpc.error_code"; /// `error.message` property of response if it is an error response. /// @@ -1544,24 +1515,24 @@ pub const RPC_JSONRPC_ERROR_CODE: Key = Key::from_static_str("rpc.jsonrpc.error_ /// /// - `Parse error` /// - `User already exists` -pub const RPC_JSONRPC_ERROR_MESSAGE: Key = Key::from_static_str("rpc.jsonrpc.error_message"); +pub const RPC_JSONRPC_ERROR_MESSAGE: &str = "rpc.jsonrpc.error_message"; /// Whether this is a received or sent message. -pub const MESSAGE_TYPE: Key = Key::from_static_str("message.type"); +pub const MESSAGE_TYPE: &str = "message.type"; /// MUST be calculated as two different counters starting from `1` one for sent messages and one for received message. /// /// This way we guarantee that the values will be consistent between different implementations. -pub const MESSAGE_ID: Key = Key::from_static_str("message.id"); +pub const MESSAGE_ID: &str = "message.id"; /// Compressed size of the message in bytes. -pub const MESSAGE_COMPRESSED_SIZE: Key = Key::from_static_str("message.compressed_size"); +pub const MESSAGE_COMPRESSED_SIZE: &str = "message.compressed_size"; /// Uncompressed size of the message in bytes. -pub const MESSAGE_UNCOMPRESSED_SIZE: Key = Key::from_static_str("message.uncompressed_size"); +pub const MESSAGE_UNCOMPRESSED_SIZE: &str = "message.uncompressed_size"; /// The [error codes](https://connect.build/docs/protocol/#error-codes) of the Connect request. Error codes are always string values. -pub const RPC_CONNECT_RPC_ERROR_CODE: Key = Key::from_static_str("rpc.connect_rpc.error_code"); +pub const RPC_CONNECT_RPC_ERROR_CODE: &str = "rpc.connect_rpc.error_code"; /// SHOULD be set to true if the exception event is recorded at a point where it is known that the exception is escaping the scope of the span. /// @@ -1581,7 +1552,7 @@ pub const RPC_CONNECT_RPC_ERROR_CODE: Key = Key::from_static_str("rpc.connect_rp /// even if the `exception.escaped` attribute was not set or set to false, /// since the event might have been recorded at a time where it was not /// clear whether the exception will escape. -pub const EXCEPTION_ESCAPED: Key = Key::from_static_str("exception.escaped"); +pub const EXCEPTION_ESCAPED: &str = "exception.escaped"; /// The [URI scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component identifying the used protocol. /// @@ -1590,7 +1561,7 @@ pub const EXCEPTION_ESCAPED: Key = Key::from_static_str("exception.escaped"); /// - `https` /// - `ftp` /// - `telnet` -pub const URL_SCHEME: Key = Key::from_static_str("url.scheme"); +pub const URL_SCHEME: &str = "url.scheme"; /// Absolute URL describing a network resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986). 
/// @@ -1602,7 +1573,7 @@ pub const URL_SCHEME: Key = Key::from_static_str("url.scheme"); /// /// - `https://www.foo.bar/search?q=OpenTelemetry#SemConv` /// - `//localhost` -pub const URL_FULL: Key = Key::from_static_str("url.full"); +pub const URL_FULL: &str = "url.full"; /// The [URI path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component. /// @@ -1611,7 +1582,7 @@ pub const URL_FULL: Key = Key::from_static_str("url.full"); /// # Examples /// /// - `/search` -pub const URL_PATH: Key = Key::from_static_str("url.path"); +pub const URL_PATH: &str = "url.path"; /// The [URI query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component. /// @@ -1620,18 +1591,18 @@ pub const URL_PATH: Key = Key::from_static_str("url.path"); /// # Examples /// /// - `q=OpenTelemetry` -pub const URL_QUERY: Key = Key::from_static_str("url.query"); +pub const URL_QUERY: &str = "url.query"; /// The [URI fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component. /// /// # Examples /// /// - `SemConv` -pub const URL_FRAGMENT: Key = Key::from_static_str("url.fragment"); +pub const URL_FRAGMENT: &str = "url.fragment"; /// Value of the [HTTP User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) header sent by the client. /// /// # Examples /// /// - `CERN-LineMode/2.15 libwww/2.17b3` -pub const USER_AGENT_ORIGINAL: Key = Key::from_static_str("user_agent.original"); +pub const USER_AGENT_ORIGINAL: &str = "user_agent.original"; diff --git a/opentelemetry-stackdriver/src/lib.rs b/opentelemetry-stackdriver/src/lib.rs index af6232f5f5..960c551bfa 100644 --- a/opentelemetry-stackdriver/src/lib.rs +++ b/opentelemetry-stackdriver/src/lib.rs @@ -37,10 +37,7 @@ use opentelemetry_sdk::{ }, Resource, }; -use opentelemetry_semantic_conventions::resource::SERVICE_NAME; -use opentelemetry_semantic_conventions::trace::{ - HTTP_METHOD, HTTP_ROUTE, HTTP_STATUS_CODE, HTTP_TARGET, HTTP_URL, -}; +use opentelemetry_semantic_conventions as semconv; use thiserror::Error; #[cfg(any(feature = "yup-authorizer", feature = "gcp_auth"))] use tonic::metadata::MetadataValue; @@ -54,8 +51,11 @@ use yup_oauth2::authenticator::Authenticator; #[allow(clippy::derive_partial_eq_without_eq)] // tonic doesn't derive Eq for generated types pub mod proto; -const HTTP_HOST: Key = Key::from_static_str("http.host"); -const HTTP_USER_AGENT: Key = Key::from_static_str("http.user_agent"); +const HTTP_HOST: &str = "http.host"; +const HTTP_PATH: &str = "http.path"; +const HTTP_USER_AGENT: &str = "http.user_agent"; + +const GCP_HTTP_PATH: &str = "/http/path"; use proto::devtools::cloudtrace::v2::span::time_event::Annotation; use proto::devtools::cloudtrace::v2::span::{ @@ -738,14 +738,14 @@ impl From<(Vec, &Resource)> for Attributes { return None; } - if k == SERVICE_NAME { + if k.as_str() == semconv::resource::SERVICE_NAME { return Some((GCP_SERVICE_NAME.to_owned(), v.into())); - } else if key == HTTP_PATH_ATTRIBUTE { + } else if key == HTTP_PATH { return Some((GCP_HTTP_PATH.to_owned(), v.into())); } for (otel_key, gcp_key) in KEY_MAP { - if otel_key == &k { + if otel_key == k.as_str() { return Some((gcp_key.to_owned(), v.into())); } } @@ -783,14 +783,15 @@ fn transform_links(links: &opentelemetry_sdk::trace::SpanLinks) -> Option } // Map conventional OpenTelemetry keys to their GCP counterparts. 
-const KEY_MAP: [(&Key, &str); 7] = [ - (&HTTP_HOST, "/http/host"), - (&HTTP_METHOD, "/http/method"), - (&HTTP_TARGET, "/http/path"), - (&HTTP_URL, "/http/url"), - (&HTTP_USER_AGENT, "/http/user_agent"), - (&HTTP_STATUS_CODE, "/http/status_code"), - (&HTTP_ROUTE, "/http/route"), +const KEY_MAP: [(&str, &str); 8] = [ + (HTTP_HOST, "/http/host"), + (semconv::trace::HTTP_METHOD, "/http/method"), + (semconv::trace::HTTP_TARGET, "/http/path"), + (semconv::trace::HTTP_URL, "/http/url"), + (HTTP_USER_AGENT, "/http/user_agent"), + (semconv::trace::HTTP_STATUS_CODE, "/http/status_code"), + (semconv::trace::HTTP_ROUTE, "/http/route"), + (HTTP_PATH, GCP_HTTP_PATH), ]; impl From for SpanKind { @@ -822,8 +823,6 @@ fn status(value: opentelemetry::trace::Status) -> Option { } const TRACE_APPEND: &str = "https://www.googleapis.com/auth/trace.append"; const LOGGING_WRITE: &str = "https://www.googleapis.com/auth/logging.write"; -const HTTP_PATH_ATTRIBUTE: &str = "http.path"; -const GCP_HTTP_PATH: &str = "/http/path"; const GCP_SERVICE_NAME: &str = "g.co/gae/app/module"; const MAX_ATTRIBUTES_PER_SPAN: usize = 32; @@ -839,33 +838,40 @@ mod tests { let mut attributes = Vec::with_capacity(capacity); // hostAttribute = "http.host" - attributes.push(HTTP_HOST.string("example.com:8080")); + attributes.push(KeyValue::new(HTTP_HOST, "example.com:8080")); // methodAttribute = "http.method" - attributes.push(semcov::trace::HTTP_METHOD.string("POST")); + attributes.push(KeyValue::new(semcov::trace::HTTP_METHOD, "POST")); // pathAttribute = "http.path" - attributes.push(KeyValue::new( - "http.path", - Value::String("/path/12314/?q=ddds#123".into()), - )); + attributes.push(KeyValue::new(HTTP_PATH, "/path/12314/?q=ddds#123")); // urlAttribute = "http.url" - attributes.push( - semcov::trace::HTTP_URL.string("https://example.com:8080/webshop/articles/4?s=1"), - ); + attributes.push(KeyValue::new( + semcov::trace::HTTP_URL, + "https://example.com:8080/webshop/articles/4?s=1", + )); // userAgentAttribute = "http.user_agent" - attributes.push(HTTP_USER_AGENT.string("CERN-LineMode/2.15 libwww/2.17b3")); + attributes.push(KeyValue::new( + HTTP_USER_AGENT, + "CERN-LineMode/2.15 libwww/2.17b3", + )); // statusCodeAttribute = "http.status_code" - attributes.push(semcov::trace::HTTP_STATUS_CODE.i64(200)); + attributes.push(KeyValue::new(semcov::trace::HTTP_STATUS_CODE, 200i64)); // statusCodeAttribute = "http.route" - attributes.push(semcov::trace::HTTP_ROUTE.string("/webshop/articles/:article_id")); + attributes.push(KeyValue::new( + semcov::trace::HTTP_ROUTE, + "/webshop/articles/:article_id", + )); // serviceAttribute = "service.name" - let resources = Resource::new([semcov::resource::SERVICE_NAME.string("Test Service Name")]); + let resources = Resource::new([KeyValue::new( + semcov::resource::SERVICE_NAME, + "Test Service Name", + )]); let actual: Attributes = (attributes, &resources).into(); @@ -919,7 +925,10 @@ mod tests { #[test] fn test_too_many() { - let resources = Resource::new([semcov::resource::SERVICE_NAME.string("Test Service Name")]); + let resources = Resource::new([KeyValue::new( + semcov::resource::SERVICE_NAME, + "Test Service Name", + )]); let mut attributes = Vec::with_capacity(32); for i in 0..32 { attributes.push(KeyValue::new( @@ -942,7 +951,10 @@ mod tests { #[test] fn test_attributes_mapping_http_target() { - let attributes = vec![semcov::trace::HTTP_TARGET.string("/path/12314/?q=ddds#123")]; + let attributes = vec![KeyValue::new( + semcov::trace::HTTP_TARGET, + "/path/12314/?q=ddds#123", + )]; // 
hostAttribute = "http.target" diff --git a/opentelemetry-zipkin/src/exporter/mod.rs b/opentelemetry-zipkin/src/exporter/mod.rs index bc124593a5..5f40bda53d 100644 --- a/opentelemetry-zipkin/src/exporter/mod.rs +++ b/opentelemetry-zipkin/src/exporter/mod.rs @@ -120,7 +120,7 @@ impl ZipkinPipelineBuilder { cfg.resource = Cow::Owned(Resource::new( cfg.resource .iter() - .filter(|(k, _v)| **k != semcov::resource::SERVICE_NAME) + .filter(|(k, _v)| k.as_str() != semcov::resource::SERVICE_NAME) .map(|(k, v)| KeyValue::new(k.clone(), v.clone())) .collect::>(), )); @@ -135,7 +135,7 @@ impl ZipkinPipelineBuilder { } else { let service_name = SdkProvidedResourceDetector .detect(Duration::from_secs(0)) - .get(semcov::resource::SERVICE_NAME) + .get(semcov::resource::SERVICE_NAME.into()) .unwrap() .to_string(); ( From cb2d127fe01899b4d2e8cb77dba63e25bfda6054 Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Sun, 12 Nov 2023 13:32:38 -0800 Subject: [PATCH 24/68] Flush fix for SimpleLogProcessor (#1308) SimpleLogProcessor's `force_flush` was incorrectly implemented as it did not do anything assuming there was nothing to flush. But the SimpleLogProcessor uses a queue with pub-sub mechanism to operate, so flush is not a no-op. This is handled correctly for SimpleSpanProcessor. Instead of borrowing the approach from SimpleSpanProcessor, I have refactored the SimpleLogProcessor to use no queue and instead use a simple Mutex protected exporter/shutdown. I feel this is sufficient (and want to port this to SimpleSpanProcessor as well), but would like to get feedback on the approach - was there some scenario which prompted the queue/pub-sub for SimpleProcessors? Or this approach is sufficient? There should not be any perf concerns as SimpleProcessors are used for learning/dev scenarios, and not for production. (Except when you are exporting to operating system native tracing like etw, user_events, but for them we have written RentrantProcessor separately.) --- opentelemetry-sdk/src/logs/log_processor.rs | 59 +++++++-------------- opentelemetry-sdk/src/logs/mod.rs | 48 +++++++++++++++++ 2 files changed, 67 insertions(+), 40 deletions(-) diff --git a/opentelemetry-sdk/src/logs/log_processor.rs b/opentelemetry-sdk/src/logs/log_processor.rs index 342be3395f..cc3ffd5e13 100644 --- a/opentelemetry-sdk/src/logs/log_processor.rs +++ b/opentelemetry-sdk/src/logs/log_processor.rs @@ -13,7 +13,7 @@ use opentelemetry::{ global, logs::{LogError, LogResult}, }; -use std::thread; +use std::sync::Mutex; use std::{ fmt::{self, Debug, Formatter}, time::Duration, @@ -42,63 +42,42 @@ pub trait LogProcessor: Send + Sync + Debug { /// emitted. If you find this limiting, consider the batch processor instead. 
 #[derive(Debug)]
 pub struct SimpleLogProcessor {
-    sender: crossbeam_channel::Sender<Option<LogData>>,
-    shutdown: crossbeam_channel::Receiver<()>,
+    exporter: Mutex<Box<dyn LogExporter>>,
 }

 impl SimpleLogProcessor {
-    pub(crate) fn new(mut exporter: Box<dyn LogExporter>) -> Self {
-        let (log_tx, log_rx) = crossbeam_channel::unbounded();
-        let (shutdown_tx, shutdown_rx) = crossbeam_channel::bounded(0);
-
-        let _ = thread::Builder::new()
-            .name("opentelemetry-log-exporter".to_string())
-            .spawn(move || {
-                while let Ok(Some(log)) = log_rx.recv() {
-                    if let Err(err) = futures_executor::block_on(exporter.export(vec![log])) {
-                        global::handle_error(err);
-                    }
-                }
-
-                exporter.shutdown();
-
-                if let Err(err) = shutdown_tx.send(()) {
-                    global::handle_error(LogError::from(format!(
-                        "could not send shutdown: {:?}",
-                        err
-                    )));
-                }
-            });
-
+    pub(crate) fn new(exporter: Box<dyn LogExporter>) -> Self {
         SimpleLogProcessor {
-            sender: log_tx,
-            shutdown: shutdown_rx,
+            exporter: Mutex::new(exporter),
         }
     }
 }

 impl LogProcessor for SimpleLogProcessor {
     fn emit(&self, data: LogData) {
-        if let Err(err) = self.sender.send(Some(data)) {
-            global::handle_error(LogError::from(format!("error processing log {:?}", err)));
+        let result = self
+            .exporter
+            .lock()
+            .map_err(|_| LogError::Other("simple logprocessor mutex poison".into()))
+            .and_then(|mut exporter| futures_executor::block_on(exporter.export(vec![data])));
+        if let Err(err) = result {
+            global::handle_error(err);
         }
     }

     fn force_flush(&self) -> LogResult<()> {
-        // Ignored since all logs in Simple Processor will be exported as they ended.
         Ok(())
     }

     fn shutdown(&mut self) -> LogResult<()> {
-        if self.sender.send(None).is_ok() {
-            if let Err(err) = self.shutdown.recv() {
-                global::handle_error(LogError::from(format!(
-                    "error shutting down log processor: {:?}",
-                    err
-                )))
-            }
+        if let Ok(mut exporter) = self.exporter.lock() {
+            exporter.shutdown();
+            Ok(())
+        } else {
+            Err(LogError::Other(
+                "simple logprocessor mutex poison during shutdown".into(),
+            ))
         }
-        Ok(())
     }

     #[cfg(feature = "logs_level_enabled")]
@@ -108,7 +87,7 @@ impl LogProcessor for SimpleLogProcessor {
 }

 /// A [`LogProcessor`] that asynchronously buffers log records and reports
-/// them at a preconfigured interval.
+/// them at a pre-configured interval.
 pub struct BatchLogProcessor<R: RuntimeChannel> {
     message_sender: R::Sender<BatchMessage>,
 }

diff --git a/opentelemetry-sdk/src/logs/mod.rs b/opentelemetry-sdk/src/logs/mod.rs
index 45d16d5467..f3b8ebd11a 100644
--- a/opentelemetry-sdk/src/logs/mod.rs
+++ b/opentelemetry-sdk/src/logs/mod.rs
@@ -9,3 +9,51 @@ pub use log_emitter::{Builder, Logger, LoggerProvider};
 pub use log_processor::{
     BatchConfig, BatchLogProcessor, BatchLogProcessorBuilder, LogProcessor, SimpleLogProcessor,
 };
+
+#[cfg(all(test, feature = "testing"))]
+mod tests {
+    use super::*;
+    use crate::testing::logs::InMemoryLogsExporter;
+    use opentelemetry::logs::{LogRecord, Logger, LoggerProvider as _, Severity};
+    use opentelemetry::{logs::AnyValue, Key};
+
+    #[test]
+    fn logging_sdk_test() {
+        // Arrange
+        let exporter: InMemoryLogsExporter = InMemoryLogsExporter::default();
+        let logger_provider = LoggerProvider::builder()
+            .with_log_processor(SimpleLogProcessor::new(Box::new(exporter.clone())))
+            .build();
+
+        // Act
+        let logger = logger_provider.logger("test-logger");
+        let mut log_record: LogRecord = LogRecord::default();
+        log_record.severity_number = Some(Severity::Error);
+        log_record.severity_text = Some("Error".into());
+        let attributes = vec![
+            (Key::new("key1"), "value1".into()),
+            (Key::new("key2"), "value2".into()),
+        ];
+        log_record.attributes = Some(attributes);
+        logger.emit(log_record);
+
+        logger_provider.force_flush();
+
+        // Assert
+        let exported_logs = exporter
+            .get_emitted_logs()
+            .expect("Logs are expected to be exported.");
+        assert_eq!(exported_logs.len(), 1);
+        let log = exported_logs
+            .get(0)
+            .expect("At least one log is expected to be present.");
+        assert_eq!(log.instrumentation.name, "test-logger");
+        assert_eq!(log.record.severity_number, Some(Severity::Error));
+        let attributes: Vec<(Key, AnyValue)> = log
+            .record
+            .attributes
+            .clone()
+            .expect("Attributes are expected");
+        assert_eq!(attributes.len(), 2);
+    }
+}

From 6175c3f1edf8510d947a6543c68512408ae15836 Mon Sep 17 00:00:00 2001
From: Julian Tescher
Date: Mon, 13 Nov 2023 15:18:30 -0500
Subject: [PATCH 25/68] Prepare for patch release (#1368)
---
 opentelemetry-prometheus/CHANGELOG.md | 2 ++
 opentelemetry-prometheus/Cargo.toml   | 2 +-
 opentelemetry-sdk/CHANGELOG.md        | 2 ++
 opentelemetry-sdk/Cargo.toml          | 2 +-
 4 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/opentelemetry-prometheus/CHANGELOG.md b/opentelemetry-prometheus/CHANGELOG.md
index cb3868519b..9436957992 100644
--- a/opentelemetry-prometheus/CHANGELOG.md
+++ b/opentelemetry-prometheus/CHANGELOG.md
@@ -2,6 +2,8 @@

 ## vNext

+## v0.14.1
+
 ### Fixed

 - Fix UCUM annotation escaping by ignoring unknown instrument units and annotations (#1348)

diff --git a/opentelemetry-prometheus/Cargo.toml b/opentelemetry-prometheus/Cargo.toml
index e883557449..260b306cc8 100644
--- a/opentelemetry-prometheus/Cargo.toml
+++ b/opentelemetry-prometheus/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "opentelemetry-prometheus"
-version = "0.14.0"
+version = "0.14.1"
 description = "Prometheus exporter for OpenTelemetry"
 homepage = "https://github.com/open-telemetry/opentelemetry-rust"
 repository = "https://github.com/open-telemetry/opentelemetry-rust"

diff --git a/opentelemetry-sdk/CHANGELOG.md b/opentelemetry-sdk/CHANGELOG.md
index a5fadd3a1c..03c71af2f8 100644
--- a/opentelemetry-sdk/CHANGELOG.md
+++ b/opentelemetry-sdk/CHANGELOG.md
@@ -22,6 +22,8 @@
   `SpanData` now stores `events` as `SpanEvents` instead of `EvictedQueue` where
   `SpanEvents` is a struct with a `Vec` of events and `dropped_count`.
+## v0.21.1 + ### Fixed - Fix metric export corruption if gauges have not received a last value. (#1363) diff --git a/opentelemetry-sdk/Cargo.toml b/opentelemetry-sdk/Cargo.toml index ec7c73be5b..e81cd6739b 100644 --- a/opentelemetry-sdk/Cargo.toml +++ b/opentelemetry-sdk/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "opentelemetry_sdk" -version = "0.21.0" +version = "0.21.1" description = "The SDK for the OpenTelemetry metrics collection and distributed tracing framework" homepage = "https://github.com/open-telemetry/opentelemetry-rust" repository = "https://github.com/open-telemetry/opentelemetry-rust" From 59728c8947e9a5eeda085a245878c6db8b94bea4 Mon Sep 17 00:00:00 2001 From: Harold Dost Date: Mon, 13 Nov 2023 22:11:09 +0100 Subject: [PATCH 26/68] Remove contrib crates. (#1366) --- Cargo.toml | 8 - examples/traceresponse/Cargo.toml | 25 - examples/traceresponse/README.md | 28 - examples/traceresponse/src/client.rs | 69 - examples/traceresponse/src/server.rs | 67 - opentelemetry-aws/CHANGELOG.md | 65 - opentelemetry-aws/CODEOWNERS | 5 - opentelemetry-aws/Cargo.toml | 34 - opentelemetry-aws/LICENSE | 201 -- opentelemetry-aws/README.md | 27 - opentelemetry-aws/src/lib.rs | 391 ---- opentelemetry-contrib/CHANGELOG.md | 100 - opentelemetry-contrib/CODEOWNERS | 5 - opentelemetry-contrib/Cargo.toml | 55 - opentelemetry-contrib/LICENSE | 201 -- opentelemetry-contrib/README.md | 23 - opentelemetry-contrib/benches/new_span.rs | 183 -- opentelemetry-contrib/src/lib.rs | 33 - opentelemetry-contrib/src/trace/context.rs | 184 -- .../src/trace/exporter/jaeger_json.rs | 310 --- .../src/trace/exporter/mod.rs | 13 - opentelemetry-contrib/src/trace/mod.rs | 15 - .../trace/propagator/binary/base64_format.rs | 91 - .../propagator/binary/binary_propagator.rs | 178 -- .../src/trace/propagator/binary/mod.rs | 42 - .../src/trace/propagator/mod.rs | 12 - .../propagator/trace_context_response.rs | 236 -- .../src/trace/tracer_source.rs | 58 - opentelemetry-datadog/CHANGELOG.md | 94 - opentelemetry-datadog/CODEOWNERS | 5 - opentelemetry-datadog/Cargo.toml | 52 - opentelemetry-datadog/LICENSE | 201 -- opentelemetry-datadog/README.md | 40 - opentelemetry-datadog/examples/README.md | 16 - opentelemetry-datadog/examples/datadog.rs | 40 - opentelemetry-datadog/src/exporter/intern.rs | 53 - opentelemetry-datadog/src/exporter/mod.rs | 514 ----- .../src/exporter/model/mod.rs | 298 --- .../src/exporter/model/unified_tags.rs | 123 -- .../src/exporter/model/v03.rs | 126 -- .../src/exporter/model/v05.rs | 222 -- opentelemetry-datadog/src/lib.rs | 401 ---- opentelemetry-dynatrace/CHANGELOG.md | 31 - opentelemetry-dynatrace/CODEOWNERS | 5 - opentelemetry-dynatrace/LICENSE | 201 -- opentelemetry-dynatrace/README.md | 16 - opentelemetry-stackdriver/CHANGELOG.md | 105 - opentelemetry-stackdriver/Cargo.toml | 45 - opentelemetry-stackdriver/LICENSE-APACHE | 201 -- opentelemetry-stackdriver/README.md | 7 - .../proto/google/api/annotations.proto | 31 - .../proto/google/api/client.proto | 349 --- .../proto/google/api/field_behavior.proto | 90 - .../proto/google/api/http.proto | 375 ---- .../proto/google/api/label.proto | 48 - .../proto/google/api/launch_stage.proto | 72 - .../proto/google/api/monitored_resource.proto | 125 -- .../proto/google/api/resource.proto | 238 -- .../google/devtools/cloudtrace/v2/trace.proto | 387 ---- .../devtools/cloudtrace/v2/tracing.proto | 80 - .../google/logging/type/http_request.proto | 95 - .../google/logging/type/log_severity.proto | 71 - .../proto/google/logging/v2/log_entry.proto | 241 -- 
.../proto/google/logging/v2/logging.proto | 487 ---- .../google/logging/v2/logging_config.proto | 1957 ----------------- .../proto/google/rpc/status.proto | 49 - opentelemetry-stackdriver/src/lib.rs | 993 --------- opentelemetry-stackdriver/src/proto/api.rs | 1237 ----------- .../src/proto/devtools/cloudtrace/v2.rs | 626 ------ .../src/proto/logging/type.rs | 142 -- .../src/proto/logging/v2.rs | 837 ------- opentelemetry-stackdriver/src/proto/mod.rs | 14 - opentelemetry-stackdriver/src/proto/rpc.rs | 25 - opentelemetry-stackdriver/tests/generate.rs | 261 --- opentelemetry-user-events-logs/CHANGELOG.md | 15 - opentelemetry-user-events-logs/CODEOWNERS | 5 - opentelemetry-user-events-logs/Cargo.toml | 35 - opentelemetry-user-events-logs/LICENSE | 201 -- opentelemetry-user-events-logs/README.md | 17 - .../examples/basic.rs | 38 - opentelemetry-user-events-logs/src/lib.rs | 8 - .../src/logs/exporter.rs | 347 --- .../src/logs/mod.rs | 5 - .../src/logs/reentrant_logprocessor.rs | 61 - .../CHANGELOG.md | 23 - opentelemetry-user-events-metrics/CODEOWNERS | 5 - opentelemetry-user-events-metrics/Cargo.toml | 26 - opentelemetry-user-events-metrics/LICENSE | 201 -- opentelemetry-user-events-metrics/README.md | 17 - .../examples/basic.rs | 53 - .../src/exporter/mod.rs | 92 - opentelemetry-user-events-metrics/src/lib.rs | 5 - .../src/tracepoint/mod.rs | 117 - .../src/transform/mod.rs | 117 - opentelemetry-zpages/CHANGELOG.md | 42 - opentelemetry-zpages/CODEOWNERS | 5 - opentelemetry-zpages/Cargo.toml | 40 - opentelemetry-zpages/DESIGN.md | 60 - opentelemetry-zpages/LICENSE | 201 -- opentelemetry-zpages/README.md | 23 - opentelemetry-zpages/examples/README.md | 12 - opentelemetry-zpages/examples/zpages.rs | 110 - opentelemetry-zpages/src/lib.rs | 69 - opentelemetry-zpages/src/trace/aggregator.rs | 447 ---- opentelemetry-zpages/src/trace/mod.rs | 293 --- .../src/trace/span_processor.rs | 60 - opentelemetry-zpages/src/trace/span_queue.rs | 225 -- scripts/test.sh | 1 - 108 files changed, 17261 deletions(-) delete mode 100644 examples/traceresponse/Cargo.toml delete mode 100644 examples/traceresponse/README.md delete mode 100644 examples/traceresponse/src/client.rs delete mode 100644 examples/traceresponse/src/server.rs delete mode 100644 opentelemetry-aws/CHANGELOG.md delete mode 100644 opentelemetry-aws/CODEOWNERS delete mode 100644 opentelemetry-aws/Cargo.toml delete mode 100644 opentelemetry-aws/LICENSE delete mode 100644 opentelemetry-aws/README.md delete mode 100644 opentelemetry-aws/src/lib.rs delete mode 100644 opentelemetry-contrib/CHANGELOG.md delete mode 100644 opentelemetry-contrib/CODEOWNERS delete mode 100644 opentelemetry-contrib/Cargo.toml delete mode 100644 opentelemetry-contrib/LICENSE delete mode 100644 opentelemetry-contrib/README.md delete mode 100644 opentelemetry-contrib/benches/new_span.rs delete mode 100644 opentelemetry-contrib/src/lib.rs delete mode 100644 opentelemetry-contrib/src/trace/context.rs delete mode 100644 opentelemetry-contrib/src/trace/exporter/jaeger_json.rs delete mode 100644 opentelemetry-contrib/src/trace/exporter/mod.rs delete mode 100644 opentelemetry-contrib/src/trace/mod.rs delete mode 100644 opentelemetry-contrib/src/trace/propagator/binary/base64_format.rs delete mode 100644 opentelemetry-contrib/src/trace/propagator/binary/binary_propagator.rs delete mode 100644 opentelemetry-contrib/src/trace/propagator/binary/mod.rs delete mode 100644 opentelemetry-contrib/src/trace/propagator/mod.rs delete mode 100644 
opentelemetry-contrib/src/trace/propagator/trace_context_response.rs delete mode 100644 opentelemetry-contrib/src/trace/tracer_source.rs delete mode 100644 opentelemetry-datadog/CHANGELOG.md delete mode 100644 opentelemetry-datadog/CODEOWNERS delete mode 100644 opentelemetry-datadog/Cargo.toml delete mode 100644 opentelemetry-datadog/LICENSE delete mode 100644 opentelemetry-datadog/README.md delete mode 100644 opentelemetry-datadog/examples/README.md delete mode 100644 opentelemetry-datadog/examples/datadog.rs delete mode 100644 opentelemetry-datadog/src/exporter/intern.rs delete mode 100644 opentelemetry-datadog/src/exporter/mod.rs delete mode 100644 opentelemetry-datadog/src/exporter/model/mod.rs delete mode 100644 opentelemetry-datadog/src/exporter/model/unified_tags.rs delete mode 100644 opentelemetry-datadog/src/exporter/model/v03.rs delete mode 100644 opentelemetry-datadog/src/exporter/model/v05.rs delete mode 100644 opentelemetry-datadog/src/lib.rs delete mode 100644 opentelemetry-dynatrace/CHANGELOG.md delete mode 100644 opentelemetry-dynatrace/CODEOWNERS delete mode 100644 opentelemetry-dynatrace/LICENSE delete mode 100644 opentelemetry-dynatrace/README.md delete mode 100644 opentelemetry-stackdriver/CHANGELOG.md delete mode 100644 opentelemetry-stackdriver/Cargo.toml delete mode 100644 opentelemetry-stackdriver/LICENSE-APACHE delete mode 100644 opentelemetry-stackdriver/README.md delete mode 100644 opentelemetry-stackdriver/proto/google/api/annotations.proto delete mode 100644 opentelemetry-stackdriver/proto/google/api/client.proto delete mode 100644 opentelemetry-stackdriver/proto/google/api/field_behavior.proto delete mode 100644 opentelemetry-stackdriver/proto/google/api/http.proto delete mode 100644 opentelemetry-stackdriver/proto/google/api/label.proto delete mode 100644 opentelemetry-stackdriver/proto/google/api/launch_stage.proto delete mode 100644 opentelemetry-stackdriver/proto/google/api/monitored_resource.proto delete mode 100644 opentelemetry-stackdriver/proto/google/api/resource.proto delete mode 100644 opentelemetry-stackdriver/proto/google/devtools/cloudtrace/v2/trace.proto delete mode 100644 opentelemetry-stackdriver/proto/google/devtools/cloudtrace/v2/tracing.proto delete mode 100644 opentelemetry-stackdriver/proto/google/logging/type/http_request.proto delete mode 100644 opentelemetry-stackdriver/proto/google/logging/type/log_severity.proto delete mode 100644 opentelemetry-stackdriver/proto/google/logging/v2/log_entry.proto delete mode 100644 opentelemetry-stackdriver/proto/google/logging/v2/logging.proto delete mode 100644 opentelemetry-stackdriver/proto/google/logging/v2/logging_config.proto delete mode 100644 opentelemetry-stackdriver/proto/google/rpc/status.proto delete mode 100644 opentelemetry-stackdriver/src/lib.rs delete mode 100644 opentelemetry-stackdriver/src/proto/api.rs delete mode 100644 opentelemetry-stackdriver/src/proto/devtools/cloudtrace/v2.rs delete mode 100644 opentelemetry-stackdriver/src/proto/logging/type.rs delete mode 100644 opentelemetry-stackdriver/src/proto/logging/v2.rs delete mode 100644 opentelemetry-stackdriver/src/proto/mod.rs delete mode 100644 opentelemetry-stackdriver/src/proto/rpc.rs delete mode 100644 opentelemetry-stackdriver/tests/generate.rs delete mode 100644 opentelemetry-user-events-logs/CHANGELOG.md delete mode 100644 opentelemetry-user-events-logs/CODEOWNERS delete mode 100644 opentelemetry-user-events-logs/Cargo.toml delete mode 100644 opentelemetry-user-events-logs/LICENSE delete mode 100644 
opentelemetry-user-events-logs/README.md delete mode 100644 opentelemetry-user-events-logs/examples/basic.rs delete mode 100644 opentelemetry-user-events-logs/src/lib.rs delete mode 100644 opentelemetry-user-events-logs/src/logs/exporter.rs delete mode 100644 opentelemetry-user-events-logs/src/logs/mod.rs delete mode 100644 opentelemetry-user-events-logs/src/logs/reentrant_logprocessor.rs delete mode 100644 opentelemetry-user-events-metrics/CHANGELOG.md delete mode 100644 opentelemetry-user-events-metrics/CODEOWNERS delete mode 100644 opentelemetry-user-events-metrics/Cargo.toml delete mode 100644 opentelemetry-user-events-metrics/LICENSE delete mode 100644 opentelemetry-user-events-metrics/README.md delete mode 100644 opentelemetry-user-events-metrics/examples/basic.rs delete mode 100644 opentelemetry-user-events-metrics/src/exporter/mod.rs delete mode 100644 opentelemetry-user-events-metrics/src/lib.rs delete mode 100644 opentelemetry-user-events-metrics/src/tracepoint/mod.rs delete mode 100644 opentelemetry-user-events-metrics/src/transform/mod.rs delete mode 100644 opentelemetry-zpages/CHANGELOG.md delete mode 100644 opentelemetry-zpages/CODEOWNERS delete mode 100644 opentelemetry-zpages/Cargo.toml delete mode 100644 opentelemetry-zpages/DESIGN.md delete mode 100644 opentelemetry-zpages/LICENSE delete mode 100644 opentelemetry-zpages/README.md delete mode 100644 opentelemetry-zpages/examples/README.md delete mode 100644 opentelemetry-zpages/examples/zpages.rs delete mode 100644 opentelemetry-zpages/src/lib.rs delete mode 100644 opentelemetry-zpages/src/trace/aggregator.rs delete mode 100644 opentelemetry-zpages/src/trace/mod.rs delete mode 100644 opentelemetry-zpages/src/trace/span_processor.rs delete mode 100644 opentelemetry-zpages/src/trace/span_queue.rs diff --git a/Cargo.toml b/Cargo.toml index 52eec49037..29471f38d9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,9 +1,6 @@ [workspace] members = [ "opentelemetry", - "opentelemetry-aws", - "opentelemetry-contrib", - "opentelemetry-datadog", "opentelemetry-http", "opentelemetry-jaeger", "opentelemetry-jaeger/examples/actix-udp", @@ -15,19 +12,14 @@ members = [ "opentelemetry-proto", "opentelemetry-sdk", "opentelemetry-semantic-conventions", - "opentelemetry-stackdriver", "opentelemetry-stdout", - "opentelemetry-user-events-logs", - "opentelemetry-user-events-metrics", "opentelemetry-zipkin", - "opentelemetry-zpages", "opentelemetry-otlp/examples/basic-otlp", "opentelemetry-otlp/examples/basic-otlp-http", "opentelemetry-otlp/examples/external-otlp-grpcio-async-std", "examples/metrics-basic", "examples/metrics-advanced", "examples/logs-basic", - "examples/traceresponse", "examples/tracing-grpc", "examples/tracing-jaeger", "stress", diff --git a/examples/traceresponse/Cargo.toml b/examples/traceresponse/Cargo.toml deleted file mode 100644 index 2241121773..0000000000 --- a/examples/traceresponse/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "traceresponse" -version = "0.1.0" -edition = "2021" -license = "Apache-2.0" -publish = false - -[[bin]] # Bin to run the http server -name = "http-server" -path = "src/server.rs" -doc = false - -[[bin]] # Bin to run the client -name = "http-client" -path = "src/client.rs" -doc = false - -[dependencies] -hyper = { version = "0.14", features = ["full"] } -tokio = { version = "1.0", features = ["full"] } -opentelemetry = { path = "../../opentelemetry" } -opentelemetry_sdk = { path = "../../opentelemetry-sdk" } -opentelemetry-http = { path = "../../opentelemetry-http" } 
-opentelemetry-contrib = { path = "../../opentelemetry-contrib" } -opentelemetry-stdout = { path = "../../opentelemetry-stdout", features = ["trace"] } diff --git a/examples/traceresponse/README.md b/examples/traceresponse/README.md deleted file mode 100644 index aeaa6bdbb7..0000000000 --- a/examples/traceresponse/README.md +++ /dev/null @@ -1,28 +0,0 @@ -# HTTP Example - -This is a simple example using [hyper] that demonstrates tracing http request -from client to server, and from the server back to the client using the -[W3C Trace Context Response] header. The example shows key aspects of tracing -such as: - -- Root Span (on Client) -- Child Span from a Remote Parent (on Server) -- SpanContext Propagation (from Client to Server) -- SpanContext Propagation (from Server to Client) -- Span Events -- Span Attributes - -[hyper]: https://hyper.rs/ -[W3C Trace Context Response]: https://w3c.github.io/trace-context/#traceresponse-header - -## Usage - -```shell -# Run server -$ cargo run --bin http-server - -# In another tab, run client -$ cargo run --bin http-client - -# The spans should be visible in stdout in the order that they were exported. -``` diff --git a/examples/traceresponse/src/client.rs b/examples/traceresponse/src/client.rs deleted file mode 100644 index d1f6c5950c..0000000000 --- a/examples/traceresponse/src/client.rs +++ /dev/null @@ -1,69 +0,0 @@ -use hyper::http::HeaderValue; -use hyper::{body::Body, Client}; -use opentelemetry::{ - global, - propagation::TextMapPropagator, - trace::{SpanKind, TraceContextExt, Tracer}, - Context, KeyValue, -}; -use opentelemetry_contrib::trace::propagator::trace_context_response::TraceContextResponsePropagator; -use opentelemetry_http::{HeaderExtractor, HeaderInjector}; -use opentelemetry_sdk::{propagation::TraceContextPropagator, trace::TracerProvider}; -use opentelemetry_stdout::SpanExporter; - -fn init_tracer() { - global::set_text_map_propagator(TraceContextPropagator::new()); - // Install stdout exporter pipeline to be able to retrieve the collected spans. - // For the demonstration, use `Sampler::AlwaysOn` sampler to sample all traces. In a production - // application, use `Sampler::ParentBased` or `Sampler::TraceIdRatioBased` with a desired ratio. 
- let provider = TracerProvider::builder() - .with_simple_exporter(SpanExporter::default()) - .build(); - - global::set_tracer_provider(provider); -} - -#[tokio::main] -async fn main() -> std::result::Result<(), Box> { - init_tracer(); - - let client = Client::new(); - let tracer = global::tracer("example/client"); - let span = tracer - .span_builder("say hello") - .with_kind(SpanKind::Client) - .start(&tracer); - let cx = Context::current_with_span(span); - - let mut req = hyper::Request::builder().uri("http://127.0.0.1:3000"); - global::get_text_map_propagator(|propagator| { - propagator.inject_context(&cx, &mut HeaderInjector(req.headers_mut().unwrap())) - }); - let res = client.request(req.body(Body::from("Hello!"))?).await?; - - let response_propagator: &dyn TextMapPropagator = &TraceContextResponsePropagator::new(); - - let response_cx = - response_propagator.extract_with_context(&cx, &HeaderExtractor(res.headers())); - - let response_span = response_cx.span(); - - cx.span().add_event( - "Got response!".to_string(), - vec![ - KeyValue::new("status", res.status().to_string()), - KeyValue::new( - "traceresponse", - res.headers() - .get("traceresponse") - .unwrap_or(&HeaderValue::from_static("")) - .to_str() - .unwrap() - .to_string(), - ), - KeyValue::new("child_sampled", response_span.span_context().is_sampled()), - ], - ); - - Ok(()) -} diff --git a/examples/traceresponse/src/server.rs b/examples/traceresponse/src/server.rs deleted file mode 100644 index 415342c695..0000000000 --- a/examples/traceresponse/src/server.rs +++ /dev/null @@ -1,67 +0,0 @@ -use hyper::{ - service::{make_service_fn, service_fn}, - Body, Request, Response, Server, -}; -use opentelemetry::{ - global, - propagation::TextMapPropagator, - trace::{SpanKind, TraceContextExt, Tracer}, - Context, -}; -use opentelemetry_contrib::trace::propagator::trace_context_response::TraceContextResponsePropagator; -use opentelemetry_http::{HeaderExtractor, HeaderInjector}; -use opentelemetry_sdk::{propagation::TraceContextPropagator, trace::TracerProvider}; -use opentelemetry_stdout::SpanExporter; -use std::{convert::Infallible, net::SocketAddr}; - -async fn handle(req: Request) -> Result, Infallible> { - let parent_cx = global::get_text_map_propagator(|propagator| { - propagator.extract(&HeaderExtractor(req.headers())) - }); - let _cx_guard = parent_cx.attach(); - - let tracer = global::tracer("example/server"); - let span = tracer - .span_builder("say hello") - .with_kind(SpanKind::Server) - .start(&tracer); - - let cx = Context::current_with_span(span); - - cx.span().add_event("handling this...", Vec::new()); - - let mut res = Response::new("Hello, World!".into()); - - let response_propagator: &dyn TextMapPropagator = &TraceContextResponsePropagator::new(); - response_propagator.inject_context(&cx, &mut HeaderInjector(res.headers_mut())); - - Ok(res) -} - -fn init_tracer() { - global::set_text_map_propagator(TraceContextPropagator::new()); - - // Install stdout exporter pipeline to be able to retrieve the collected spans. - // For the demonstration, use `Sampler::AlwaysOn` sampler to sample all traces. In a production - // application, use `Sampler::ParentBased` or `Sampler::TraceIdRatioBased` with a desired ratio. 
- let provider = TracerProvider::builder() - .with_simple_exporter(SpanExporter::default()) - .build(); - - global::set_tracer_provider(provider); -} - -#[tokio::main] -async fn main() { - init_tracer(); - let addr = SocketAddr::from(([127, 0, 0, 1], 3000)); - - let make_svc = make_service_fn(|_conn| async { Ok::<_, Infallible>(service_fn(handle)) }); - - let server = Server::bind(&addr).serve(make_svc); - - println!("Listening on {addr}"); - if let Err(e) = server.await { - eprintln!("server error: {e}"); - } -} diff --git a/opentelemetry-aws/CHANGELOG.md b/opentelemetry-aws/CHANGELOG.md deleted file mode 100644 index 6b6327db14..0000000000 --- a/opentelemetry-aws/CHANGELOG.md +++ /dev/null @@ -1,65 +0,0 @@ -# Changelog - -## vNext - -## v0.9.0 - -### Changed - -- Bump MSRV to 1.65 [#1318](https://github.com/open-telemetry/opentelemetry-rust/pull/1318) -- Bump MSRV to 1.64 [#1203](https://github.com/open-telemetry/opentelemetry-rust/pull/1203) - -## v0.8.0 - -### Changed - -- Update to opentelemetry-api v0.20.0 - -## v0.7.0 -### Added -- Add public functions for AWS trace header [#887](https://github.com/open-telemetry/opentelemetry-rust/pull/887). - -### Changed -- Bump MSRV to 1.57 [#953](https://github.com/open-telemetry/opentelemetry-rust/pull/953) -- Update dependencies and bump MSRV to 1.60 [#969](https://github.com/open-telemetry/opentelemetry-rust/pull/969). - -## v0.6.0 - -### Changed - -- reduce `tokio` feature requirements #750 -- Update to opentelemetry v0.18.0 - -### Fixed - -- Fix XrayPropagator when no header is present #867 - -## v0.5.0 - -### Changed - -- Update to opentelemetry v0.17.0 - -## v0.4.0 - -### Changed - -- Update to opentelemetry v0.16.0 - -## v0.3.0 - -### Changed - -- Update to opentelemetry v0.15.0 - -## v0.2.0 - -### Changed - -- Update to opentelemetry v0.14.0 - -## v0.1.0 - -### Added - -- AWS XRay propagator #446 diff --git a/opentelemetry-aws/CODEOWNERS b/opentelemetry-aws/CODEOWNERS deleted file mode 100644 index d6962a905a..0000000000 --- a/opentelemetry-aws/CODEOWNERS +++ /dev/null @@ -1,5 +0,0 @@ -# Code owners file. -# This file controls who is tagged for review for any given pull request. 
- -# For anything not explicitly taken by someone else: -* @open-telemetry/rust-approvers diff --git a/opentelemetry-aws/Cargo.toml b/opentelemetry-aws/Cargo.toml deleted file mode 100644 index b6f42e1357..0000000000 --- a/opentelemetry-aws/Cargo.toml +++ /dev/null @@ -1,34 +0,0 @@ -[package] -name = "opentelemetry-aws" -version = "0.9.0" -description = "AWS exporters and propagators for OpenTelemetry" -homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-aws" -repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-aws" -readme = "README.md" -categories = [ - "development-tools::debugging", - "development-tools::profiling", -] -keywords = ["opentelemetry", "tracing"] -license = "Apache-2.0" -edition = "2021" -rust-version = "1.65" - -[package.metadata.docs.rs] -all-features = true -rustdoc-args = ["--cfg", "docsrs"] - -[features] -default = ["trace"] -trace = ["opentelemetry/trace"] - -[dependencies] -once_cell = "1.12" -opentelemetry = { version = "0.21", path = "../opentelemetry" } - -[dev-dependencies] -opentelemetry_sdk = { path = "../opentelemetry-sdk", features = ["trace", "testing"] } -opentelemetry-http = { path = "../opentelemetry-http" } -opentelemetry-stdout = { path = "../opentelemetry-stdout", features = ["trace"] } -hyper = { version = "0.14" } -tokio = { version = "1.0", features = ["macros", "rt"] } diff --git a/opentelemetry-aws/LICENSE b/opentelemetry-aws/LICENSE deleted file mode 100644 index 23a2acabc4..0000000000 --- a/opentelemetry-aws/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2023 The OpenTelemetry Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/opentelemetry-aws/README.md b/opentelemetry-aws/README.md deleted file mode 100644 index 7f6cefc76c..0000000000 --- a/opentelemetry-aws/README.md +++ /dev/null @@ -1,27 +0,0 @@ -![OpenTelemetry — An observability framework for cloud-native software.][splash] - -[splash]: https://raw.githubusercontent.com/open-telemetry/opentelemetry-rust/main/assets/logo-text.png - -# OpenTelemetry AWS - -Additional types for exporting [`OpenTelemetry`] data to AWS. 
- -[![Crates.io: opentelemetry-aws](https://img.shields.io/crates/v/opentelemetry-aws.svg)](https://crates.io/crates/opentelemetry-aws) -[![Documentation](https://docs.rs/opentelemetry-aws/badge.svg)](https://docs.rs/opentelemetry-aws) -[![LICENSE](https://img.shields.io/crates/l/opentelemetry-aws)](./LICENSE) -[![GitHub Actions CI](https://github.com/open-telemetry/opentelemetry-rust/workflows/CI/badge.svg)](https://github.com/open-telemetry/opentelemetry-rust/actions?query=workflow%3ACI+branch%3Amain) -[![Slack](https://img.shields.io/badge/slack-@cncf/otel/rust-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C03GDP0H023) - -## Overview - -[`OpenTelemetry`] is a collection of tools, APIs, and SDKs used to instrument, -generate, collect, and export telemetry data (metrics, logs, and traces) for -analysis in order to understand your software's performance and behavior. This -crate provides additional propagators and exporters for sending telemetry data -to AWS's telemetry platform. - -## Supported component - -Currently, this crate only supports `XRay` propagator. Contributions are welcome. - -[`OpenTelemetry`]: https://crates.io/crates/opentelemetry diff --git a/opentelemetry-aws/src/lib.rs b/opentelemetry-aws/src/lib.rs deleted file mode 100644 index 8a8d0e10ab..0000000000 --- a/opentelemetry-aws/src/lib.rs +++ /dev/null @@ -1,391 +0,0 @@ -//! This crate provides unofficial integration with AWS services. -//! -//! # Components -//! As for now, the only components provided in this crate is AWS X-Ray propagator. -//! -//! ### AWS X-Ray Propagator -//! This propagator helps propagate tracing information from upstream services to downstream services. -//! -//! ### Quick start -//! ```no_run -//! use opentelemetry::{global, trace::{Tracer, TracerProvider as _}}; -//! use opentelemetry_aws::trace::XrayPropagator; -//! use opentelemetry_sdk::trace::TracerProvider; -//! use opentelemetry_stdout::SpanExporter; -//! use opentelemetry_http::HeaderInjector; -//! -//! #[tokio::main] -//! async fn main() -> std::result::Result<(), Box> { -//! // Set the global propagator to X-Ray propagator -//! global::set_text_map_propagator(XrayPropagator::default()); -//! let provider = TracerProvider::builder() -//! .with_simple_exporter(SpanExporter::default()) -//! .build(); -//! let tracer = provider.tracer("readme_example"); -//! -//! let mut req = hyper::Request::builder().uri("http://127.0.0.1:3000"); -//! tracer.in_span("doing_work", |cx| { -//! // Send request to downstream services. -//! // Build request -//! global::get_text_map_propagator(|propagator| { -//! // Set X-Ray tracing header in request object `req` -//! propagator.inject_context(&cx, &mut HeaderInjector(req.headers_mut().unwrap())); -//! println!("Headers: {:?}", req.headers_ref()); -//! }) -//! }); -//! -//! Ok(()) -//! } -//! ``` -//! 
A more detailed example can be found in [opentelemetry-rust](https://github.com/open-telemetry/opentelemetry-rust/tree/main/examples/aws-xray) repo - -#[cfg(feature = "trace")] -pub use trace::XrayPropagator; - -#[cfg(feature = "trace")] -pub mod trace { - use once_cell::sync::Lazy; - use opentelemetry::{ - global::{self, Error}, - propagation::{text_map_propagator::FieldIter, Extractor, Injector, TextMapPropagator}, - trace::{ - SpanContext, SpanId, TraceContextExt, TraceError, TraceFlags, TraceId, TraceState, - }, - Context, - }; - use std::borrow::Cow; - use std::convert::TryFrom; - - const AWS_XRAY_TRACE_HEADER: &str = "x-amzn-trace-id"; - const AWS_XRAY_VERSION_KEY: &str = "1"; - const HEADER_PARENT_KEY: &str = "Parent"; - const HEADER_ROOT_KEY: &str = "Root"; - const HEADER_SAMPLED_KEY: &str = "Sampled"; - - const SAMPLED: &str = "1"; - const NOT_SAMPLED: &str = "0"; - const REQUESTED_SAMPLE_DECISION: &str = "?"; - - const TRACE_FLAG_DEFERRED: TraceFlags = TraceFlags::new(0x02); - - static AWS_XRAY_HEADER_FIELD: Lazy<[String; 1]> = - Lazy::new(|| [AWS_XRAY_TRACE_HEADER.to_owned()]); - - /// Extracts and injects `SpanContext`s into `Extractor`s or `Injector`s using AWS X-Ray header format. - /// - /// Extracts and injects values to/from the `x-amzn-trace-id` header. Converting between - /// OpenTelemetry [SpanContext][otel-spec] and [X-Ray Trace format][xray-trace-id]. - /// - /// For details on the [`x-amzn-trace-id` header][xray-header] see the AWS X-Ray Docs. - /// - /// ## Example - /// - /// ``` - /// use opentelemetry::global; - /// use opentelemetry_aws::trace::XrayPropagator; - /// - /// global::set_text_map_propagator(XrayPropagator::default()); - /// ``` - /// - /// [otel-spec]: https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/api.md#SpanContext - /// [xray-trace-id]: https://docs.aws.amazon.com/xray/latest/devguide/xray-api-sendingdata.html#xray-api-traceids - /// [xray-header]: https://docs.aws.amazon.com/xray/latest/devguide/xray-concepts.html#xray-concepts-tracingheader - #[derive(Clone, Debug, Default)] - pub struct XrayPropagator { - _private: (), - } - - /// Extract `SpanContext` from AWS X-Ray format string - /// - /// Extract OpenTelemetry [SpanContext][otel-spec] from [X-Ray Trace format][xray-trace-id] string. 
- /// - /// [otel-spec]: https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/api.md#SpanContext - /// [xray-trace-id]: https://docs.aws.amazon.com/xray/latest/devguide/xray-api-sendingdata.html#xray-api-traceids - pub fn span_context_from_str(value: &str) -> Option { - let parts: Vec<(&str, &str)> = value - .split_terminator(';') - .filter_map(from_key_value_pair) - .collect(); - - let mut trace_id = TraceId::INVALID; - let mut parent_segment_id = SpanId::INVALID; - let mut sampling_decision = TRACE_FLAG_DEFERRED; - let mut kv_vec = Vec::with_capacity(parts.len()); - - for (key, value) in parts { - match key { - HEADER_ROOT_KEY => match TraceId::try_from(XrayTraceId(Cow::from(value))) { - Err(_) => return None, - Ok(parsed) => trace_id = parsed, - }, - HEADER_PARENT_KEY => { - parent_segment_id = SpanId::from_hex(value).unwrap_or(SpanId::INVALID) - } - HEADER_SAMPLED_KEY => { - sampling_decision = match value { - NOT_SAMPLED => TraceFlags::default(), - SAMPLED => TraceFlags::SAMPLED, - REQUESTED_SAMPLE_DECISION => TRACE_FLAG_DEFERRED, - _ => TRACE_FLAG_DEFERRED, - } - } - _ => kv_vec.push((key.to_ascii_lowercase(), value.to_string())), - } - } - - match TraceState::from_key_value(kv_vec) { - Ok(trace_state) => { - if trace_id == TraceId::INVALID { - return None; - } - - Some(SpanContext::new( - trace_id, - parent_segment_id, - sampling_decision, - true, - trace_state, - )) - } - Err(trace_state_err) => { - global::handle_error(Error::Trace(TraceError::Other(Box::new(trace_state_err)))); - None //todo: assign an error type instead of using None - } - } - } - - /// Generate AWS X-Ray format string from `SpanContext` - /// - /// Generate [X-Ray Trace format][xray-trace-id] string from OpenTelemetry [SpanContext][otel-spec] - /// - /// [xray-trace-id]: https://docs.aws.amazon.com/xray/latest/devguide/xray-api-sendingdata.html#xray-api-traceids - /// [otel-spec]: https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/api.md#SpanContext - pub fn span_context_to_string(span_context: &SpanContext) -> Option { - if !span_context.is_valid() { - return None; - } - - let xray_trace_id = XrayTraceId::from(span_context.trace_id()); - - let sampling_decision = - if span_context.trace_flags() & TRACE_FLAG_DEFERRED == TRACE_FLAG_DEFERRED { - REQUESTED_SAMPLE_DECISION - } else if span_context.is_sampled() { - SAMPLED - } else { - NOT_SAMPLED - }; - - let trace_state_header = span_context - .trace_state() - .header_delimited("=", ";") - .split_terminator(';') - .map(title_case) - .collect::>() - .join(";"); - let trace_state_prefix = if trace_state_header.is_empty() { - "" - } else { - ";" - }; - - Some(format!( - "{}={};{}={:016x};{}={}{}{}", - HEADER_ROOT_KEY, - xray_trace_id.0, - HEADER_PARENT_KEY, - span_context.span_id(), - HEADER_SAMPLED_KEY, - sampling_decision, - trace_state_prefix, - trace_state_header - )) - } - - impl XrayPropagator { - /// Creates a new `XrayTraceContextPropagator`. 
- pub fn new() -> Self { - XrayPropagator::default() - } - - fn extract_span_context(&self, extractor: &dyn Extractor) -> Option { - span_context_from_str(extractor.get(AWS_XRAY_TRACE_HEADER)?.trim()) - } - } - - impl TextMapPropagator for XrayPropagator { - fn inject_context(&self, cx: &Context, injector: &mut dyn Injector) { - let span = cx.span(); - let span_context = span.span_context(); - if let Some(header_value) = span_context_to_string(span_context) { - injector.set(AWS_XRAY_TRACE_HEADER, header_value); - } - } - - fn extract_with_context(&self, cx: &Context, extractor: &dyn Extractor) -> Context { - self.extract_span_context(extractor) - .map(|sc| cx.with_remote_span_context(sc)) - .unwrap_or_else(|| cx.clone()) - } - - fn fields(&self) -> FieldIter<'_> { - FieldIter::new(AWS_XRAY_HEADER_FIELD.as_ref()) - } - } - - /// Holds an X-Ray formatted Trace ID - /// - /// A `trace_id` consists of three numbers separated by hyphens. For example, `1-58406520-a006649127e371903a2de979`. - /// This includes: - /// - /// * The version number, that is, 1. - /// * The time of the original request, in Unix epoch time, in 8 hexadecimal digits. - /// * For example, 10:00AM December 1st, 2016 PST in epoch time is 1480615200 seconds, or 58406520 in hexadecimal digits. - /// * A 96-bit identifier for the trace, globally unique, in 24 hexadecimal digits. - /// - /// See the [AWS X-Ray Documentation][xray-trace-id] for more details. - /// - /// [xray-trace-id]: https://docs.aws.amazon.com/xray/latest/devguide/xray-api-sendingdata.html#xray-api-traceids - #[derive(Clone, Debug, PartialEq)] - struct XrayTraceId<'a>(Cow<'a, str>); - - impl<'a> TryFrom> for TraceId { - type Error = (); - - fn try_from(id: XrayTraceId<'a>) -> Result { - let parts: Vec<&str> = id.0.split_terminator('-').collect(); - - if parts.len() != 3 { - return Err(()); - } - - let trace_id: TraceId = - TraceId::from_hex(format!("{}{}", parts[1], parts[2]).as_str()).map_err(|_| ())?; - - if trace_id == TraceId::INVALID { - Err(()) - } else { - Ok(trace_id) - } - } - } - - impl From for XrayTraceId<'static> { - fn from(trace_id: TraceId) -> Self { - let trace_id_as_hex = trace_id.to_string(); - let (timestamp, xray_id) = trace_id_as_hex.split_at(8_usize); - - XrayTraceId(Cow::from(format!( - "{}-{}-{}", - AWS_XRAY_VERSION_KEY, timestamp, xray_id - ))) - } - } - - fn from_key_value_pair(pair: &str) -> Option<(&str, &str)> { - let mut key_value_pair: Option<(&str, &str)> = None; - - if let Some(index) = pair.find('=') { - let (key, value) = pair.split_at(index); - key_value_pair = Some((key, value.trim_start_matches('='))); - } - key_value_pair - } - - fn title_case(s: &str) -> String { - let mut capitalized: String = String::with_capacity(s.len()); - - if !s.is_empty() { - let mut characters = s.chars(); - - if let Some(first) = characters.next() { - capitalized.push(first.to_ascii_uppercase()) - } - capitalized.extend(characters); - } - - capitalized - } - - #[cfg(test)] - mod tests { - use super::*; - use opentelemetry::trace::TraceState; - use opentelemetry_sdk::testing::trace::TestSpan; - use std::collections::HashMap; - use std::str::FromStr; - - #[rustfmt::skip] - fn extract_test_data() -> Vec<(&'static str, SpanContext)> { - vec![ - ("", SpanContext::empty_context()), - ("Sampled=1;Self=foo", SpanContext::empty_context()), - ("Root=1-bogus-bad", SpanContext::empty_context()), - ("Root=1-too-many-parts", SpanContext::empty_context()), - ("Root=1-58406520-a006649127e371903a2de979;Parent=garbage", 
SpanContext::new(TraceId::from_hex("58406520a006649127e371903a2de979").unwrap(), SpanId::INVALID, TRACE_FLAG_DEFERRED, true, TraceState::default())), - ("Root=1-58406520-a006649127e371903a2de979;Sampled=1", SpanContext::new(TraceId::from_hex("58406520a006649127e371903a2de979").unwrap(), SpanId::INVALID, TraceFlags::SAMPLED, true, TraceState::default())), - ("Root=1-58406520-a006649127e371903a2de979;Parent=4c721bf33e3caf8f;Sampled=0", SpanContext::new(TraceId::from_hex("58406520a006649127e371903a2de979").unwrap(), SpanId::from_hex("4c721bf33e3caf8f").unwrap(), TraceFlags::default(), true, TraceState::default())), - ("Root=1-58406520-a006649127e371903a2de979;Parent=4c721bf33e3caf8f;Sampled=1", SpanContext::new(TraceId::from_hex("58406520a006649127e371903a2de979").unwrap(), SpanId::from_hex("4c721bf33e3caf8f").unwrap(), TraceFlags::SAMPLED, true, TraceState::default())), - ("Root=1-58406520-a006649127e371903a2de979;Parent=4c721bf33e3caf8f", SpanContext::new(TraceId::from_hex("58406520a006649127e371903a2de979").unwrap(), SpanId::from_hex("4c721bf33e3caf8f").unwrap(), TRACE_FLAG_DEFERRED, true, TraceState::default())), - ("Root=1-58406520-a006649127e371903a2de979;Parent=4c721bf33e3caf8f;Sampled=?", SpanContext::new(TraceId::from_hex("58406520a006649127e371903a2de979").unwrap(), SpanId::from_hex("4c721bf33e3caf8f").unwrap(), TRACE_FLAG_DEFERRED, true, TraceState::default())), - ("Root=1-58406520-a006649127e371903a2de979;Self=1-58406520-bf42676c05e20ba4a90e448e;Parent=4c721bf33e3caf8f;Sampled=1", SpanContext::new(TraceId::from_hex("58406520a006649127e371903a2de979").unwrap(), SpanId::from_hex("4c721bf33e3caf8f").unwrap(), TraceFlags::SAMPLED, true, TraceState::from_str("self=1-58406520-bf42676c05e20ba4a90e448e").unwrap())), - ("Root=1-58406520-a006649127e371903a2de979;Self=1-58406520-bf42676c05e20ba4a90e448e;Parent=4c721bf33e3caf8f;Sampled=1;RandomKey=RandomValue", SpanContext::new(TraceId::from_hex("58406520a006649127e371903a2de979").unwrap(), SpanId::from_hex("4c721bf33e3caf8f").unwrap(), TraceFlags::SAMPLED, true, TraceState::from_str("self=1-58406520-bf42676c05e20ba4a90e448e,randomkey=RandomValue").unwrap())), - ] - } - - #[rustfmt::skip] - fn inject_test_data() -> Vec<(&'static str, SpanContext)> { - vec![ - ("", SpanContext::empty_context()), - ("", SpanContext::new(TraceId::INVALID, SpanId::INVALID, TRACE_FLAG_DEFERRED, true, TraceState::default())), - ("", SpanContext::new(TraceId::from_hex("58406520a006649127e371903a2de979").unwrap(), SpanId::INVALID, TRACE_FLAG_DEFERRED, true, TraceState::default())), - ("", SpanContext::new(TraceId::from_hex("58406520a006649127e371903a2de979").unwrap(), SpanId::INVALID, TraceFlags::SAMPLED, true, TraceState::default())), - ("Root=1-58406520-a006649127e371903a2de979;Parent=4c721bf33e3caf8f;Sampled=0", SpanContext::new(TraceId::from_hex("58406520a006649127e371903a2de979").unwrap(), SpanId::from_hex("4c721bf33e3caf8f").unwrap(), TraceFlags::default(), true, TraceState::default())), - ("Root=1-58406520-a006649127e371903a2de979;Parent=4c721bf33e3caf8f;Sampled=1", SpanContext::new(TraceId::from_hex("58406520a006649127e371903a2de979").unwrap(), SpanId::from_hex("4c721bf33e3caf8f").unwrap(), TraceFlags::SAMPLED, true, TraceState::default())), - ("Root=1-58406520-a006649127e371903a2de979;Parent=4c721bf33e3caf8f;Sampled=?;Self=1-58406520-bf42676c05e20ba4a90e448e;Randomkey=RandomValue", SpanContext::new(TraceId::from_hex("58406520a006649127e371903a2de979").unwrap(), SpanId::from_hex("4c721bf33e3caf8f").unwrap(), TRACE_FLAG_DEFERRED, true, 
TraceState::from_str("self=1-58406520-bf42676c05e20ba4a90e448e,randomkey=RandomValue").unwrap())),
-            ]
-        }
-
-        #[test]
-        fn test_extract() {
-            for (header, expected) in extract_test_data() {
-                let map: HashMap<String, String> =
-                    vec![(AWS_XRAY_TRACE_HEADER.to_string(), header.to_string())]
-                        .into_iter()
-                        .collect();
-
-                let propagator = XrayPropagator::default();
-                let context = propagator.extract(&map);
-                assert_eq!(context.span().span_context(), &expected);
-            }
-        }
-
-        #[test]
-        fn test_extract_empty() {
-            let map: HashMap<String, String> = HashMap::new();
-            let propagator = XrayPropagator::default();
-            let context = propagator.extract(&map);
-            assert_eq!(context.span().span_context(), &SpanContext::empty_context())
-        }
-
-        #[test]
-        fn test_inject() {
-            let propagator = XrayPropagator::default();
-            for (header_value, span_context) in inject_test_data() {
-                let mut injector: HashMap<String, String> = HashMap::new();
-                propagator.inject_context(
-                    &Context::current_with_span(TestSpan(span_context)),
-                    &mut injector,
-                );
-
-                let injected_value: Option<&String> = injector.get(AWS_XRAY_TRACE_HEADER);
-
-                if header_value.is_empty() {
-                    assert!(injected_value.is_none());
-                } else {
-                    assert_eq!(injected_value, Some(&header_value.to_string()));
-                }
-            }
-        }
-    }
-}
diff --git a/opentelemetry-contrib/CHANGELOG.md b/opentelemetry-contrib/CHANGELOG.md
deleted file mode 100644
index d942534684..0000000000
--- a/opentelemetry-contrib/CHANGELOG.md
+++ /dev/null
@@ -1,100 +0,0 @@
-# Changelog
-
-## vNext
-
-## v0.13.0
-
-### Changed
-
-- Bump MSRV to 1.65 [#1318](https://github.com/open-telemetry/opentelemetry-rust/pull/1318)
-- Bump MSRV to 1.64 [#1203](https://github.com/open-telemetry/opentelemetry-rust/pull/1203)
-
-## v0.12.0
-
-### Added
-
-- Implement w3c trace context response propagation #998
-
-### Changed
-
-- update to opentelemetry-api v0.20.0
-
-## v0.11.0
-
-### Changed
-- Handle `parent_span_id` in jaeger JSON exporter [#907](https://github.com/open-telemetry/opentelemetry-rust/pull/907).
-- Bump MSRV to 1.57 [#953](https://github.com/open-telemetry/opentelemetry-rust/pull/953).
-- Update dependencies and bump MSRV to 1.60 [#969](https://github.com/open-telemetry/opentelemetry-rust/pull/969).
-- Implement w3c trace context response propagation [#998](https://github.com/open-telemetry/opentelemetry-rust/pull/998).
-
-## v0.10.0
-
-### Added
-
-- Add jaeger JSON file exporter #814
-
-### Changed
-
-- Rename binary propagator's functions #776
-- Update to opentelemetry v0.18.0
-
-## v0.9.0
-
-### Changed
-
-- Update to opentelemetry v0.17.0
-
-## v0.8.0
-
-### Changed
-
-- Update to opentelemetry v0.16.0
-
-## v0.7.0
-
-### Changed
-
-- Update to opentelemetry v0.15.0
-
-## v0.6.0
-
-### Changed
-
-- Update to opentelemetry v0.14.0
-
-## v0.5.0
-
-### Removed
-- Moved aws related function to `opentelemetry-aws` crate. #446
-- Moved datadog related function to `opentelemetry-datadog` crate.
#446 - -### Changed - -- Update to opentelemetry v0.13.0 - -## v0.4.0 - -### Changed - -- Update to opentelemetry v0.12.0 -- Support tokio v1.0 #421 -- Use opentelemetry-http for http integration #415 - -## v0.3.0 - -### Changed - -- Update to opentelemetry v0.11.0 - -## v0.2.0 - -### Changed - -- Update to opentelemetry v0.10.0 -- Move binary propagator and base64 format to this crate #343 - -## v0.1.0 - -### Added - -- Datadog exporter diff --git a/opentelemetry-contrib/CODEOWNERS b/opentelemetry-contrib/CODEOWNERS deleted file mode 100644 index d6962a905a..0000000000 --- a/opentelemetry-contrib/CODEOWNERS +++ /dev/null @@ -1,5 +0,0 @@ -# Code owners file. -# This file controls who is tagged for review for any given pull request. - -# For anything not explicitly taken by someone else: -* @open-telemetry/rust-approvers diff --git a/opentelemetry-contrib/Cargo.toml b/opentelemetry-contrib/Cargo.toml deleted file mode 100644 index d5c6eb054d..0000000000 --- a/opentelemetry-contrib/Cargo.toml +++ /dev/null @@ -1,55 +0,0 @@ -[package] -name = "opentelemetry-contrib" -version = "0.13.0" -description = "Rust contrib repo for OpenTelemetry" -homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-contrib" -repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-contrib" -readme = "README.md" -categories = [ - "development-tools::debugging", - "development-tools::profiling", -] -keywords = ["opentelemetry", "tracing"] -license = "Apache-2.0" -edition = "2021" -rust-version = "1.65" - -[package.metadata.docs.rs] -all-features = true -rustdoc-args = ["--cfg", "docsrs"] - -[features] -api = [] -default = [] -base64_format = ["base64", "binary_propagator"] -binary_propagator = [] -jaeger_json_exporter = ["serde_json", "futures-core", "futures-util", "async-trait", "opentelemetry-semantic-conventions"] -rt-tokio = ["tokio", "opentelemetry_sdk/rt-tokio"] -rt-tokio-current-thread = ["tokio", "opentelemetry_sdk/rt-tokio-current-thread"] -rt-async-std = ["async-std", "opentelemetry_sdk/rt-async-std"] - -[dependencies] -async-std = { version = "1.10", optional = true } -async-trait = { version = "0.1", optional = true } -base64 = { version = "0.13", optional = true } -futures-core = { version = "0.3", optional = true } -futures-util = { version = "0.3", optional = true, default-features = false } -once_cell = "1.17.1" -opentelemetry = { version = "0.21", path = "../opentelemetry" } -opentelemetry_sdk = { version = "0.21", optional = true, path = "../opentelemetry-sdk" } -opentelemetry-semantic-conventions = { version = "0.13", optional = true, path = "../opentelemetry-semantic-conventions" } -serde_json = { version = "1", optional = true } -tokio = { version = "1.0", features = ["fs", "io-util"], optional = true } - -[dev-dependencies] -base64 = "0.13" -criterion = { version = "0.5", features = ["html_reports"] } -futures-util = { version = "0.3", default-features = false, features = ["std"] } -opentelemetry_sdk = { path = "../opentelemetry-sdk", features = ["trace", "testing"] } -[target.'cfg(not(target_os = "windows"))'.dev-dependencies] -pprof = { version = "0.13", features = ["flamegraph", "criterion"] } - -[[bench]] -name = "new_span" -harness = false -required-features = ["api"] diff --git a/opentelemetry-contrib/LICENSE b/opentelemetry-contrib/LICENSE deleted file mode 100644 index 23a2acabc4..0000000000 --- a/opentelemetry-contrib/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - 
http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2023 The OpenTelemetry Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/opentelemetry-contrib/README.md b/opentelemetry-contrib/README.md deleted file mode 100644 index 5c8a00b46b..0000000000 --- a/opentelemetry-contrib/README.md +++ /dev/null @@ -1,23 +0,0 @@ -![OpenTelemetry — An observability framework for cloud-native software.][splash] - -[splash]: https://raw.githubusercontent.com/open-telemetry/opentelemetry-rust/main/assets/logo-text.png - -# OpenTelemetry Contrib - -Community supported vendor integrations for applications instrumented with [`OpenTelemetry`]. - -[![Crates.io: opentelemetry-contrib](https://img.shields.io/crates/v/opentelemetry-contrib.svg)](https://crates.io/crates/opentelemetry-contrib) -[![Documentation](https://docs.rs/opentelemetry-contrib/badge.svg)](https://docs.rs/opentelemetry-contrib) -[![LICENSE](https://img.shields.io/crates/l/opentelemetry-contrib)](./LICENSE) -[![GitHub Actions CI](https://github.com/open-telemetry/opentelemetry-rust/workflows/CI/badge.svg)](https://github.com/open-telemetry/opentelemetry-rust/actions?query=workflow%3ACI+branch%3Amain) -[![Slack](https://img.shields.io/badge/slack-@cncf/otel/rust-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C03GDP0H023) - -## Overview - -[`OpenTelemetry`] is a collection of tools, APIs, and SDKs used to instrument, -generate, collect, and export telemetry data (metrics, logs, and traces) for -analysis in order to understand your software's performance and behavior. This -crate provides additional propagators and exporters for sending telemetry data -to vendors or using experimental propagators like `base64`. 
-
-[`OpenTelemetry`]: https://crates.io/crates/opentelemetry
diff --git a/opentelemetry-contrib/benches/new_span.rs b/opentelemetry-contrib/benches/new_span.rs
deleted file mode 100644
index 35fc0cc980..0000000000
--- a/opentelemetry-contrib/benches/new_span.rs
+++ /dev/null
@@ -1,183 +0,0 @@
-use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
-use futures_util::future::BoxFuture;
-use opentelemetry::{
-    global::BoxedTracer,
-    trace::{
-        mark_span_as_active, noop::NoopTracer, SpanBuilder, SpanContext, SpanId,
-        TraceContextExt as _, TraceFlags, TraceId, TraceState, Tracer as _, TracerProvider as _,
-    },
-    Context, ContextGuard,
-};
-use opentelemetry_contrib::trace::{
-    new_span_if_parent_sampled, new_span_if_recording, TracerSource,
-};
-use opentelemetry_sdk::{
-    export::trace::{ExportResult, SpanData, SpanExporter},
-    trace::{config, Sampler, TracerProvider},
-};
-#[cfg(not(target_os = "windows"))]
-use pprof::criterion::{Output, PProfProfiler};
-use std::fmt::Display;
-
-fn criterion_benchmark(c: &mut Criterion) {
-    let mut group = c.benchmark_group("new_span");
-    group.throughput(Throughput::Elements(1));
-    for env in [
-        Environment::InContext,
-        Environment::NoContext,
-        Environment::NoSdk,
-    ] {
-        let (_provider, tracer, _guard) = env.setup();
-
-        for api in [Api::Alt, Api::Spec] {
-            let param = format!("{env}/{api}");
-            group.bench_function(
-                BenchmarkId::new("if_parent_sampled", param.clone()),
-                // m2max, in-cx/alt:   530ns
-                // m2max, no-cx/alt:   5.9ns
-                // m2max, no-sdk/alt:  5.9ns
-                // m2max, in-cx/spec:  505ns
-                // m2max, no-cx/spec:  255ns
-                // m2max, no-sdk/spec: 170ns
-                |b| match api {
-                    Api::Alt => b.iter(|| {
-                        new_span_if_parent_sampled(
-                            || SpanBuilder::from_name("new_span"),
-                            TracerSource::borrowed(&tracer),
-                        )
-                        .map(|cx| cx.attach())
-                    }),
-                    Api::Spec => b.iter(|| mark_span_as_active(tracer.start("new_span"))),
-                },
-            );
-            group.bench_function(
-                BenchmarkId::new("if_recording", param.clone()),
-                // m2max, in-cx/alt:   8ns
-                // m2max, no-cx/alt:   5.9ns
-                // m2max, no-sdk/alt:  5.9ns
-                // m2max, in-cx/spec:  31ns
-                // m2max, no-cx/spec:  5.8ns
-                // m2max, no-sdk/spec: 5.7ns
-                |b| match api {
-                    Api::Alt => b.iter(|| {
-                        new_span_if_recording(
-                            || SpanBuilder::from_name("new_span"),
-                            TracerSource::borrowed(&tracer),
-                        )
-                        .map(|cx| cx.attach())
-                    }),
-                    Api::Spec => b.iter(|| {
-                        Context::current()
-                            .span()
-                            .is_recording()
-                            .then(|| mark_span_as_active(tracer.start("new_span")))
-                    }),
-                },
-            );
-        }
-    }
-}
-
-#[derive(Copy, Clone)]
-enum Api {
-    /// An alternative way which may be faster than what the spec recommends.
-    Alt,
-    /// The recommended way as proposed by the current opentelemetry specification.
-    Spec,
-}
-
-impl Api {
-    const fn as_str(self) -> &'static str {
-        match self {
-            Api::Alt => "alt",
-            Api::Spec => "spec",
-        }
-    }
-}
-
-impl Display for Api {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        write!(f, "{}", self.as_str())
-    }
-}
-
-#[derive(Copy, Clone)]
-enum Environment {
-    /// There is an active span being sampled in the current context.
-    InContext,
-    /// There is no span in context (or there is no context).
-    NoContext,
-    /// An SDK has not been configured, so instrumentation should be noop.
-    NoSdk,
-}
-
-impl Environment {
-    const fn as_str(self) -> &'static str {
-        match self {
-            Environment::InContext => "in-cx",
-            Environment::NoContext => "no-cx",
-            Environment::NoSdk => "no-sdk",
-        }
-    }
-
-    fn setup(&self) -> (Option<TracerProvider>, BoxedTracer, Option<ContextGuard>) {
-        match self {
-            Environment::InContext => {
-                let guard = Context::current()
-                    .with_remote_span_context(SpanContext::new(
-                        TraceId::from(0x09251969),
-                        SpanId::from(0x08171969),
-                        TraceFlags::SAMPLED,
-                        true,
-                        TraceState::default(),
-                    ))
-                    .attach();
-                let (provider, tracer) = parent_sampled_tracer(Sampler::AlwaysOff);
-                (Some(provider), tracer, Some(guard))
-            }
-            Environment::NoContext => {
-                let (provider, tracer) = parent_sampled_tracer(Sampler::AlwaysOff);
-                (Some(provider), tracer, None)
-            }
-            Environment::NoSdk => (None, BoxedTracer::new(Box::new(NoopTracer::new())), None),
-        }
-    }
-}
-
-impl Display for Environment {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        write!(f, "{}", self.as_str())
-    }
-}
-
-fn parent_sampled_tracer(inner_sampler: Sampler) -> (TracerProvider, BoxedTracer) {
-    let provider = TracerProvider::builder()
-        .with_config(config().with_sampler(Sampler::ParentBased(Box::new(inner_sampler))))
-        .with_simple_exporter(NoopExporter)
-        .build();
-    let tracer = provider.tracer(module_path!());
-    (provider, BoxedTracer::new(Box::new(tracer)))
-}
-
-#[derive(Debug)]
-struct NoopExporter;
-
-impl SpanExporter for NoopExporter {
-    fn export(&mut self, _spans: Vec<SpanData>) -> BoxFuture<'static, ExportResult> {
-        Box::pin(futures_util::future::ready(Ok(())))
-    }
-}
-
-#[cfg(not(target_os = "windows"))]
-criterion_group! {
-    name = benches;
-    config = Criterion::default().with_profiler(PProfProfiler::new(100, Output::Flamegraph(None)));
-    targets = criterion_benchmark
-}
-#[cfg(target_os = "windows")]
-criterion_group! {
-    name = benches;
-    config = Criterion::default();
-    targets = criterion_benchmark
-}
-criterion_main!(benches);
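Stripped of the bench harness, the two idioms the matrix above compares look like this (a sketch assuming the crate's `api` feature; the tracer name is illustrative, and without an installed SDK both calls are effectively no-ops):

```rust
use opentelemetry::trace::{mark_span_as_active, SpanBuilder, Tracer as _};
use opentelemetry_contrib::trace::{new_span_if_parent_sampled, TracerSource};

fn spec_way() {
    // Spec-recommended: always create the span; the sampler decides its fate.
    let tracer = opentelemetry::global::tracer("bench");
    let _guard = mark_span_as_active(tracer.start("op"));
}

fn alt_way() {
    // Alternative: skip span construction entirely when the parent is unsampled.
    let _guard = new_span_if_parent_sampled(
        || SpanBuilder::from_name("op"),
        TracerSource::lazy(&|| opentelemetry::global::tracer("bench")),
    )
    .map(|cx| cx.attach());
}

fn main() {
    spec_way();
    alt_way();
}
```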
diff --git a/opentelemetry-contrib/src/lib.rs b/opentelemetry-contrib/src/lib.rs
deleted file mode 100644
index 7c54ebcfd0..0000000000
--- a/opentelemetry-contrib/src/lib.rs
+++ /dev/null
@@ -1,33 +0,0 @@
-//! # OpenTelemetry Contrib
-//!
-//! This is a library for extensions that are not part of the core API, but still may be useful for
-//! some users.
-//!
-//! Typically, those include vendor specific propagators.
-//!
-//! ## Crate Feature Flags
-//!
-//! The following crate feature flags are available:
-//!
-//! * `binary_propagator`: Adds an experimental binary propagator to propagate trace context using a binary format.
-//! * `base64_format`: Enables base64 format support for binary propagators.
-#![warn(
-    future_incompatible,
-    missing_debug_implementations,
-    missing_docs,
-    nonstandard_style,
-    rust_2018_idioms,
-    unreachable_pub,
-    unused
-)]
-#![cfg_attr(
-    docsrs,
-    feature(doc_cfg, doc_auto_cfg),
-    deny(rustdoc::broken_intra_doc_links)
-)]
-#![doc(
-    html_logo_url = "https://raw.githubusercontent.com/open-telemetry/opentelemetry-rust/main/assets/logo.svg"
-)]
-#![cfg_attr(test, deny(warnings))]
-
-pub mod trace;
diff --git a/opentelemetry-contrib/src/trace/context.rs b/opentelemetry-contrib/src/trace/context.rs
deleted file mode 100644
index f196f559bc..0000000000
--- a/opentelemetry-contrib/src/trace/context.rs
+++ /dev/null
@@ -1,184 +0,0 @@
-use super::TracerSource;
-use opentelemetry::{
-    trace::{SpanBuilder, TraceContextExt as _, Tracer as _},
-    Context, ContextGuard,
-};
-use std::{
-    fmt::{Debug, Formatter},
-    ops::{Deref, DerefMut},
-};
-
-/// Lazily creates a new span only if the current context has an active span,
-/// which will be used as the new span's parent.
-///
-/// This is useful for instrumenting library crates whose activities would be
-/// undesirable to see as root spans, by themselves, outside of any application
-/// context.
-///
-/// # Examples
-///
-/// ```
-/// use opentelemetry::trace::{SpanBuilder};
-/// use opentelemetry_contrib::trace::{new_span_if_parent_sampled, TracerSource};
-///
-/// fn my_lib_fn() {
-///     let _guard = new_span_if_parent_sampled(
-///         || SpanBuilder::from_name("my span"),
-///         TracerSource::lazy(&|| opentelemetry::global::tracer(module_path!())),
-///     )
-///     .map(|cx| cx.attach());
-/// }
-/// ```
-pub fn new_span_if_parent_sampled(
-    builder_fn: impl FnOnce() -> SpanBuilder,
-    tracer: TracerSource<'_>,
-) -> Option<Context> {
-    Context::map_current(|current| {
-        current.span().span_context().is_sampled().then(|| {
-            let builder = builder_fn();
-            let span = tracer.get().build_with_context(builder, current);
-            current.with_span(span)
-        })
-    })
-}
-
-/// Lazily creates a new span only if the current context has a recording span,
-/// which will be used as the new span's parent.
-///
-/// This is useful for instrumenting library crates whose activities would be
-/// undesirable to see as root spans, by themselves, outside of any application
-/// context.
-///
-/// # Examples
-///
-/// ```
-/// use opentelemetry::trace::{SpanBuilder};
-/// use opentelemetry_contrib::trace::{new_span_if_recording, TracerSource};
-///
-/// fn my_lib_fn() {
-///     let _guard = new_span_if_recording(
-///         || SpanBuilder::from_name("my span"),
-///         TracerSource::lazy(&|| opentelemetry::global::tracer(module_path!())),
-///     )
-///     .map(|cx| cx.attach());
-/// }
-/// ```
-pub fn new_span_if_recording(
-    builder_fn: impl FnOnce() -> SpanBuilder,
-    tracer: TracerSource<'_>,
-) -> Option<Context> {
-    Context::map_current(|current| {
-        current.span().is_recording().then(|| {
-            let builder = builder_fn();
-            let span = tracer.get().build_with_context(builder, current);
-            current.with_span(span)
-        })
-    })
-}
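Both helpers share one shape: inspect the current context and only pay for span construction when a predicate holds. A distilled sketch of that pattern (`span_if` and its predicate are hypothetical, not crate APIs):

```rust
use opentelemetry::{
    trace::{TraceContextExt as _, Tracer as _},
    Context,
};

// Hypothetical distillation of the shared shape of the two helpers above:
// consult the current context, and only build a span when the predicate holds.
fn span_if(should_trace: impl Fn(&Context) -> bool) -> Option<Context> {
    Context::map_current(|current| {
        should_trace(current).then(|| {
            let tracer = opentelemetry::global::tracer("example");
            let span = tracer.start_with_context("lazy span", current);
            current.with_span(span)
        })
    })
}

fn main() {
    // With no SDK installed, the current span is not sampled, so no span is
    // created and no context is returned.
    let guard = span_if(|cx| cx.span().span_context().is_sampled()).map(|cx| cx.attach());
    assert!(guard.is_none());
}
```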
-
-/// Carries anything with an optional `opentelemetry::Context`.
-///
-/// A `Contextualized<T>` is a smart pointer which owns an instance of `T` and
-/// dereferences to it automatically. The instance of `T` and its associated
-/// optional `Context` can be reacquired via `into_inner`, which returns the
-/// associated tuple type.
-///
-/// This type is mostly useful when sending `T`'s through channels with logical
-/// context propagation.
-///
-/// # Examples
-///
-/// ```
-/// use opentelemetry::trace::{SpanBuilder, TraceContextExt as _};
-/// use opentelemetry_contrib::trace::{new_span_if_parent_sampled, Contextualized, TracerSource};
-///
-/// enum Message { Command }
-/// let (tx, rx) = std::sync::mpsc::channel();
-///
-/// let cx = new_span_if_parent_sampled(
-///     || SpanBuilder::from_name("my command"),
-///     TracerSource::lazy(&|| opentelemetry::global::tracer(module_path!())),
-/// );
-/// tx.send(Contextualized::new(Message::Command, cx)).unwrap();
-///
-/// let msg = rx.recv().unwrap();
-/// let (msg, cx) = msg.into_inner();
-/// let _guard = cx.filter(|cx| cx.has_active_span()).map(|cx| {
-///     cx.span().add_event("command received", vec![]);
-///     cx.attach()
-/// });
-/// ```
-pub struct Contextualized<T>(T, Option<Context>);
-
-impl<T> Contextualized<T> {
-    /// Creates a new instance using the specified value and optional context.
-    pub fn new(value: T, cx: Option<Context>) -> Self {
-        Self(value, cx)
-    }
-
-    /// Creates a new instance using the specified value and the current context if
-    /// it has an active span.
-    pub fn pass_thru(value: T) -> Self {
-        Self::new(
-            value,
-            Context::map_current(|current| current.has_active_span().then(|| current.clone())),
-        )
-    }
-
-    /// Convert self into its constituent parts, returning a tuple.
-    pub fn into_inner(self) -> (T, Option<Context>) {
-        (self.0, self.1)
-    }
-
-    /// Attach the contained context if it exists and return both the
-    /// associated value and an optional guard for the attached context.
-    pub fn attach(self) -> (T, Option<ContextGuard>) {
-        (self.0, self.1.map(|cx| cx.attach()))
-    }
-}
-
-impl<T: Clone> Clone for Contextualized<T> {
-    fn clone(&self) -> Self {
-        Self(self.0.clone(), self.1.clone())
-    }
-}
-
-impl<T: Debug> Debug for Contextualized<T> {
-    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
-        f.debug_tuple("Contextualized")
-            .field(&self.0)
-            .field(&self.1)
-            .finish()
-    }
-}
-
-impl<T> Deref for Contextualized<T> {
-    type Target = T;
-
-    fn deref(&self) -> &Self::Target {
-        &self.0
-    }
-}
-
-impl<T> DerefMut for Contextualized<T> {
-    fn deref_mut(&mut self) -> &mut Self::Target {
-        &mut self.0
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn cover_contextualized() {
-        let cx = Contextualized::new(17, None);
-        let (i, cx) = cx.into_inner();
-        assert_eq!(i, 17);
-        assert!(cx.is_none());
-
-        let cx = Contextualized::pass_thru(17);
-        let (i, _guard) = cx.attach();
-        assert_eq!(i, 17);
-    }
-}
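`pass_thru` is the variant the doc example above doesn't show: the sender captures the current context only when a span is active, so callers don't have to decide. A small sketch (with no SDK installed there is no active span, so no context is captured):

```rust
use opentelemetry_contrib::trace::Contextualized;

fn main() {
    // No SDK installed: `pass_thru` finds no active span and stores no context.
    let job = Contextualized::pass_thru("rebuild-index".to_string());
    println!("payload length: {}", job.len()); // Deref<Target = String>
    let (payload, guard) = job.attach(); // attach the context, if any
    println!("processing {payload}; context attached: {}", guard.is_some());
}
```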
diff --git a/opentelemetry-contrib/src/trace/exporter/jaeger_json.rs b/opentelemetry-contrib/src/trace/exporter/jaeger_json.rs
deleted file mode 100644
index 38b25aa0a3..0000000000
--- a/opentelemetry-contrib/src/trace/exporter/jaeger_json.rs
+++ /dev/null
@@ -1,310 +0,0 @@
-//! # Jaeger JSON file Exporter
-//!
-
-use async_trait::async_trait;
-use futures_core::future::BoxFuture;
-use futures_util::FutureExt;
-use opentelemetry::trace::{SpanId, TraceError};
-use opentelemetry_sdk::{
-    export::trace::{ExportResult, SpanData, SpanExporter},
-    runtime::RuntimeChannel,
-    trace::{Tracer, TracerProvider},
-};
-use opentelemetry_semantic_conventions::SCHEMA_URL;
-use std::collections::HashMap;
-use std::path::{Path, PathBuf};
-use std::time::SystemTime;
-
-/// An exporter for Jaeger-compatible JSON files containing trace data
-#[derive(Debug)]
-pub struct JaegerJsonExporter<R> {
-    out_path: PathBuf,
-    file_prefix: String,
-    service_name: String,
-    runtime: R,
-}
-
-impl<R: JaegerJsonRuntime> JaegerJsonExporter<R> {
-    /// Configure a new jaeger-json exporter
-    ///
-    /// * `out_path` refers to a directory where span data is written. If it does not exist, it is created by the exporter
-    /// * `file_prefix` refers to a prefix prepended to each span file
-    /// * `service_name` is used to identify the corresponding service in Jaeger
-    /// * `runtime` specifies the async runtime used to write the trace data
-    pub fn new(out_path: PathBuf, file_prefix: String, service_name: String, runtime: R) -> Self {
-        Self {
-            out_path,
-            file_prefix,
-            service_name,
-            runtime,
-        }
-    }
-
-    /// Install the exporter using the internally provided runtime
-    pub fn install_batch(self) -> Tracer {
-        let runtime = self.runtime.clone();
-        let provider_builder = TracerProvider::builder().with_batch_exporter(self, runtime);
-        let provider = provider_builder.build();
-        let tracer = opentelemetry::trace::TracerProvider::versioned_tracer(
-            &provider,
-            "opentelemetry",
-            Some(env!("CARGO_PKG_VERSION")),
-            Some(SCHEMA_URL),
-            None,
-        );
-        let _ = opentelemetry::global::set_tracer_provider(provider);
-
-        tracer
-    }
-}
-
-impl<R: JaegerJsonRuntime> SpanExporter for JaegerJsonExporter<R> {
-    fn export(&mut self, batch: Vec<SpanData>) -> BoxFuture<'static, ExportResult> {
-        let mut trace_map = HashMap::new();
-
-        for span in batch {
-            let ctx = &span.span_context;
-            trace_map
-                .entry(ctx.trace_id())
-                .or_insert_with(Vec::new)
-                .push(span_data_to_jaeger_json(span));
-        }
-
-        let data = trace_map
-            .into_iter()
-            .map(|(trace_id, spans)| {
-                serde_json::json!({
-                    "traceID": trace_id.to_string(),
-                    "spans": spans,
-                    "processes": {
-                        "p1": {
-                            "serviceName": self.service_name,
-                            "tags": []
-                        }
-                    }
-                })
-            })
-            .collect::<Vec<_>>();
-
-        let json = serde_json::json!({
-            "data": data,
-        });
-
-        let runtime = self.runtime.clone();
-        let out_path = self.out_path.clone();
-        let file_prefix = self.file_prefix.clone();
-
-        async move {
-            runtime.create_dir(&out_path).await?;
-
-            let file_name = out_path.join(format!(
-                "{}-{}.json",
-                file_prefix,
-                SystemTime::now()
-                    .duration_since(SystemTime::UNIX_EPOCH)
-                    .expect("This does not fail")
-                    .as_secs()
-            ));
-            runtime
-                .write_to_file(
-                    &file_name,
-                    &serde_json::to_vec(&json).expect("This is a valid json value"),
-                )
-                .await?;
-
-            Ok(())
-        }
-        .boxed()
-    }
-}
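For reference, the document shape `export` assembles looks roughly like this (a sketch assuming a `serde_json` dependency; all values are illustrative, and only the fields built above are shown):

```rust
use serde_json::json;

// A sketch of the Jaeger-compatible JSON document written by the exporter.
fn main() {
    let doc = json!({
        "data": [{
            "traceID": "4bf92f3577b34da6a3ce929d0e0e4736",
            "spans": [{
                "traceID": "4bf92f3577b34da6a3ce929d0e0e4736",
                "spanID": "00f067aa0ba902b7",
                "operationName": "my-operation",
                "processID": "p1"
            }],
            "processes": { "p1": { "serviceName": "my-service", "tags": [] } }
        }]
    });
    println!("{}", serde_json::to_string_pretty(&doc).unwrap());
}
```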
-
-fn span_data_to_jaeger_json(span: SpanData) -> serde_json::Value {
-    let events = span
-        .events
-        .iter()
-        .map(|e| {
-            let mut fields = e
-                .attributes
-                .iter()
-                .map(|a| {
-                    let (tpe, value) = opentelemetry_value_to_json(&a.value);
-                    serde_json::json!({
-                        "key": a.key.as_str(),
-                        "type": tpe,
-                        "value": value,
-                    })
-                })
-                .collect::<Vec<_>>();
-            fields.push(serde_json::json!({
-                "key": "event",
-                "type": "string",
-                "value": e.name,
-            }));
-
-            serde_json::json!({
-                "timestamp": e.timestamp.duration_since(SystemTime::UNIX_EPOCH).expect("This does not fail").as_micros() as i64,
-                "fields": fields,
-            })
-        })
-        .collect::<Vec<_>>();
-    let tags = span
-        .attributes
-        .iter()
-        .map(|kv| {
-            let (tpe, value) = opentelemetry_value_to_json(&kv.value);
-            serde_json::json!({
-                "key": kv.key.as_str(),
-                "type": tpe,
-                "value": value,
-            })
-        })
-        .collect::<Vec<_>>();
-    let mut references = if span.links.is_empty() {
-        None
-    } else {
-        Some(
-            span.links
-                .iter()
-                .map(|link| {
-                    let span_context = &link.span_context;
-                    serde_json::json!({
-                        "refType": "FOLLOWS_FROM",
-                        "traceID": span_context.trace_id().to_string(),
-                        "spanID": span_context.span_id().to_string(),
-                    })
-                })
-                .collect::<Vec<_>>(),
-        )
-    };
-    if span.parent_span_id != SpanId::INVALID {
-        let val = serde_json::json!({
-            "refType": "CHILD_OF",
-            "traceID": span.span_context.trace_id().to_string(),
-            "spanID": span.parent_span_id.to_string(),
-        });
-        references.get_or_insert_with(Vec::new).push(val);
-    }
-    serde_json::json!({
-        "traceID": span.span_context.trace_id().to_string(),
-        "spanID": span.span_context.span_id().to_string(),
-        "startTime": span.start_time.duration_since(SystemTime::UNIX_EPOCH).expect("This does not fail").as_micros() as i64,
-        "duration": span.end_time.duration_since(span.start_time).expect("This does not fail").as_micros() as i64,
-        "operationName": span.name,
-        "tags": tags,
-        "logs": events,
-        "flags": span.span_context.trace_flags().to_u8(),
-        "processID": "p1",
-        "warnings": None::<String>,
-        "references": references,
-    })
-}
-
-fn opentelemetry_value_to_json(value: &opentelemetry::Value) -> (&str, serde_json::Value) {
-    match value {
-        opentelemetry::Value::Bool(b) => ("bool", serde_json::json!(b)),
-        opentelemetry::Value::I64(i) => ("int64", serde_json::json!(i)),
-        opentelemetry::Value::F64(f) => ("float64", serde_json::json!(f)),
-        opentelemetry::Value::String(s) => ("string", serde_json::json!(s.as_str())),
-        v @ opentelemetry::Value::Array(_) => ("string", serde_json::json!(v.to_string())),
-    }
-}
-
-/// Jaeger Json Runtime is an extension to [`RuntimeChannel`].
-///
-/// [`RuntimeChannel`]: opentelemetry_sdk::runtime::RuntimeChannel
-#[async_trait]
-pub trait JaegerJsonRuntime: RuntimeChannel + std::fmt::Debug {
-    /// Create a new directory if the given path does not exist yet
-    async fn create_dir(&self, path: &Path) -> ExportResult;
-    /// Write the provided content to a new file at the given path
-    async fn write_to_file(&self, path: &Path, content: &[u8]) -> ExportResult;
-}
-
-#[cfg(feature = "rt-tokio")]
-#[async_trait]
-impl JaegerJsonRuntime for opentelemetry_sdk::runtime::Tokio {
-    async fn create_dir(&self, path: &Path) -> ExportResult {
-        if tokio::fs::metadata(path).await.is_err() {
-            tokio::fs::create_dir_all(path)
-                .await
-                .map_err(|e| TraceError::Other(Box::new(e)))?
-        }
-
-        Ok(())
-    }
-
-    async fn write_to_file(&self, path: &Path, content: &[u8]) -> ExportResult {
-        use tokio::io::AsyncWriteExt;
-
-        let mut file = tokio::fs::File::create(path)
-            .await
-            .map_err(|e| TraceError::Other(Box::new(e)))?;
-        file.write_all(content)
-            .await
-            .map_err(|e| TraceError::Other(Box::new(e)))?;
-        file.sync_data()
-            .await
-            .map_err(|e| TraceError::Other(Box::new(e)))?;
-
-        Ok(())
-    }
-}
-
-#[cfg(feature = "rt-tokio-current-thread")]
-#[async_trait]
-impl JaegerJsonRuntime for opentelemetry_sdk::runtime::TokioCurrentThread {
-    async fn create_dir(&self, path: &Path) -> ExportResult {
-        if tokio::fs::metadata(path).await.is_err() {
-            tokio::fs::create_dir_all(path)
-                .await
-                .map_err(|e| TraceError::Other(Box::new(e)))?
-        }
-
-        Ok(())
-    }
-
-    async fn write_to_file(&self, path: &Path, content: &[u8]) -> ExportResult {
-        use tokio::io::AsyncWriteExt;
-
-        let mut file = tokio::fs::File::create(path)
-            .await
-            .map_err(|e| TraceError::Other(Box::new(e)))?;
-        file.write_all(content)
-            .await
-            .map_err(|e| TraceError::Other(Box::new(e)))?;
-        file.sync_data()
-            .await
-            .map_err(|e| TraceError::Other(Box::new(e)))?;
-
-        Ok(())
-    }
-}
-
-#[cfg(feature = "rt-async-std")]
-#[async_trait]
-impl JaegerJsonRuntime for opentelemetry_sdk::runtime::AsyncStd {
-    async fn create_dir(&self, path: &Path) -> ExportResult {
-        if async_std::fs::metadata(path).await.is_err() {
-            async_std::fs::create_dir_all(path)
-                .await
-                .map_err(|e| TraceError::Other(Box::new(e)))?;
-        }
-        Ok(())
-    }
-
-    async fn write_to_file(&self, path: &Path, content: &[u8]) -> ExportResult {
-        use async_std::io::WriteExt;
-
-        let mut file = async_std::fs::File::create(path)
-            .await
-            .map_err(|e| TraceError::Other(Box::new(e)))?;
-        file.write_all(content)
-            .await
-            .map_err(|e| TraceError::Other(Box::new(e)))?;
-        file.sync_data()
-            .await
-            .map_err(|e| TraceError::Other(Box::new(e)))?;
-
-        Ok(())
-    }
-}
diff --git a/opentelemetry-contrib/src/trace/exporter/mod.rs b/opentelemetry-contrib/src/trace/exporter/mod.rs
deleted file mode 100644
index 6bef8e5519..0000000000
--- a/opentelemetry-contrib/src/trace/exporter/mod.rs
+++ /dev/null
@@ -1,13 +0,0 @@
-//! # Opentelemetry exporter contrib
-//!
-//! This module provides exporters for third-party vendor formats or experimental exporters that
-//! aren't part of Opentelemetry.
-//!
-//! Currently, the following exporters are supported:
-//!
-//! * `jaeger_json`, which allows exporting traces to files in Jaeger's JSON format
-//!
-//! This module also provides related types for those exporters.
-
-#[cfg(feature = "jaeger_json_exporter")]
-pub mod jaeger_json;
diff --git a/opentelemetry-contrib/src/trace/mod.rs b/opentelemetry-contrib/src/trace/mod.rs
deleted file mode 100644
index 97ee0db72f..0000000000
--- a/opentelemetry-contrib/src/trace/mod.rs
+++ /dev/null
@@ -1,15 +0,0 @@
-//! # Opentelemetry trace contrib
-//!
-
-#[cfg(feature = "api")]
-mod context;
-#[cfg(feature = "api")]
-pub use context::{new_span_if_parent_sampled, new_span_if_recording, Contextualized};
-
-pub mod exporter;
-pub mod propagator;
-
-#[cfg(feature = "api")]
-mod tracer_source;
-#[cfg(feature = "api")]
-pub use tracer_source::TracerSource;
diff --git a/opentelemetry-contrib/src/trace/propagator/binary/base64_format.rs b/opentelemetry-contrib/src/trace/propagator/binary/base64_format.rs
deleted file mode 100644
index 45712b5ebf..0000000000
--- a/opentelemetry-contrib/src/trace/propagator/binary/base64_format.rs
+++ /dev/null
@@ -1,91 +0,0 @@
-//! # Base64 Format
-//!
-//! `Base64Format` is a formatter to serialize and deserialize a
-//! value into and out of a base64 format.
-//!
-//! `Base64Format` MUST expose APIs that serialize values into base64 strings
-//! and deserialize values from base64 strings. There is a blanket implementation
-//! for any implementor of `BinaryFormat`.
-#[cfg(feature = "binary_propagator")]
-use crate::trace::propagator::binary::binary_propagator::BinaryFormat;
-
-use base64::{decode, encode};
-use opentelemetry::trace::SpanContext;
-
-/// Used to serialize and deserialize `SpanContext`s to and from a base64
-/// representation.
-pub trait Base64Format {
-    /// Serializes span context into a base64 encoded string
-    fn serialize_into_base64(&self, context: &SpanContext) -> String;
-
-    /// Deserialize a span context from a base64 encoded string
-    fn deserialize_from_base64(&self, base64: &str) -> SpanContext;
-}
-
-impl<Format> Base64Format for Format
-where
-    Format: BinaryFormat,
-{
-    fn serialize_into_base64(&self, context: &SpanContext) -> String {
-        encode(self.serialize_into_bytes(context))
-    }
-
-    fn deserialize_from_base64(&self, base64: &str) -> SpanContext {
-        if let Ok(bytes) = decode(base64.as_bytes()) {
-            self.deserialize_from_bytes(bytes)
-        } else {
-            SpanContext::empty_context()
-        }
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use crate::trace::propagator::binary::binary_propagator::BinaryPropagator;
-    use opentelemetry::trace::{SpanId, TraceFlags, TraceId, TraceState};
-
-    #[rustfmt::skip]
-    fn to_base64_data() -> Vec<(SpanContext, String)> {
-        vec![
-            (SpanContext::new(
-                TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736),
-                SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default()),
-                "AABL+S81d7NNpqPOkp0ODkc2AQDwZ6oLqQK3AgE=".to_string()
-            ),
-            (SpanContext::new(
-                TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736),
-                SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::default(), true, TraceState::default()),
-                "AABL+S81d7NNpqPOkp0ODkc2AQDwZ6oLqQK3AgA=".to_string()
-            ),
-        ]
-    }
-
-    #[rustfmt::skip]
-    fn from_base64_data() -> Vec<(SpanContext, String)> {
-        vec![
-            (SpanContext::empty_context(), "invalid base64 string".to_string())
-        ]
-    }
-
-    #[test]
-    fn serialize_into_base64_conversion() {
-        let propagator = BinaryPropagator::new();
-
-        for (context, data) in to_base64_data() {
-            assert_eq!(propagator.serialize_into_base64(&context), data)
-        }
-    }
-
-    #[test]
-    fn deserialize_from_base64_conversion() {
-        let propagator = BinaryPropagator::new();
-
-        for (context, data) in from_base64_data() {
-            assert_eq!(propagator.deserialize_from_base64(&data), context)
-        }
-        for (context, data) in to_base64_data() {
-            assert_eq!(propagator.deserialize_from_base64(&data), context)
-        }
-    }
-}
diff --git a/opentelemetry-contrib/src/trace/propagator/binary/binary_propagator.rs b/opentelemetry-contrib/src/trace/propagator/binary/binary_propagator.rs
deleted file mode 100644
index 2f8eba7342..0000000000
--- a/opentelemetry-contrib/src/trace/propagator/binary/binary_propagator.rs
+++ /dev/null
@@ -1,178 +0,0 @@
-//! # Binary Propagator
-//!
-//! `BinaryFormat` is a formatter to serialize and deserialize a
-//! value into and out of a binary format.
-//!
-//! `BinaryFormat` MUST expose APIs that serialize values into bytes
-//! and deserialize values from bytes.
-use opentelemetry::trace::{SpanContext, SpanId, TraceFlags, TraceId, TraceState};
-use std::convert::TryInto;
-
-/// Used to serialize and deserialize `SpanContext`s to and from a binary
-/// representation.
-pub trait BinaryFormat {
-    /// Serializes span context into a byte array and returns the array.
-    fn serialize_into_bytes(&self, context: &SpanContext) -> [u8; 29];
-
-    /// Deserializes a span context from a byte array.
-    fn deserialize_from_bytes(&self, bytes: Vec<u8>) -> SpanContext;
-}
-
-/// Extracts and injects `SpanContext`s from byte arrays.
-#[derive(Debug, Default)]
-pub struct BinaryPropagator {}
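The 29-byte layout that `serialize_into_bytes` targets can be spelled out field by field. A self-contained sketch (the byte values mirror the test vectors further down; nothing here is crate API):

```rust
// Field-tagged layout of the 29-byte wire format:
//   [0]       version byte (always 0)
//   [1]       field tag 0, introducing the trace id
//   [2..18]   16-byte trace id
//   [18]      field tag 1, introducing the span id
//   [19..27]  8-byte span id
//   [27]      field tag 2, introducing the flags
//   [28]      trace flags (bit 0 = sampled)
fn main() {
    let mut buf = [0u8; 29];
    buf[1] = 0; // trace id tag
    buf[2..18].copy_from_slice(&[
        0x4b, 0xf9, 0x2f, 0x35, 0x77, 0xb3, 0x4d, 0xa6,
        0xa3, 0xce, 0x92, 0x9d, 0x0e, 0x0e, 0x47, 0x36,
    ]);
    buf[18] = 1; // span id tag
    buf[19..27].copy_from_slice(&[0x00, 0xf0, 0x67, 0xaa, 0x0b, 0xa9, 0x02, 0xb7]);
    buf[27] = 2; // flags tag
    buf[28] = 1; // sampled
    let hex: String = buf.iter().map(|b| format!("{b:02x}")).collect();
    println!("{hex}");
}
```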
-
-impl BinaryPropagator {
-    /// Create a new binary propagator.
-    pub fn new() -> Self {
-        BinaryPropagator {}
-    }
-}
-
-impl BinaryFormat for BinaryPropagator {
-    /// Serializes span context into a byte array and returns the array.
-    fn serialize_into_bytes(&self, context: &SpanContext) -> [u8; 29] {
-        let mut res = [0u8; 29];
-        if !context.is_valid() {
-            return res;
-        }
-        res[2..18].copy_from_slice(&context.trace_id().to_bytes());
-        res[18] = 1;
-        res[19..27].copy_from_slice(&context.span_id().to_bytes());
-        res[27] = 2;
-        res[28] = context.trace_flags().to_u8();
-
-        res
-    }
-
-    /// Deserializes a span context from a byte array.
-    fn deserialize_from_bytes(&self, bytes: Vec<u8>) -> SpanContext {
-        if bytes.is_empty() {
-            return SpanContext::empty_context();
-        }
-        let trace_id: [u8; 16];
-        let mut span_id = [0; 8];
-        let mut trace_flags = 0;
-        let mut b = &bytes[1..];
-        if b.len() >= 17 && b[0] == 0 {
-            trace_id = b[1..17].try_into().unwrap();
-            b = &b[17..];
-        } else {
-            return SpanContext::empty_context();
-        }
-        if b.len() >= 9 && b[0] == 1 {
-            span_id = b[1..9].try_into().unwrap();
-            b = &b[9..];
-        }
-        if b.len() >= 2 && b[0] == 2 {
-            trace_flags = b[1]
-        }
-
-        let span_context = SpanContext::new(
-            TraceId::from_bytes(trace_id),
-            SpanId::from_bytes(span_id),
-            TraceFlags::new(trace_flags),
-            true,
-            // TODO: traceparent and tracestate should both begin with a 0 byte; figure out how to differentiate
-            TraceState::default(),
-        );
-
-        if span_context.is_valid() {
-            span_context
-        } else {
-            SpanContext::empty_context()
-        }
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use opentelemetry::trace::TraceState;
-
-    #[rustfmt::skip]
-    fn to_bytes_data() -> Vec<(SpanContext, [u8; 29])> {
-        vec![
-            // Context with sampled
-            (SpanContext::new(
-                TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736),
-                SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default()), [
-                0x00, 0x00, 0x4b, 0xf9, 0x2f, 0x35, 0x77, 0xb3, 0x4d, 0xa6, 0xa3, 0xce, 0x92, 0x9d, 0x0e, 0x0e, 0x47, 0x36,
-                0x01, 0x00, 0xf0, 0x67, 0xaa, 0x0b, 0xa9, 0x02, 0xb7,
-                0x02, 0x01,
-            ]),
-            // Context without sampled
-            (SpanContext::new(
-                TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736),
-                SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::default(), true, TraceState::default()), [
-                0x00, 0x00, 0x4b, 0xf9, 0x2f, 0x35, 0x77, 0xb3, 0x4d, 0xa6, 0xa3, 0xce, 0x92, 0x9d, 0x0e, 0x0e, 0x47, 0x36,
-                0x01, 0x00, 0xf0, 0x67, 0xaa, 0x0b, 0xa9, 0x02, 0xb7,
-                0x02, 0x00,
-            ]),
-            // Invalid context
-            (SpanContext::empty_context(), [0u8; 29]),
-        ]
-    }
-
-    #[rustfmt::skip]
-    fn from_bytes_data() -> Vec<(SpanContext, Vec<u8>)> {
-        vec![
-            // Future version of the proto
-            (SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default()), vec![
-                0x02, 0x00, 0x4b, 0xf9, 0x2f, 0x35, 0x77, 0xb3, 0x4d, 0xa6, 0xa3, 0xce, 0x92, 0x9d, 0x0e, 0x0e, 0x47, 0x36,
-                0x01, 0x00, 0xf0, 0x67, 0xaa, 0x0b, 0xa9, 0x02, 0xb7,
-                0x02, 0x01,
-            ]),
-            // current version with sampled
-            (SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default()), vec![
-                0x02, 0x00, 0x4b, 0xf9, 0x2f, 0x35, 0x77, 0xb3, 0x4d, 0xa6, 0xa3, 0xce, 0x92, 0x9d, 0x0e, 0x0e, 0x47, 0x36,
-                0x01, 0x00, 0xf0, 0x67, 0xaa, 0x0b, 0xa9, 0x02, 0xb7,
-                0x02, 0x01,
-            ]),
-            // valid context without option
-            (SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::default(), true, TraceState::default()), vec![
-                0x00, 0x00, 0x4b, 0xf9, 0x2f, 0x35, 0x77, 0xb3, 0x4d, 0xa6, 0xa3, 0xce, 0x92, 0x9d, 0x0e, 0x0e, 0x47, 0x36,
-                0x01, 0x00, 0xf0, 0x67, 0xaa, 0x0b, 0xa9, 0x02, 0xb7,
-            ]),
-            // zero trace id
-            (SpanContext::empty_context(), vec![
-                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-                0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-                0x02, 0x01,
-            ]),
-            // zero span id
-            (SpanContext::empty_context(), vec![
-                0x00, 0x00, 0x4b, 0xf9, 0x2f, 0x35, 0x77, 0xb3, 0x4d, 0xa6, 0xa3, 0xce, 0x92, 0x9d, 0x0e, 0x0e, 0x47, 0x36,
-                0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-                0x02, 0x01,
-            ]),
-            // wrong trace id field number
-            (SpanContext::empty_context(), vec![
-                0x00, 0x01, 0x4b, 0xf9, 0x2f, 0x35, 0x77, 0xb3, 0x4d, 0xa6, 0xa3, 0xce, 0x92, 0x9d, 0x0e, 0x0e, 0x47, 0x36,
-                0x01, 0x00, 0xf0, 0x67, 0xaa, 0x0b, 0xa9, 0x02, 0xb7,
-            ]),
-            // short byte array
-            (SpanContext::empty_context(), vec![
-                0x00, 0x00, 0x4b, 0xf9, 0x2f, 0x35, 0x77, 0xb3, 0x4d,
-            ]),
-        ]
-    }
-
-    #[test]
-    fn serialize_into_bytes_conversion() {
-        let propagator = BinaryPropagator::new();
-
-        for (context, data) in to_bytes_data() {
-            assert_eq!(propagator.serialize_into_bytes(&context), data)
-        }
-    }
-
-    #[test]
-    fn deserialize_from_bytes_conversion() {
-        let propagator = BinaryPropagator::new();
-
-        for (context, data) in from_bytes_data() {
-            assert_eq!(propagator.deserialize_from_bytes(data), context)
-        }
-    }
-}
diff --git a/opentelemetry-contrib/src/trace/propagator/binary/mod.rs b/opentelemetry-contrib/src/trace/propagator/binary/mod.rs
deleted file mode 100644
index 6b16bd4b38..0000000000
--- a/opentelemetry-contrib/src/trace/propagator/binary/mod.rs
+++ /dev/null
@@ -1,42 +0,0 @@
-//! # OpenTelemetry Experimental Propagator interface
-//!
-//! ## Binary Format
-//!
-//! `BinaryFormat` is a formatter to serialize and deserialize a value
-//! into and out of a binary format.
-//!
-//! `BinaryFormat` MUST expose APIs that serialize values into bytes
-//! and deserialize values from bytes.
-//!
-//! ### ToBytes
-//!
-//! Serializes the given value into the on-the-wire representation.
-//!
-//! Required arguments:
-//!
-//! - the value to serialize, can be `SpanContext` or `DistributedContext`.
-//!
-//! Returns the on-the-wire byte representation of the value.
-//!
-//! ### FromBytes
-//!
-//! Creates a value from the given on-the-wire encoded representation.
-//!
-//! If the value could not be parsed, the underlying implementation
-//! SHOULD decide to return either an empty value, an invalid value, or
-//! a valid value.
-//!
-//! Required arguments:
-//!
-//! - on-the-wire byte representation of the value.
-//!
-//! Returns a value deserialized from bytes.
-//!
-
-#[cfg(feature = "base64")]
-mod base64_format;
-mod binary_propagator;
-
-#[cfg(feature = "base64")]
-pub use base64_format::Base64Format;
-pub use binary_propagator::{BinaryFormat, BinaryPropagator};
diff --git a/opentelemetry-contrib/src/trace/propagator/mod.rs b/opentelemetry-contrib/src/trace/propagator/mod.rs
deleted file mode 100644
index eb9c61b6a0..0000000000
--- a/opentelemetry-contrib/src/trace/propagator/mod.rs
+++ /dev/null
@@ -1,12 +0,0 @@
-//! # Opentelemetry propagator contrib
-//!
-//! This module provides propagators for third-party vendor formats or experimental propagators that aren't part of Opentelemetry.
-//!
-//! Currently, the following propagators are supported:
-//!
-//! * `binary_propagator`, propagating trace context in a binary format.
-//!
-//! This module also provides related types for those propagators.
-pub mod binary;
-pub mod trace_context_response;
diff --git a/opentelemetry-contrib/src/trace/propagator/trace_context_response.rs b/opentelemetry-contrib/src/trace/propagator/trace_context_response.rs
deleted file mode 100644
index 45098ba8d7..0000000000
--- a/opentelemetry-contrib/src/trace/propagator/trace_context_response.rs
+++ /dev/null
@@ -1,236 +0,0 @@
-//! # W3C Trace Context HTTP Response Propagator
-//!
-//! The traceresponse HTTP response header field identifies a completed request
-//! in a tracing system. It has four fields:
-//!
-//! - version
-//! - trace-id
-//! - parent-id
-//! - trace-flags
-//!
-//! See the [w3c trace-context docs] for more details.
-//!
-//! [w3c trace-context docs]: https://w3c.github.io/trace-context/#traceresponse-header
-use once_cell::sync::Lazy;
-use opentelemetry::{
-    propagation::{text_map_propagator::FieldIter, Extractor, Injector, TextMapPropagator},
-    trace::{SpanContext, SpanId, TraceContextExt, TraceFlags, TraceId, TraceState},
-    Context,
-};
-
-const SUPPORTED_VERSION: u8 = 0;
-const MAX_VERSION: u8 = 254;
-const TRACERESPONSE_HEADER: &str = "traceresponse";
-
-static TRACE_CONTEXT_HEADER_FIELDS: Lazy<[String; 1]> =
-    Lazy::new(|| [TRACERESPONSE_HEADER.to_owned()]);
-
-/// Propagates trace response using the [W3C TraceContext] format
-///
-/// [W3C TraceContext]: https://w3c.github.io/trace-context/#traceresponse-header
-#[derive(Clone, Debug, Default)]
-pub struct TraceContextResponsePropagator {
-    _private: (),
-}
-
-impl TraceContextResponsePropagator {
-    /// Create a new `TraceContextResponsePropagator`.
-    pub fn new() -> Self {
-        TraceContextResponsePropagator { _private: () }
-    }
-
-    /// Extract span context from a w3c trace-context response header.
-    fn extract_span_context(&self, extractor: &dyn Extractor) -> Result<SpanContext, ()> {
-        let header_value = extractor.get(TRACERESPONSE_HEADER).unwrap_or("").trim();
-        let parts = header_value.split_terminator('-').collect::<Vec<&str>>();
-        // Ensure parts are not out of range.
-        if parts.len() < 4 {
-            return Err(());
-        }
-
-        // Ensure version is within range; for version 0 there must be exactly 4 parts.
-        let version = u8::from_str_radix(parts[0], 16).map_err(|_| ())?;
-        if version > MAX_VERSION || version == 0 && parts.len() != 4 {
-            return Err(());
-        }
-
-        // Ensure trace id is lowercase
-        if parts[1].chars().any(|c| c.is_ascii_uppercase()) {
-            return Err(());
-        }
-
-        // Parse trace id section
-        let trace_id = TraceId::from_hex(parts[1]).map_err(|_| ())?;
-
-        // Ensure span id is lowercase
-        if parts[2].chars().any(|c| c.is_ascii_uppercase()) {
-            return Err(());
-        }
-
-        // Parse span id section
-        let span_id = SpanId::from_hex(parts[2]).map_err(|_| ())?;
-
-        // Parse trace flags section
-        let opts = u8::from_str_radix(parts[3], 16).map_err(|_| ())?;
-
-        // Ensure opts are valid for version 0
-        if version == 0 && opts > 2 {
-            return Err(());
-        }
-
-        // Build trace flags, clearing all flags other than the trace-context
-        // supported sampling bit.
-        let trace_flags = TraceFlags::new(opts) & TraceFlags::SAMPLED;
-
-        // Create the context.
-        let span_context =
-            SpanContext::new(trace_id, span_id, trace_flags, true, TraceState::default());
-
-        // Ensure the span context is valid.
-        if !span_context.is_valid() {
-            return Err(());
-        }
-
-        Ok(span_context)
-    }
-}
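On the happy path, the header `extract_span_context` parses (and `inject_context` below emits) is just four hex fields joined by hyphens. A tiny self-contained sketch (values illustrative):

```rust
// Composing a version-00 traceresponse header from its four fields.
fn main() {
    let version: u8 = 0;
    let trace_id = "4bf92f3577b34da6a3ce929d0e0e4736";
    let span_id = "00f067aa0ba902b7";
    let flags: u8 = 1; // sampled
    let header = format!("{version:02x}-{trace_id}-{span_id}-{flags:02x}");
    assert_eq!(header, "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01");
}
```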
- let trace_flags = TraceFlags::new(opts) & TraceFlags::SAMPLED; - - // create context - let span_context = - SpanContext::new(trace_id, span_id, trace_flags, true, TraceState::default()); - - // Ensure span is valid - if !span_context.is_valid() { - return Err(()); - } - - Ok(span_context) - } -} - -impl TextMapPropagator for TraceContextResponsePropagator { - /// Properly encodes the values of the `SpanContext` and injects them - /// into the `Injector`. - fn inject_context(&self, cx: &Context, injector: &mut dyn Injector) { - let span = cx.span(); - let span_context = span.span_context(); - if span_context.is_valid() { - let header_value = format!( - "{:02x}-{}-{}-{:02x}", - SUPPORTED_VERSION, - span_context.trace_id(), - span_context.span_id(), - span_context.trace_flags() & TraceFlags::SAMPLED - ); - injector.set(TRACERESPONSE_HEADER, header_value); - } - } - - /// Retrieves encoded `SpanContext`s using the `Extractor`. It decodes - /// the `SpanContext` and returns it. If no `SpanContext` was retrieved - /// OR if the retrieved SpanContext is invalid then an empty `SpanContext` - /// is returned. - fn extract_with_context(&self, cx: &Context, extractor: &dyn Extractor) -> Context { - self.extract_span_context(extractor) - .map(|sc| cx.with_remote_span_context(sc)) - .unwrap_or_else(|_| cx.clone()) - } - - fn fields(&self) -> FieldIter<'_> { - FieldIter::new(TRACE_CONTEXT_HEADER_FIELDS.as_ref()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use opentelemetry::{ - propagation::{Extractor, TextMapPropagator}, - testing::trace::TestSpan, - trace::{SpanContext, SpanId, TraceId, TraceState}, - }; - use std::{collections::HashMap, str::FromStr}; - - #[rustfmt::skip] - fn extract_data() -> Vec<(&'static str, SpanContext)> { - vec![ - ("00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-00", SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::default(), true, TraceState::default())), - ("00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01", SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default())), - ("02-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01", SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default())), - ("02-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-09", SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default())), - ("02-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-08", SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::default(), true, TraceState::default())), - ("02-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-09-XYZxsf09", SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default())), - ("00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01-", SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default())), - ("01-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-09-", 
SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default())), - ] - } - - #[rustfmt::skip] - fn extract_data_invalid() -> Vec<(&'static str, &'static str)> { - vec![ - ("0000-00000000000000000000000000000000-0000000000000000-01", "wrong version length"), - ("00-ab00000000000000000000000000000000-cd00000000000000-01", "wrong trace ID length"), - ("00-ab000000000000000000000000000000-cd0000000000000000-01", "wrong span ID length"), - ("00-ab000000000000000000000000000000-cd00000000000000-0100", "wrong trace flag length"), - ("qw-00000000000000000000000000000000-0000000000000000-01", "bogus version"), - ("00-qw000000000000000000000000000000-cd00000000000000-01", "bogus trace ID"), - ("00-ab000000000000000000000000000000-qw00000000000000-01", "bogus span ID"), - ("00-ab000000000000000000000000000000-cd00000000000000-qw", "bogus trace flag"), - ("A0-00000000000000000000000000000000-0000000000000000-01", "upper case version"), - ("00-AB000000000000000000000000000000-cd00000000000000-01", "upper case trace ID"), - ("00-ab000000000000000000000000000000-CD00000000000000-01", "upper case span ID"), - ("00-ab000000000000000000000000000000-cd00000000000000-A1", "upper case trace flag"), - ("00-00000000000000000000000000000000-0000000000000000-01", "zero trace ID and span ID"), - ("00-ab000000000000000000000000000000-cd00000000000000-09", "trace-flag unused bits set"), - ("00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7", "missing options"), - ("00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-", "empty options"), - ] - } - - #[rustfmt::skip] - fn inject_data() -> Vec<(&'static str, SpanContext)> { - vec![ - ("00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01", SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::from_str("foo=bar").unwrap())), - ("00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-00", SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::default(), true, TraceState::from_str("foo=bar").unwrap())), - ("00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01", SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::new(0xff), true, TraceState::from_str("foo=bar").unwrap())), - ("", SpanContext::empty_context()), - ] - } - - #[test] - fn extract_w3c_traceresponse() { - let propagator = TraceContextResponsePropagator::new(); - - for (traceresponse, expected_context) in extract_data() { - let mut extractor = HashMap::new(); - extractor.insert(TRACERESPONSE_HEADER.to_string(), traceresponse.to_string()); - - assert_eq!( - propagator.extract(&extractor).span().span_context(), - &expected_context - ) - } - } - - #[test] - fn extract_w3c_traceresponse_reject_invalid() { - let propagator = TraceContextResponsePropagator::new(); - - for (invalid_header, reason) in extract_data_invalid() { - let mut extractor = HashMap::new(); - extractor.insert(TRACERESPONSE_HEADER.to_string(), invalid_header.to_string()); - - assert_eq!( - propagator.extract(&extractor).span().span_context(), - &SpanContext::empty_context(), - "{}", - reason - ) - } - } - - #[test] - fn inject_w3c_traceresponse() { - let propagator = TraceContextResponsePropagator::new(); - - for (expected_trace_response, context) in inject_data() { - let mut 
injector = HashMap::new(); - propagator.inject_context( - &Context::current_with_span(TestSpan(context)), - &mut injector, - ); - - assert_eq!( - Extractor::get(&injector, TRACERESPONSE_HEADER).unwrap_or(""), - expected_trace_response - ); - } - } -} diff --git a/opentelemetry-contrib/src/trace/tracer_source.rs b/opentelemetry-contrib/src/trace/tracer_source.rs deleted file mode 100644 index fdef67a13d..0000000000 --- a/opentelemetry-contrib/src/trace/tracer_source.rs +++ /dev/null @@ -1,58 +0,0 @@ -//! Abstracts away details for acquiring a `Tracer` by instrumented libraries. -use once_cell::sync::OnceCell; -use opentelemetry::global::BoxedTracer; -use std::fmt::Debug; - -/// Holds either a borrowed `BoxedTracer` or a factory that can produce one when -/// and if needed. -/// -/// This unifies handling of obtaining a `Tracer` by library code optimizing for -/// common cases when it will never be needed. -#[derive(Debug)] -pub struct TracerSource<'a> { - variant: Variant<'a>, - tracer: OnceCell, -} - -enum Variant<'a> { - Borrowed(&'a BoxedTracer), - Lazy(&'a dyn Fn() -> BoxedTracer), -} - -impl<'a> TracerSource<'a> { - /// Construct an instance by borrowing the specified `BoxedTracer`. - pub fn borrowed(tracer: &'a BoxedTracer) -> Self { - Self { - variant: Variant::Borrowed(tracer), - tracer: OnceCell::new(), - } - } - - /// Construct an instance which may lazily produce a `BoxedTracer` using - /// the specified factory function. - pub fn lazy(factory: &'a dyn Fn() -> BoxedTracer) -> Self { - Self { - variant: Variant::Lazy(factory), - tracer: OnceCell::new(), - } - } - - /// Get the associated `BoxedTracer`, producing it if necessary. - pub fn get(&self) -> &BoxedTracer { - use Variant::*; - match self.variant { - Borrowed(tracer) => tracer, - Lazy(factory) => self.tracer.get_or_init(factory), - } - } -} - -impl<'a> Debug for Variant<'a> { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - use Variant::*; - match self { - Borrowed(arg0) => f.debug_tuple("Borrowed").field(arg0).finish(), - Lazy(_arg0) => f.debug_tuple("Lazy").finish(), - } - } -} diff --git a/opentelemetry-datadog/CHANGELOG.md b/opentelemetry-datadog/CHANGELOG.md deleted file mode 100644 index 48d6227464..0000000000 --- a/opentelemetry-datadog/CHANGELOG.md +++ /dev/null @@ -1,94 +0,0 @@ -# Changelog - -## vNext - -## v0.9.0 - -### Changed - -- Bump MSRV to 1.65 [#1318](https://github.com/open-telemetry/opentelemetry-rust/pull/1318) -- Bump MSRV to 1.64 [#1203](https://github.com/open-telemetry/opentelemetry-rust/pull/1203) - -### Fixed - -- Do not set an empty span as the active span when the propagator does not find a remote span. -- Change type signature of `with_http_client()` to use the provided generic as argument. - -## V0.8.0 - -### Changed - -- Update to opentelemetry-api v0.20.0 - -### Fixed - -- Fix the array encoding length of datadog version 05 exporter #1002 - -## v0.7.0 - -### Added -- [Breaking] Add support for unified tagging [#931](https://github.com/open-telemetry/opentelemetry-rust/pull/931). - -### Changed -- Update `opentelemetry` to 0.19 -- Update `opentelemetry-http` to 0.8 -- Update `opentelemetry-semantic-conventions` to 0.11. -- Bump MSRV to 1.57 [#953](https://github.com/open-telemetry/opentelemetry-rust/pull/953) -- Send resource with attributes [#880](https://github.com/open-telemetry/opentelemetry-rust/pull/880). -- Update msgpack accounting for sampling_priority [#903](https://github.com/open-telemetry/opentelemetry-rust/pull/903). 
-- Update dependencies and bump MSRV to 1.60 [#969](https://github.com/open-telemetry/opentelemetry-rust/pull/969). - -## v0.6.0 - -### Changed - -- Allow custom mapping #770 -- Update to opentelemetry v0.18.0 -- Update to opentelemetry-http v0.7.0 -- Update to opentelemetry-semantic-conventions v0.10.0 -- Parse config endpoint to remove tailing slash #787 -- Add sampling priority tag in spans #792 - -## v0.5.0 - -### Changed - -- Update to opentelemetry v0.17.0 -- Update to opentelemetry-http v0.6.0 -- Update to opentelemetry-semantic-conventions v0.9.0 - -## v0.4.0 - -### Changed - -- Update to opentelemetry v0.16.0 - -## v0.3.1 - -### Fixed - -- `status_code` must be 0 or 1 #580 - -## v0.3.0 - -### Changed - -- Update to opentelemetry v0.15.0 - -## v0.2.0 - -### Changed - -- Disable optional features for reqwest -- Remove default surf features #546 -- Update to opentelemetry v0.14.0 - -## v0.1.0 - -### Added - -- Datadog exporter #446 -- Datadog propagator #440 - -### Changed -- Rename trace config with_default_sampler to with_sampler #482 diff --git a/opentelemetry-datadog/CODEOWNERS b/opentelemetry-datadog/CODEOWNERS deleted file mode 100644 index d6962a905a..0000000000 --- a/opentelemetry-datadog/CODEOWNERS +++ /dev/null @@ -1,5 +0,0 @@ -# Code owners file. -# This file controls who is tagged for review for any given pull request. - -# For anything not explicitly taken by someone else: -* @open-telemetry/rust-approvers diff --git a/opentelemetry-datadog/Cargo.toml b/opentelemetry-datadog/Cargo.toml deleted file mode 100644 index 9b78dbe0a4..0000000000 --- a/opentelemetry-datadog/Cargo.toml +++ /dev/null @@ -1,52 +0,0 @@ -[package] -name = "opentelemetry-datadog" -version = "0.9.0" -description = "Datadog exporters and propagators for OpenTelemetry" -homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-datadog" -repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-datadog" -readme = "README.md" -categories = [ - "development-tools::debugging", - "development-tools::profiling", -] -keywords = ["opentelemetry", "tracing"] -license = "Apache-2.0" -edition = "2021" -rust-version = "1.65" - -[package.metadata.docs.rs] -all-features = true -rustdoc-args = ["--cfg", "docsrs"] - -[features] -reqwest-blocking-client = ["reqwest/blocking", "opentelemetry-http/reqwest"] -reqwest-client = ["reqwest", "opentelemetry-http/reqwest"] -surf-client = ["surf", "opentelemetry-http/surf"] - -[dependencies] -indexmap = "2.0" -once_cell = "1.12" -opentelemetry = { version = "0.21", path = "../opentelemetry", features = ["trace"] } -opentelemetry_sdk = { version = "0.21", path = "../opentelemetry-sdk", features = ["trace"] } -opentelemetry-http = { version = "0.10", path = "../opentelemetry-http" } -opentelemetry-semantic-conventions = { version = "0.13", path = "../opentelemetry-semantic-conventions" } -rmp = "0.8" -url = "2.2" -reqwest = { version = "0.11", default-features = false, optional = true } -surf = { version = "2.0", default-features = false, optional = true } -thiserror = "1.0" -itertools = "0.11" -http = "0.2" -futures-core = "0.3" - -[dev-dependencies] -async-trait = "0.1" -base64 = "0.13" -bytes = "1" -futures-util = { version = "0.3", default-features = false, features = ["io"] } -isahc = "1.4" -opentelemetry_sdk = { path = "../opentelemetry-sdk", features = ["trace", "testing"] } - -[[example]] -name = "datadog" -path = "examples/datadog.rs" diff --git a/opentelemetry-datadog/LICENSE b/opentelemetry-datadog/LICENSE 
deleted file mode 100644 index 23a2acabc4..0000000000 --- a/opentelemetry-datadog/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2023 The OpenTelemetry Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/opentelemetry-datadog/README.md b/opentelemetry-datadog/README.md deleted file mode 100644 index 4a80077ae4..0000000000 --- a/opentelemetry-datadog/README.md +++ /dev/null @@ -1,40 +0,0 @@ -![OpenTelemetry — An observability framework for cloud-native software.][splash] - -[splash]: https://raw.githubusercontent.com/open-telemetry/opentelemetry-rust/main/assets/logo-text.png - -# OpenTelemetry Datadog - -Community supported vendor integrations for applications instrumented with [`OpenTelemetry`]. - -[![Crates.io: opentelemetry-datadog](https://img.shields.io/crates/v/opentelemetry-datadog.svg)](https://crates.io/crates/opentelemetry-datadog) -[![Documentation](https://docs.rs/opentelemetry-datadog/badge.svg)](https://docs.rs/opentelemetry-datadog) -[![LICENSE](https://img.shields.io/crates/l/opentelemetry-datadog)](./LICENSE) -[![GitHub Actions CI](https://github.com/open-telemetry/opentelemetry-rust/workflows/CI/badge.svg)](https://github.com/open-telemetry/opentelemetry-rust/actions?query=workflow%3ACI+branch%3Amain) -[![Slack](https://img.shields.io/badge/slack-@cncf/otel/rust-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C03GDP0H023) - -## Overview - -[`OpenTelemetry`] is a collection of tools, APIs, and SDKs used to instrument, -generate, collect, and export telemetry data (metrics, logs, and traces) for -analysis in order to understand your software's performance and behavior. This -crate provides additional propagators and exporters for sending telemetry data -to [`Datadog`]. - -## Features - -`opentelemetry-datadog` supports following features: - -- `reqwest-blocking-client`: use `reqwest` blocking http client to send spans. -- `reqwest-client`: use `reqwest` http client to send spans. -- `surf-client`: use `surf` http client to send spans. - - -## Kitchen Sink Full Configuration - - [Example](https://docs.rs/opentelemetry-datadog/latest/opentelemetry_datadog/#kitchen-sink-full-configuration) showing how to override all configuration options. See the - [`DatadogPipelineBuilder`] docs for details of each option. - - [`DatadogPipelineBuilder`]: https://docs.rs/opentelemetry-datadog/latest/opentelemetry_datadog/struct.DatadogPipelineBuilder.html - -[`Datadog`]: https://www.datadoghq.com/ -[`OpenTelemetry`]: https://crates.io/crates/opentelemetry diff --git a/opentelemetry-datadog/examples/README.md b/opentelemetry-datadog/examples/README.md deleted file mode 100644 index c88351d688..0000000000 --- a/opentelemetry-datadog/examples/README.md +++ /dev/null @@ -1,16 +0,0 @@ -# Datadog Exporter Example - -Sends spans to a datadog-agent collector. 
- -## Usage - -First run version 7.22.0 or above of the datadog-agent locally as described [here](https://docs.datadoghq.com/agent/) - -Then run the example to report spans: - -```shell -# cd opentelemetry-datadog -$ cargo run --example datadog -``` - -Traces should appear in the datadog APM dashboard diff --git a/opentelemetry-datadog/examples/datadog.rs b/opentelemetry-datadog/examples/datadog.rs deleted file mode 100644 index 1bf01f90de..0000000000 --- a/opentelemetry-datadog/examples/datadog.rs +++ /dev/null @@ -1,40 +0,0 @@ -use opentelemetry::{ - global::{self, shutdown_tracer_provider}, - trace::{Span, TraceContextExt, Tracer}, - Key, -}; -use opentelemetry_datadog::{new_pipeline, ApiVersion}; -use std::thread; -use std::time::Duration; - -fn bar() { - let tracer = global::tracer("component-bar"); - let mut span = tracer.start("bar"); - span.set_attribute(Key::new("span.type").string("sql")); - span.set_attribute(Key::new("sql.query").string("SELECT * FROM table")); - thread::sleep(Duration::from_millis(6)); - span.end() -} - -fn main() -> Result<(), Box> { - let tracer = new_pipeline() - .with_service_name("trace-demo") - .with_api_version(ApiVersion::Version05) - .install_simple()?; - - tracer.in_span("foo", |cx| { - let span = cx.span(); - span.set_attribute(Key::new("span.type").string("web")); - span.set_attribute(Key::new("http.url").string("http://localhost:8080/foo")); - span.set_attribute(Key::new("http.method").string("GET")); - span.set_attribute(Key::new("http.status_code").i64(200)); - - thread::sleep(Duration::from_millis(6)); - bar(); - thread::sleep(Duration::from_millis(6)); - }); - - shutdown_tracer_provider(); - - Ok(()) -} diff --git a/opentelemetry-datadog/src/exporter/intern.rs b/opentelemetry-datadog/src/exporter/intern.rs deleted file mode 100644 index 4a483c8d83..0000000000 --- a/opentelemetry-datadog/src/exporter/intern.rs +++ /dev/null @@ -1,53 +0,0 @@ -use indexmap::set::IndexSet; - -pub(crate) struct StringInterner { - data: IndexSet, -} - -impl StringInterner { - pub(crate) fn new() -> StringInterner { - StringInterner { - data: Default::default(), - } - } - - pub(crate) fn intern(&mut self, data: &str) -> u32 { - if let Some(idx) = self.data.get_index_of(data) { - return idx as u32; - } - self.data.insert_full(data.to_string()).0 as u32 - } - - pub(crate) fn iter(&self) -> impl Iterator { - self.data.iter() - } - - pub(crate) fn len(&self) -> u32 { - self.data.len() as u32 - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_intern() { - let a = "a".to_string(); - let b = "b"; - let c = "c"; - - let mut intern = StringInterner::new(); - let a_idx = intern.intern(a.as_str()); - let b_idx = intern.intern(b); - let c_idx = intern.intern(c); - let d_idx = intern.intern(a.as_str()); - let e_idx = intern.intern(c); - - assert_eq!(a_idx, 0); - assert_eq!(b_idx, 1); - assert_eq!(c_idx, 2); - assert_eq!(d_idx, a_idx); - assert_eq!(e_idx, c_idx); - } -} diff --git a/opentelemetry-datadog/src/exporter/mod.rs b/opentelemetry-datadog/src/exporter/mod.rs deleted file mode 100644 index 576b5cb2e8..0000000000 --- a/opentelemetry-datadog/src/exporter/mod.rs +++ /dev/null @@ -1,514 +0,0 @@ -mod intern; -mod model; - -pub use model::ApiVersion; -pub use model::Error; -pub use model::FieldMappingFn; - -use crate::exporter::model::FieldMapping; -use futures_core::future::BoxFuture; -use http::{Method, Request, Uri}; -use itertools::Itertools; -use opentelemetry::{global, trace::TraceError, KeyValue}; -use opentelemetry_http::{HttpClient, ResponseExt}; 
-use opentelemetry_sdk::{ - export::trace::{ExportResult, SpanData, SpanExporter}, - resource::{ResourceDetector, SdkProvidedResourceDetector}, - runtime::RuntimeChannel, - trace::{Config, Tracer, TracerProvider}, - Resource, -}; -use opentelemetry_semantic_conventions as semcov; -use std::borrow::Cow; -use std::fmt::{Debug, Formatter}; -use std::sync::Arc; -use std::time::Duration; -use url::Url; - -use self::model::unified_tags::UnifiedTags; - -/// Default Datadog collector endpoint -const DEFAULT_AGENT_ENDPOINT: &str = "http://127.0.0.1:8126"; - -/// Header name used to inform the Datadog agent of the number of traces in the payload -const DATADOG_TRACE_COUNT_HEADER: &str = "X-Datadog-Trace-Count"; - -/// Header name use to inform datadog as to what version -const DATADOG_META_LANG_HEADER: &str = "Datadog-Meta-Lang"; -const DATADOG_META_TRACER_VERSION_HEADER: &str = "Datadog-Meta-Tracer-Version"; - -// Struct to hold the mapping between Opentelemetry spans and datadog spans. -pub struct Mapping { - resource: Option, - name: Option, - service_name: Option, -} - -impl Mapping { - pub fn new( - resource: Option, - name: Option, - service_name: Option, - ) -> Self { - Mapping { - resource, - name, - service_name, - } - } - pub fn empty() -> Self { - Self::new(None, None, None) - } -} - -/// Datadog span exporter -pub struct DatadogExporter { - client: Arc, - request_url: Uri, - model_config: ModelConfig, - api_version: ApiVersion, - mapping: Mapping, - unified_tags: UnifiedTags, -} - -impl DatadogExporter { - fn new( - model_config: ModelConfig, - request_url: Uri, - api_version: ApiVersion, - client: Arc, - mapping: Mapping, - unified_tags: UnifiedTags, - ) -> Self { - DatadogExporter { - client, - request_url, - model_config, - api_version, - mapping, - unified_tags, - } - } - - fn build_request(&self, batch: Vec) -> Result>, TraceError> { - let traces: Vec> = group_into_traces(batch); - let trace_count = traces.len(); - let data = self.api_version.encode( - &self.model_config, - traces, - &self.mapping, - &self.unified_tags, - )?; - let req = Request::builder() - .method(Method::POST) - .uri(self.request_url.clone()) - .header(http::header::CONTENT_TYPE, self.api_version.content_type()) - .header(DATADOG_TRACE_COUNT_HEADER, trace_count) - .header(DATADOG_META_LANG_HEADER, "rust") - .header( - DATADOG_META_TRACER_VERSION_HEADER, - env!("CARGO_PKG_VERSION"), - ) - .body(data) - .map_err::(Into::into)?; - - Ok(req) - } -} - -impl Debug for DatadogExporter { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - f.debug_struct("DatadogExporter") - .field("model_config", &self.model_config) - .field("request_url", &self.request_url) - .field("api_version", &self.api_version) - .field("client", &self.client) - .field("resource_mapping", &mapping_debug(&self.mapping.resource)) - .field("name_mapping", &mapping_debug(&self.mapping.name)) - .field( - "service_name_mapping", - &mapping_debug(&self.mapping.service_name), - ) - .finish() - } -} - -/// Create a new Datadog exporter pipeline builder. -pub fn new_pipeline() -> DatadogPipelineBuilder { - DatadogPipelineBuilder::default() -} - -/// Builder for `ExporterConfig` struct. 
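-///
-/// A minimal usage sketch (an editor's illustration, not from the original
-/// source; the service name and endpoint values are placeholders):
-///
-/// ```no_run
-/// # fn main() -> Result<(), opentelemetry::trace::TraceError> {
-/// let tracer = opentelemetry_datadog::new_pipeline()
-///     .with_service_name("my-service")
-///     .with_agent_endpoint("http://127.0.0.1:8126")
-///     .install_simple()?;
-/// # Ok(())
-/// # }
-/// ```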
-pub struct DatadogPipelineBuilder { - agent_endpoint: String, - trace_config: Option, - api_version: ApiVersion, - client: Option>, - mapping: Mapping, - unified_tags: UnifiedTags, -} - -impl Default for DatadogPipelineBuilder { - fn default() -> Self { - DatadogPipelineBuilder { - agent_endpoint: DEFAULT_AGENT_ENDPOINT.to_string(), - trace_config: None, - mapping: Mapping::empty(), - api_version: ApiVersion::Version05, - unified_tags: UnifiedTags::new(), - #[cfg(all( - not(feature = "reqwest-client"), - not(feature = "reqwest-blocking-client"), - not(feature = "surf-client"), - ))] - client: None, - #[cfg(all( - not(feature = "reqwest-client"), - not(feature = "reqwest-blocking-client"), - feature = "surf-client" - ))] - client: Some(Arc::new(surf::Client::new())), - #[cfg(all( - not(feature = "surf-client"), - not(feature = "reqwest-blocking-client"), - feature = "reqwest-client" - ))] - client: Some(Arc::new(reqwest::Client::new())), - #[cfg(feature = "reqwest-blocking-client")] - client: Some(Arc::new(reqwest::blocking::Client::new())), - } - } -} - -impl Debug for DatadogPipelineBuilder { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - f.debug_struct("DatadogExporter") - .field("agent_endpoint", &self.agent_endpoint) - .field("trace_config", &self.trace_config) - .field("client", &self.client) - .field("resource_mapping", &mapping_debug(&self.mapping.resource)) - .field("name_mapping", &mapping_debug(&self.mapping.name)) - .field( - "service_name_mapping", - &mapping_debug(&self.mapping.service_name), - ) - .finish() - } -} - -impl DatadogPipelineBuilder { - /// Building a new exporter. - /// - /// This is useful if you are manually constructing a pipeline. - pub fn build_exporter(mut self) -> Result { - let (_, service_name) = self.build_config_and_service_name(); - self.build_exporter_with_service_name(service_name) - } - - fn build_config_and_service_name(&mut self) -> (Config, String) { - let service_name = self.unified_tags.service(); - if let Some(service_name) = service_name { - let config = if let Some(mut cfg) = self.trace_config.take() { - cfg.resource = Cow::Owned(Resource::new( - cfg.resource - .iter() - .filter(|(k, _v)| k.as_str() != semcov::resource::SERVICE_NAME) - .map(|(k, v)| KeyValue::new(k.clone(), v.clone())), - )); - cfg - } else { - Config { - resource: Cow::Owned(Resource::empty()), - ..Default::default() - } - }; - (config, service_name) - } else { - let service_name = SdkProvidedResourceDetector - .detect(Duration::from_secs(0)) - .get(semcov::resource::SERVICE_NAME.into()) - .unwrap() - .to_string(); - ( - Config { - // use a empty resource to prevent TracerProvider to assign a service name. - resource: Cow::Owned(Resource::empty()), - ..Default::default() - }, - service_name, - ) - } - } - - // parse the endpoint and append the path based on versions. - // keep the query and host the same. - fn build_endpoint(agent_endpoint: &str, version: &str) -> Result { - // build agent endpoint based on version - let mut endpoint = agent_endpoint - .parse::() - .map_err::(Into::into)?; - let mut paths = endpoint - .path_segments() - .map(|c| c.filter(|s| !s.is_empty()).collect::>()) - .unwrap_or_default(); - paths.push(version); - - let path_str = paths.join("/"); - endpoint.set_path(path_str.as_str()); - - Ok(endpoint.as_str().parse().map_err::(Into::into)?) 
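-        // For instance (mirroring the tests below): "http://localhost:8126?api_key=123"
-        // with the v0.5 path becomes "http://localhost:8126/v0.5/traces?api_key=123",
-        // i.e. the version path is appended while host and query are preserved.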
-    }
-
-    fn build_exporter_with_service_name(
-        self,
-        service_name: String,
-    ) -> Result<DatadogExporter, TraceError> {
-        if let Some(client) = self.client {
-            let model_config = ModelConfig { service_name };
-
-            let exporter = DatadogExporter::new(
-                model_config,
-                Self::build_endpoint(&self.agent_endpoint, self.api_version.path())?,
-                self.api_version,
-                client,
-                self.mapping,
-                self.unified_tags,
-            );
-            Ok(exporter)
-        } else {
-            Err(Error::NoHttpClient.into())
-        }
-    }
-
-    /// Install the Datadog trace exporter pipeline using a simple span processor.
-    pub fn install_simple(mut self) -> Result<Tracer, TraceError> {
-        let (config, service_name) = self.build_config_and_service_name();
-        let exporter = self.build_exporter_with_service_name(service_name)?;
-        let mut provider_builder = TracerProvider::builder().with_simple_exporter(exporter);
-        provider_builder = provider_builder.with_config(config);
-        let provider = provider_builder.build();
-        let tracer = opentelemetry::trace::TracerProvider::versioned_tracer(
-            &provider,
-            "opentelemetry-datadog",
-            Some(env!("CARGO_PKG_VERSION")),
-            Some(semcov::SCHEMA_URL),
-            None,
-        );
-        let _ = global::set_tracer_provider(provider);
-        Ok(tracer)
-    }
-
-    /// Install the Datadog trace exporter pipeline using a batch span processor with the specified
-    /// runtime.
-    pub fn install_batch<R: RuntimeChannel>(mut self, runtime: R) -> Result<Tracer, TraceError> {
-        let (config, service_name) = self.build_config_and_service_name();
-        let exporter = self.build_exporter_with_service_name(service_name)?;
-        let mut provider_builder = TracerProvider::builder().with_batch_exporter(exporter, runtime);
-        provider_builder = provider_builder.with_config(config);
-        let provider = provider_builder.build();
-        let tracer = opentelemetry::trace::TracerProvider::versioned_tracer(
-            &provider,
-            "opentelemetry-datadog",
-            Some(env!("CARGO_PKG_VERSION")),
-            Some(semcov::SCHEMA_URL),
-            None,
-        );
-        let _ = global::set_tracer_provider(provider);
-        Ok(tracer)
-    }
-
-    /// Assign the service name under which to group traces
-    pub fn with_service_name<T: Into<String>>(mut self, service_name: T) -> Self {
-        self.unified_tags.set_service(Some(service_name.into()));
-        self
-    }
-
-    /// Assign the version under which to group traces
-    pub fn with_version<T: Into<String>>(mut self, version: T) -> Self {
-        self.unified_tags.set_version(Some(version.into()));
-        self
-    }
-
-    /// Assign the env under which to group traces
-    pub fn with_env<T: Into<String>>(mut self, env: T) -> Self {
-        self.unified_tags.set_env(Some(env.into()));
-        self
-    }
-
-    /// Assign the Datadog collector endpoint.
-    ///
-    /// The endpoint of the Datadog agent; by default it is `http://127.0.0.1:8126`.
-    pub fn with_agent_endpoint<T: Into<String>>(mut self, endpoint: T) -> Self {
-        self.agent_endpoint = endpoint.into();
-        self
-    }
-
-    /// Choose the HTTP client used by the uploader
-    pub fn with_http_client<T: HttpClient + 'static>(mut self, client: T) -> Self {
-        self.client = Some(Arc::new(client));
-        self
-    }
-
-    /// Assign the SDK trace configuration
-    pub fn with_trace_config(mut self, config: Config) -> Self {
-        self.trace_config = Some(config);
-        self
-    }
-
-    /// Set the version of the Datadog trace ingestion API
-    pub fn with_api_version(mut self, api_version: ApiVersion) -> Self {
-        self.api_version = api_version;
-        self
-    }
-
-    /// Customize the value used for the `resource` field in Datadog spans.
-    /// See [`FieldMappingFn`] for details.
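-    ///
-    /// A sketch of a custom resource mapping (an editor's illustration, not from
-    /// the original source; it assumes `ModelConfig` is re-exported at the crate
-    /// root). A plain `fn` is used so the higher-ranked `Fn` bound is met:
-    ///
-    /// ```no_run
-    /// use opentelemetry_datadog::ModelConfig;
-    /// use opentelemetry_sdk::export::trace::SpanData;
-    ///
-    /// fn resource<'a>(span: &'a SpanData, _config: &'a ModelConfig) -> &'a str {
-    ///     span.name.as_ref()
-    /// }
-    ///
-    /// let builder = opentelemetry_datadog::new_pipeline().with_resource_mapping(resource);
-    /// ```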
- pub fn with_resource_mapping(mut self, f: F) -> Self - where - F: for<'a> Fn(&'a SpanData, &'a ModelConfig) -> &'a str + Send + Sync + 'static, - { - self.mapping.resource = Some(Arc::new(f)); - self - } - - /// Custom the value used for `name` field in datadog spans. - /// See [`FieldMappingFn`] for details. - pub fn with_name_mapping(mut self, f: F) -> Self - where - F: for<'a> Fn(&'a SpanData, &'a ModelConfig) -> &'a str + Send + Sync + 'static, - { - self.mapping.name = Some(Arc::new(f)); - self - } - - /// Custom the value used for `service_name` field in datadog spans. - /// See [`FieldMappingFn`] for details. - pub fn with_service_name_mapping(mut self, f: F) -> Self - where - F: for<'a> Fn(&'a SpanData, &'a ModelConfig) -> &'a str + Send + Sync + 'static, - { - self.mapping.service_name = Some(Arc::new(f)); - self - } -} - -fn group_into_traces(spans: Vec) -> Vec> { - spans - .into_iter() - .into_group_map_by(|span_data| span_data.span_context.trace_id()) - .into_values() - .collect() -} - -async fn send_request( - client: Arc, - request: http::Request>, -) -> ExportResult { - let _ = client.send(request).await?.error_for_status()?; - Ok(()) -} - -impl SpanExporter for DatadogExporter { - /// Export spans to datadog-agent - fn export(&mut self, batch: Vec) -> BoxFuture<'static, ExportResult> { - let request = match self.build_request(batch) { - Ok(req) => req, - Err(err) => return Box::pin(std::future::ready(Err(err))), - }; - - let client = self.client.clone(); - Box::pin(send_request(client, request)) - } -} - -/// Helper struct to custom the mapping between Opentelemetry spans and datadog spans. -/// -/// This struct will be passed to [`FieldMappingFn`] -#[derive(Default, Debug)] -#[non_exhaustive] -pub struct ModelConfig { - pub service_name: String, -} - -fn mapping_debug(f: &Option) -> String { - if f.is_some() { - "custom mapping" - } else { - "default mapping" - } - .to_string() -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::ApiVersion::Version05; - - use crate::exporter::model::tests::get_span; - - #[test] - fn test_out_of_order_group() { - let batch = vec![get_span(1, 1, 1), get_span(2, 2, 2), get_span(1, 1, 3)]; - let expected = vec![ - vec![get_span(1, 1, 1), get_span(1, 1, 3)], - vec![get_span(2, 2, 2)], - ]; - - let mut traces = group_into_traces(batch); - // We need to sort the output in order to compare, but this is not required by the Datadog agent - traces.sort_by_key(|t| u128::from_be_bytes(t[0].span_context.trace_id().to_bytes())); - - assert_eq!(traces, expected); - } - - #[test] - fn test_agent_endpoint_with_api_version() { - let with_tail_slash = - DatadogPipelineBuilder::build_endpoint("http://localhost:8126/", Version05.path()); - let without_tail_slash = - DatadogPipelineBuilder::build_endpoint("http://localhost:8126", Version05.path()); - let with_query = DatadogPipelineBuilder::build_endpoint( - "http://localhost:8126?api_key=123", - Version05.path(), - ); - let invalid = DatadogPipelineBuilder::build_endpoint( - "http://localhost:klsajfjksfh", - Version05.path(), - ); - - assert_eq!( - with_tail_slash.unwrap().to_string(), - "http://localhost:8126/v0.5/traces" - ); - assert_eq!( - without_tail_slash.unwrap().to_string(), - "http://localhost:8126/v0.5/traces" - ); - assert_eq!( - with_query.unwrap().to_string(), - "http://localhost:8126/v0.5/traces?api_key=123" - ); - assert!(invalid.is_err()) - } - - #[derive(Debug)] - struct DummyClient; - - #[async_trait::async_trait] - impl HttpClient for DummyClient { - async fn send( - &self, - 
_request: Request<Vec<u8>>,
-        ) -> Result<http::Response<bytes::Bytes>, opentelemetry_http::HttpError> {
-            Ok(http::Response::new("dummy response".into()))
-        }
-    }
-
-    #[test]
-    fn test_custom_http_client() {
-        new_pipeline()
-            .with_http_client(DummyClient)
-            .build_exporter()
-            .unwrap();
-    }
-}
diff --git a/opentelemetry-datadog/src/exporter/model/mod.rs b/opentelemetry-datadog/src/exporter/model/mod.rs
deleted file mode 100644
index f0b626a3ec..0000000000
--- a/opentelemetry-datadog/src/exporter/model/mod.rs
+++ /dev/null
@@ -1,298 +0,0 @@
-use crate::exporter::ModelConfig;
-use http::uri;
-use opentelemetry_sdk::export::{
-    trace::{self, SpanData},
-    ExportError,
-};
-use std::fmt::Debug;
-use url::ParseError;
-
-use self::unified_tags::UnifiedTags;
-
-use super::Mapping;
-
-pub mod unified_tags;
-mod v03;
-mod v05;
-
-// todo: we should follow the same mapping defined in https://github.com/DataDog/datadog-agent/blob/main/pkg/trace/api/otlp.go
-
-// https://github.com/DataDog/dd-trace-js/blob/c89a35f7d27beb4a60165409376e170eacb194c5/packages/dd-trace/src/constants.js#L4
-static SAMPLING_PRIORITY_KEY: &str = "_sampling_priority_v1";
-
-/// Custom mapping between OpenTelemetry spans and Datadog spans.
-///
-/// Users can provide a custom function to change the mapping. It currently supports customizing the following
-/// fields in the Datadog span protocol.
-///
-/// |field name|default value|
-/// |---------------|-------------|
-/// |service name| service name configuration from [`ModelConfig`]|
-/// |name | opentelemetry instrumentation library name |
-/// |resource| opentelemetry name|
-///
-/// The function takes a reference to [`SpanData`] and a reference to [`ModelConfig`] as parameters.
-/// It should return a `&str` which will be used as the value for the field.
-///
-/// If no custom mapping is provided, the default mapping detailed above will be used.
-///
-/// For example,
-/// ```no_run
-/// use opentelemetry_datadog::{ApiVersion, new_pipeline};
-/// fn main() -> Result<(), opentelemetry::trace::TraceError> {
-///     let tracer = new_pipeline()
-///         .with_service_name("my_app")
-///         .with_api_version(ApiVersion::Version05)
-///         // the custom mapping below will change all spans' names to "datadog spans"
-///         .with_name_mapping(|span, model_config| {
-///             "datadog spans"
-///         })
-///         .with_agent_endpoint("http://localhost:8126")
-///         .install_batch(opentelemetry_sdk::runtime::Tokio)?;
-///
-///     Ok(())
-/// }
-/// ```
-pub type FieldMappingFn = dyn for<'a> Fn(&'a SpanData, &'a ModelConfig) -> &'a str + Send + Sync;
-
-pub(crate) type FieldMapping = std::sync::Arc<FieldMappingFn>;
-
-// Datadog uses some magic tags in their models. There is no recommended mapping defined in
-// the OpenTelemetry spec. Below is the default mapping we use. Users can override it by providing
-// their own implementations.
-fn default_service_name_mapping<'a>(_span: &'a SpanData, config: &'a ModelConfig) -> &'a str {
-    config.service_name.as_str()
-}
-
-fn default_name_mapping<'a>(span: &'a SpanData, _config: &'a ModelConfig) -> &'a str {
-    span.instrumentation_lib.name.as_ref()
-}
-
-fn default_resource_mapping<'a>(span: &'a SpanData, _config: &'a ModelConfig) -> &'a str {
-    span.name.as_ref()
-}
-
-/// Wrapper type for errors from the opentelemetry datadog exporter
-#[derive(Debug, thiserror::Error)]
-pub enum Error {
-    /// Message pack error
-    #[error("message pack error")]
-    MessagePackError,
-    /// No http client found.
User should provide one or enable features - #[error("http client must be set, users can enable reqwest or surf feature to use http client implementation within create")] - NoHttpClient, - /// Http requests failed with following errors - #[error(transparent)] - RequestError(#[from] http::Error), - /// The Uri was invalid - #[error("invalid url {0}")] - InvalidUri(String), - /// Other errors - #[error("{0}")] - Other(String), -} - -impl ExportError for Error { - fn exporter_name(&self) -> &'static str { - "datadog" - } -} - -impl From for Error { - fn from(_: rmp::encode::ValueWriteError) -> Self { - Self::MessagePackError - } -} - -impl From for Error { - fn from(err: ParseError) -> Self { - Self::InvalidUri(err.to_string()) - } -} - -impl From for Error { - fn from(err: uri::InvalidUri) -> Self { - Self::InvalidUri(err.to_string()) - } -} - -/// Version of datadog trace ingestion API -#[derive(Debug, Copy, Clone)] -#[non_exhaustive] -pub enum ApiVersion { - /// Version 0.3 - Version03, - /// Version 0.5 - requires datadog-agent v7.22.0 or above - Version05, -} - -impl ApiVersion { - pub(crate) fn path(self) -> &'static str { - match self { - ApiVersion::Version03 => "/v0.3/traces", - ApiVersion::Version05 => "/v0.5/traces", - } - } - - pub(crate) fn content_type(self) -> &'static str { - match self { - ApiVersion::Version03 => "application/msgpack", - ApiVersion::Version05 => "application/msgpack", - } - } - - pub(crate) fn encode( - self, - model_config: &ModelConfig, - traces: Vec>, - mapping: &Mapping, - unified_tags: &UnifiedTags, - ) -> Result, Error> { - match self { - Self::Version03 => v03::encode( - model_config, - traces, - |span, config| match &mapping.service_name { - Some(f) => f(span, config), - None => default_service_name_mapping(span, config), - }, - |span, config| match &mapping.name { - Some(f) => f(span, config), - None => default_name_mapping(span, config), - }, - |span, config| match &mapping.resource { - Some(f) => f(span, config), - None => default_resource_mapping(span, config), - }, - ), - Self::Version05 => v05::encode( - model_config, - traces, - |span, config| match &mapping.service_name { - Some(f) => f(span, config), - None => default_service_name_mapping(span, config), - }, - |span, config| match &mapping.name { - Some(f) => f(span, config), - None => default_name_mapping(span, config), - }, - |span, config| match &mapping.resource { - Some(f) => f(span, config), - None => default_resource_mapping(span, config), - }, - unified_tags, - ), - } - } -} - -#[cfg(test)] -pub(crate) mod tests { - use super::*; - use opentelemetry::{ - trace::{SpanContext, SpanId, SpanKind, Status, TraceFlags, TraceId, TraceState}, - KeyValue, - }; - use opentelemetry_sdk::{ - self, - trace::{SpanEvents, SpanLinks}, - InstrumentationLibrary, Resource, - }; - use std::borrow::Cow; - use std::time::{Duration, SystemTime}; - - fn get_traces() -> Vec> { - vec![vec![get_span(7, 1, 99)]] - } - - pub(crate) fn get_span(trace_id: u128, parent_span_id: u64, span_id: u64) -> trace::SpanData { - let span_context = SpanContext::new( - TraceId::from_u128(trace_id), - SpanId::from_u64(span_id), - TraceFlags::default(), - false, - TraceState::default(), - ); - - let start_time = SystemTime::UNIX_EPOCH; - let end_time = start_time.checked_add(Duration::from_secs(1)).unwrap(); - - let attributes = vec![KeyValue::new("span.type", "web")]; - let events = SpanEvents::default(); - let links = SpanLinks::default(); - let resource = Resource::new(vec![KeyValue::new("host.name", "test")]); - - 
trace::SpanData { - span_context, - parent_span_id: SpanId::from_u64(parent_span_id), - span_kind: SpanKind::Client, - name: "resource".into(), - start_time, - end_time, - attributes, - dropped_attributes_count: 0, - events, - links, - status: Status::Ok, - resource: Cow::Owned(resource), - instrumentation_lib: InstrumentationLibrary::new( - "component", - None::<&'static str>, - None::<&'static str>, - None, - ), - } - } - - #[test] - fn test_encode_v03() -> Result<(), Box> { - let traces = get_traces(); - let model_config = ModelConfig { - service_name: "service_name".to_string(), - ..Default::default() - }; - let encoded = base64::encode(ApiVersion::Version03.encode( - &model_config, - traces, - &Mapping::empty(), - &UnifiedTags::new(), - )?); - - assert_eq!(encoded.as_str(), "kZGMpHR5cGWjd2Vip3NlcnZpY2Wsc2VydmljZV9uYW1lpG5hbWWpY29tcG9uZW\ - 50qHJlc291cmNlqHJlc291cmNlqHRyYWNlX2lkzwAAAAAAAAAHp3NwYW5faWTPAAAAAAAAAGOpcGFyZW50X2lkzwAAAA\ - AAAAABpXN0YXJ00wAAAAAAAAAAqGR1cmF0aW9u0wAAAAA7msoApWVycm9y0gAAAACkbWV0YYKpaG9zdC5uYW1lpHRlc3\ - Spc3Bhbi50eXBlo3dlYqdtZXRyaWNzgbVfc2FtcGxpbmdfcHJpb3JpdHlfdjHLAAAAAAAAAAA="); - - Ok(()) - } - - #[test] - fn test_encode_v05() -> Result<(), Box> { - let traces = get_traces(); - let model_config = ModelConfig { - service_name: "service_name".to_string(), - ..Default::default() - }; - - let mut unified_tags = UnifiedTags::new(); - unified_tags.set_env(Some(String::from("test-env"))); - unified_tags.set_version(Some(String::from("test-version"))); - unified_tags.set_service(Some(String::from("test-service"))); - - let _encoded = base64::encode(ApiVersion::Version05.encode( - &model_config, - traces, - &Mapping::empty(), - &unified_tags, - )?); - - // TODO: Need someone to generate the expected result or instructions to do so. 
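-        // One possible workflow (an editor's sketch, untested): base64-decode
-        // `_encoded`, inspect the raw MessagePack with any msgpack viewer, and
-        // verify each field by hand before pinning the expected string here.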
- // assert_eq!(encoded.as_str(), "kp6jd2VirHNlcnZpY2VfbmFtZaljb21wb25lbnSocmVzb3VyY2WpaG9zdC5uYW\ - // 1lpHRlc3Snc2VydmljZax0ZXN0LXNlcnZpY2WjZW52qHRlc3QtZW52p3ZlcnNpb26sdGVzdC12ZXJzaW9uqXNwYW4udH\ - // lwZbVfc2FtcGxpbmdfcHJpb3JpdHlfdjGRkZzOAAAAAc4AAAACzgAAAAPPAAAAAAAAAAfPAAAAAAAAAGPPAAAAAAAAAA\ - // HTAAAAAAAAAADTAAAAADuaygDSAAAAAIXOAAAABM4AAAAFzgAAAAbOAAAAB84AAAAIzgAAAAnOAAAACs4AAAALzgAAAA\ - // zOAAAAAIHOAAAADcsAAAAAAAAAAM4AAAAA"); - - Ok(()) - } -} diff --git a/opentelemetry-datadog/src/exporter/model/unified_tags.rs b/opentelemetry-datadog/src/exporter/model/unified_tags.rs deleted file mode 100644 index e4e835c550..0000000000 --- a/opentelemetry-datadog/src/exporter/model/unified_tags.rs +++ /dev/null @@ -1,123 +0,0 @@ -/// Unified tags - See: https://docs.datadoghq.com/getting_started/tagging/unified_service_tagging - -pub struct UnifiedTags { - pub service: UnifiedTagField, - pub env: UnifiedTagField, - pub version: UnifiedTagField, -} - -impl UnifiedTags { - pub fn new() -> Self { - UnifiedTags { - service: UnifiedTagField::new(UnifiedTagEnum::Service), - env: UnifiedTagField::new(UnifiedTagEnum::Env), - version: UnifiedTagField::new(UnifiedTagEnum::Version), - } - } - pub fn set_service(&mut self, service: Option) { - self.service.value = service; - } - pub fn set_version(&mut self, version: Option) { - self.version.value = version; - } - pub fn set_env(&mut self, env: Option) { - self.env.value = env; - } - pub fn service(&self) -> Option { - self.service.value.clone() - } - pub fn compute_attribute_size(&self) -> u32 { - self.service.len() + self.env.len() + self.version.len() - } -} - -pub struct UnifiedTagField { - pub value: Option, - pub kind: UnifiedTagEnum, -} - -impl UnifiedTagField { - pub fn new(kind: UnifiedTagEnum) -> Self { - UnifiedTagField { - value: kind.find_unified_tag_value(), - kind, - } - } - pub fn len(&self) -> u32 { - if self.value.is_some() { - return 1; - } - 0 - } - pub fn get_tag_name(&self) -> &'static str { - self.kind.get_tag_name() - } -} - -pub enum UnifiedTagEnum { - Service, - Version, - Env, -} - -impl UnifiedTagEnum { - fn get_env_variable_name(&self) -> &'static str { - match self { - UnifiedTagEnum::Service => "DD_SERVICE", - UnifiedTagEnum::Version => "DD_VERSION", - UnifiedTagEnum::Env => "DD_ENV", - } - } - fn get_tag_name(&self) -> &'static str { - match self { - UnifiedTagEnum::Service => "service", - UnifiedTagEnum::Version => "version", - UnifiedTagEnum::Env => "env", - } - } - fn find_unified_tag_value(&self) -> Option { - let env_name_to_check = self.get_env_variable_name(); - match std::env::var(env_name_to_check) { - Ok(tag_value) => Some(tag_value.to_lowercase()), - _ => None, - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_service() { - std::env::set_var("DD_SERVICE", "test-SERVICE"); - let mut unified_tags = UnifiedTags::new(); - assert_eq!("test-service", unified_tags.service.value.clone().unwrap()); - unified_tags.set_service(Some(String::from("new_service"))); - assert_eq!("new_service", unified_tags.service().unwrap()); - std::env::remove_var("DD_SERVICE"); - } - - #[test] - fn test_env() { - std::env::set_var("DD_ENV", "test-env"); - let mut unified_tags = UnifiedTags::new(); - assert_eq!("test-env", unified_tags.env.value.clone().unwrap()); - unified_tags.set_env(Some(String::from("new_env"))); - assert_eq!("new_env", unified_tags.env.value.unwrap()); - std::env::remove_var("DD_ENV"); - } - - #[test] - fn test_version() { - std::env::set_var("DD_VERSION", "test-version-1.2.3"); - let mut 
unified_tags = UnifiedTags::new(); - assert_eq!( - "test-version-1.2.3", - unified_tags.version.value.clone().unwrap() - ); - unified_tags.set_version(Some(String::from("new_version"))); - assert_eq!("new_version", unified_tags.version.value.unwrap()); - std::env::remove_var("DD_VERSION"); - } -} diff --git a/opentelemetry-datadog/src/exporter/model/v03.rs b/opentelemetry-datadog/src/exporter/model/v03.rs deleted file mode 100644 index 8f27dce767..0000000000 --- a/opentelemetry-datadog/src/exporter/model/v03.rs +++ /dev/null @@ -1,126 +0,0 @@ -use crate::exporter::model::{Error, SAMPLING_PRIORITY_KEY}; -use crate::exporter::ModelConfig; -use opentelemetry::trace::Status; -use opentelemetry_sdk::export::trace::SpanData; -use std::time::SystemTime; - -pub(crate) fn encode( - model_config: &ModelConfig, - traces: Vec>, - get_service_name: S, - get_name: N, - get_resource: R, -) -> Result, Error> -where - for<'a> S: Fn(&'a SpanData, &'a ModelConfig) -> &'a str, - for<'a> N: Fn(&'a SpanData, &'a ModelConfig) -> &'a str, - for<'a> R: Fn(&'a SpanData, &'a ModelConfig) -> &'a str, -{ - let mut encoded = Vec::new(); - rmp::encode::write_array_len(&mut encoded, traces.len() as u32)?; - - for trace in traces.into_iter() { - rmp::encode::write_array_len(&mut encoded, trace.len() as u32)?; - - for span in trace.into_iter() { - // Safe until the year 2262 when Datadog will need to change their API - let start = span - .start_time - .duration_since(SystemTime::UNIX_EPOCH) - .unwrap() - .as_nanos() as i64; - - let duration = span - .end_time - .duration_since(span.start_time) - .map(|x| x.as_nanos() as i64) - .unwrap_or(0); - - let mut span_type_found = false; - for kv in &span.attributes { - if kv.key.as_str() == "span.type" { - span_type_found = true; - rmp::encode::write_map_len(&mut encoded, 12)?; - rmp::encode::write_str(&mut encoded, "type")?; - rmp::encode::write_str(&mut encoded, kv.value.as_str().as_ref())?; - break; - } - } - - if !span_type_found { - rmp::encode::write_map_len(&mut encoded, 11)?; - } - - // Datadog span name is OpenTelemetry component name - see module docs for more information - rmp::encode::write_str(&mut encoded, "service")?; - rmp::encode::write_str(&mut encoded, get_service_name(&span, model_config))?; - - rmp::encode::write_str(&mut encoded, "name")?; - rmp::encode::write_str(&mut encoded, get_name(&span, model_config))?; - - rmp::encode::write_str(&mut encoded, "resource")?; - rmp::encode::write_str(&mut encoded, get_resource(&span, model_config))?; - - rmp::encode::write_str(&mut encoded, "trace_id")?; - rmp::encode::write_u64( - &mut encoded, - u128::from_be_bytes(span.span_context.trace_id().to_bytes()) as u64, - )?; - - rmp::encode::write_str(&mut encoded, "span_id")?; - rmp::encode::write_u64( - &mut encoded, - u64::from_be_bytes(span.span_context.span_id().to_bytes()), - )?; - - rmp::encode::write_str(&mut encoded, "parent_id")?; - rmp::encode::write_u64( - &mut encoded, - u64::from_be_bytes(span.parent_span_id.to_bytes()), - )?; - - rmp::encode::write_str(&mut encoded, "start")?; - rmp::encode::write_i64(&mut encoded, start)?; - - rmp::encode::write_str(&mut encoded, "duration")?; - rmp::encode::write_i64(&mut encoded, duration)?; - - rmp::encode::write_str(&mut encoded, "error")?; - rmp::encode::write_i32( - &mut encoded, - match span.status { - Status::Error { .. 
} => 1, - _ => 0, - }, - )?; - - rmp::encode::write_str(&mut encoded, "meta")?; - rmp::encode::write_map_len( - &mut encoded, - (span.attributes.len() + span.resource.len()) as u32, - )?; - for (key, value) in span.resource.iter() { - rmp::encode::write_str(&mut encoded, key.as_str())?; - rmp::encode::write_str(&mut encoded, value.as_str().as_ref())?; - } - for kv in span.attributes.iter() { - rmp::encode::write_str(&mut encoded, kv.key.as_str())?; - rmp::encode::write_str(&mut encoded, kv.value.as_str().as_ref())?; - } - - rmp::encode::write_str(&mut encoded, "metrics")?; - rmp::encode::write_map_len(&mut encoded, 1)?; - rmp::encode::write_str(&mut encoded, SAMPLING_PRIORITY_KEY)?; - rmp::encode::write_f64( - &mut encoded, - if span.span_context.is_sampled() { - 1.0 - } else { - 0.0 - }, - )?; - } - } - - Ok(encoded) -} diff --git a/opentelemetry-datadog/src/exporter/model/v05.rs b/opentelemetry-datadog/src/exporter/model/v05.rs deleted file mode 100644 index f64de885d1..0000000000 --- a/opentelemetry-datadog/src/exporter/model/v05.rs +++ /dev/null @@ -1,222 +0,0 @@ -use crate::exporter::intern::StringInterner; -use crate::exporter::model::SAMPLING_PRIORITY_KEY; -use crate::exporter::{Error, ModelConfig}; -use opentelemetry::trace::Status; -use opentelemetry_sdk::export::trace::SpanData; -use std::time::SystemTime; - -use super::unified_tags::{UnifiedTagField, UnifiedTags}; - -const SPAN_NUM_ELEMENTS: u32 = 12; - -// Protocol documentation sourced from https://github.com/DataDog/datadog-agent/blob/c076ea9a1ffbde4c76d35343dbc32aecbbf99cb9/pkg/trace/api/version.go -// -// The payload is an array containing exactly 2 elements: -// -// 1. An array of all unique strings present in the payload (a dictionary referred to by index). -// 2. An array of traces, where each trace is an array of spans. A span is encoded as an array having -// exactly 12 elements, representing all span properties, in this exact order: -// -// 0: Service (uint32) -// 1: Name (uint32) -// 2: Resource (uint32) -// 3: TraceID (uint64) -// 4: SpanID (uint64) -// 5: ParentID (uint64) -// 6: Start (int64) -// 7: Duration (int64) -// 8: Error (int32) -// 9: Meta (map[uint32]uint32) -// 10: Metrics (map[uint32]float64) -// 11: Type (uint32) -// -// Considerations: -// -// - The "uint32" typed values in "Service", "Name", "Resource", "Type", "Meta" and "Metrics" represent -// the index at which the corresponding string is found in the dictionary. If any of the values are the -// empty string, then the empty string must be added into the dictionary. -// -// - None of the elements can be nil. If any of them are unset, they should be given their "zero-value". Here -// is an example of a span with all unset values: -// -// 0: 0 // Service is "" (index 0 in dictionary) -// 1: 0 // Name is "" -// 2: 0 // Resource is "" -// 3: 0 // TraceID -// 4: 0 // SpanID -// 5: 0 // ParentID -// 6: 0 // Start -// 7: 0 // Duration -// 8: 0 // Error -// 9: map[uint32]uint32{} // Meta (empty map) -// 10: map[uint32]float64{} // Metrics (empty map) -// 11: 0 // Type is "" -// -// The dictionary in this case would be []string{""}, having only the empty string at index 0.
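To make the layout described above concrete, here is a minimal standalone sketch (an editorial illustration, not part of the deleted file; the helper name is hypothetical) that assembles the smallest valid v0.5 payload: a one-entry string dictionary and zero traces.

    fn encode_empty_v05_payload() -> Vec<u8> {
        let mut payload = Vec::new();
        // Top level: exactly two elements, [string dictionary, traces].
        rmp::encode::write_array_len(&mut payload, 2).expect("writing to a Vec cannot fail");
        // Dictionary with a single entry: the empty string at index 0.
        rmp::encode::write_array_len(&mut payload, 1).expect("writing to a Vec cannot fail");
        rmp::encode::write_str(&mut payload, "").expect("writing to a Vec cannot fail");
        // An empty list of traces.
        rmp::encode::write_array_len(&mut payload, 0).expect("writing to a Vec cannot fail");
        payload
    }

Every string-valued span field is then written as a u32 index into that dictionary, which is what the StringInterner used below provides.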
-// -pub(crate) fn encode<S, N, R>( - model_config: &ModelConfig, - traces: Vec<Vec<SpanData>>, - get_service_name: S, - get_name: N, - get_resource: R, - unified_tags: &UnifiedTags, -) -> Result<Vec<u8>, Error> -where - for<'a> S: Fn(&'a SpanData, &'a ModelConfig) -> &'a str, - for<'a> N: Fn(&'a SpanData, &'a ModelConfig) -> &'a str, - for<'a> R: Fn(&'a SpanData, &'a ModelConfig) -> &'a str, -{ - let mut interner = StringInterner::new(); - let mut encoded_traces = encode_traces( - &mut interner, - model_config, - get_service_name, - get_name, - get_resource, - traces, - unified_tags, - )?; - - let mut payload = Vec::new(); - rmp::encode::write_array_len(&mut payload, 2)?; - - rmp::encode::write_array_len(&mut payload, interner.len())?; - for data in interner.iter() { - rmp::encode::write_str(&mut payload, data)?; - } - - payload.append(&mut encoded_traces); - - Ok(payload) -} - -fn write_unified_tags( - encoded: &mut Vec<u8>, - interner: &mut StringInterner, - unified_tags: &UnifiedTags, -) -> Result<(), Error> { - write_unified_tag(encoded, interner, &unified_tags.service)?; - write_unified_tag(encoded, interner, &unified_tags.env)?; - write_unified_tag(encoded, interner, &unified_tags.version)?; - Ok(()) -} - -fn write_unified_tag( - encoded: &mut Vec<u8>, - interner: &mut StringInterner, - tag: &UnifiedTagField, -) -> Result<(), Error> { - if let Some(tag_value) = &tag.value { - rmp::encode::write_u32(encoded, interner.intern(tag.get_tag_name()))?; - rmp::encode::write_u32(encoded, interner.intern(tag_value.as_str().as_ref()))?; - } - Ok(()) -} - -fn encode_traces<S, N, R>( - interner: &mut StringInterner, - model_config: &ModelConfig, - get_service_name: S, - get_name: N, - get_resource: R, - traces: Vec<Vec<SpanData>>, - unified_tags: &UnifiedTags, -) -> Result<Vec<u8>, Error> -where - for<'a> S: Fn(&'a SpanData, &'a ModelConfig) -> &'a str, - for<'a> N: Fn(&'a SpanData, &'a ModelConfig) -> &'a str, - for<'a> R: Fn(&'a SpanData, &'a ModelConfig) -> &'a str, -{ - let mut encoded = Vec::new(); - rmp::encode::write_array_len(&mut encoded, traces.len() as u32)?; - - for trace in traces.into_iter() { - rmp::encode::write_array_len(&mut encoded, trace.len() as u32)?; - - for span in trace.into_iter() { - // Safe until the year 2262 when Datadog will need to change their API - let start = span - .start_time - .duration_since(SystemTime::UNIX_EPOCH) - .unwrap() - .as_nanos() as i64; - - let duration = span - .end_time - .duration_since(span.start_time) - .map(|x| x.as_nanos() as i64) - .unwrap_or(0); - - let mut span_type = interner.intern(""); - for kv in &span.attributes { - if kv.key.as_str() == "span.type" { - span_type = interner.intern(kv.value.as_str().as_ref()); - break; - } - } - - // Datadog span name is OpenTelemetry component name - see module docs for more information - rmp::encode::write_array_len(&mut encoded, SPAN_NUM_ELEMENTS)?; - rmp::encode::write_u32( - &mut encoded, - interner.intern(get_service_name(&span, model_config)), - )?; - rmp::encode::write_u32(&mut encoded, interner.intern(get_name(&span, model_config)))?; - rmp::encode::write_u32( - &mut encoded, - interner.intern(get_resource(&span, model_config)), - )?; - rmp::encode::write_u64( - &mut encoded, - u128::from_be_bytes(span.span_context.trace_id().to_bytes()) as u64, - )?; - rmp::encode::write_u64( - &mut encoded, - u64::from_be_bytes(span.span_context.span_id().to_bytes()), - )?; - rmp::encode::write_u64( - &mut encoded, - u64::from_be_bytes(span.parent_span_id.to_bytes()), - )?; - rmp::encode::write_i64(&mut encoded, start)?; - rmp::encode::write_i64(&mut encoded, duration)?; -
rmp::encode::write_i32( - &mut encoded, - match span.status { - Status::Error { .. } => 1, - _ => 0, - }, - )?; - rmp::encode::write_map_len( - &mut encoded, - (span.attributes.len() + span.resource.len()) as u32 - + unified_tags.compute_attribute_size(), - )?; - for (key, value) in span.resource.iter() { - rmp::encode::write_u32(&mut encoded, interner.intern(key.as_str()))?; - rmp::encode::write_u32(&mut encoded, interner.intern(value.as_str().as_ref()))?; - } - - write_unified_tags(&mut encoded, interner, unified_tags)?; - - for kv in span.attributes.iter() { - rmp::encode::write_u32(&mut encoded, interner.intern(kv.key.as_str()))?; - rmp::encode::write_u32(&mut encoded, interner.intern(kv.value.as_str().as_ref()))?; - } - rmp::encode::write_map_len(&mut encoded, 1)?; - rmp::encode::write_u32(&mut encoded, interner.intern(SAMPLING_PRIORITY_KEY))?; - rmp::encode::write_f64( - &mut encoded, - if span.span_context.is_sampled() { - 1.0 - } else { - 0.0 - }, - )?; - rmp::encode::write_u32(&mut encoded, span_type)?; - } - } - - Ok(encoded) -} diff --git a/opentelemetry-datadog/src/lib.rs b/opentelemetry-datadog/src/lib.rs deleted file mode 100644 index 273b9bc8f1..0000000000 --- a/opentelemetry-datadog/src/lib.rs +++ /dev/null @@ -1,401 +0,0 @@ -//! # OpenTelemetry Datadog Exporter -//! -//! An OpenTelemetry Datadog exporter implementation -//! -//! See the [Datadog Docs](https://docs.datadoghq.com/agent/) for information on how to run the datadog-agent -//! -//! ## Quirks -//! -//! There are currently some incompatibilities between Datadog and OpenTelemetry, and these manifest -//! as minor quirks in this exporter. -//! -//! Firstly, Datadog uses operation_name to describe what OpenTracing would call a component. -//! Or to put it another way, in OpenTracing the operation / span names are relatively -//! granular and might be used to identify a specific endpoint. In Datadog, however, they -//! are less granular - it is expected in Datadog that a service will have a single -//! primary span name that is the root of all traces within that service, with an additional piece of -//! metadata called resource_name providing granularity. See [here](https://docs.datadoghq.com/tracing/guide/configuring-primary-operation/) -//! -//! The Datadog Golang API takes the approach of using a `resource.name` OpenTelemetry attribute to set the -//! resource_name. See [here](https://github.com/DataDog/dd-trace-go/blob/ecb0b805ef25b00888a2fb62d465a5aa95e7301e/ddtrace/opentracer/tracer.go#L10) -//! -//! Unfortunately, this breaks compatibility with other OpenTelemetry exporters which expect -//! a more granular operation name - as per the OpenTracing specification. -//! -//! This exporter therefore takes a different approach of naming the span with the name of the -//! tracing provider, and using the span name to set the resource_name. This should in most cases -//! lead to the behaviour that users expect. -//! -//! Datadog additionally has a span_type string that alters the rendering of the spans in the web UI. -//! This can be set as the `span.type` OpenTelemetry span attribute. -//! -//! For standard values see [here](https://github.com/DataDog/dd-trace-go/blob/ecb0b805ef25b00888a2fb62d465a5aa95e7301e/ddtrace/ext/app_types.go#L31). -//! -//! If the default mapping does not fit your use case, you may change some of it by providing [`FieldMappingFn`]s in the pipeline. -//! -//! ## Performance -//! -//! For optimal performance, a batch exporter is recommended as the simple exporter will export
each span synchronously on drop. You can enable the [`rt-tokio`], [`rt-tokio-current-thread`] -//! or [`rt-async-std`] features and specify a runtime on the pipeline to have a batch exporter -//! configured for you automatically. -//! -//! ```toml -//! [dependencies] -//! opentelemetry = { version = "*", features = ["rt-tokio"] } -//! opentelemetry-datadog = "*" -//! ``` -//! -//! ```no_run -//! # fn main() -> Result<(), opentelemetry::trace::TraceError> { -//! let tracer = opentelemetry_datadog::new_pipeline() -//! .install_batch(opentelemetry_sdk::runtime::Tokio)?; -//! # Ok(()) -//! # } -//! ``` -//! -//! [`rt-tokio`]: https://tokio.rs -//! [`rt-tokio-current-thread`]: https://tokio.rs -//! [`rt-async-std`]: https://async.rs -//! -//! ## Bring your own http client -//! -//! Users can choose appropriate http clients to align with their runtime. -//! -//! The default http client depends on the features enabled. If no client feature is specified, or the -//! `reqwest-blocking-client` feature is enabled, the blocking reqwest http client is used as the default. -//! If the `reqwest-client` feature is enabled, the async reqwest http client is used. If the -//! `surf-client` feature is enabled, the surf http client is used. -//! -//! Note that async http clients may require a specific runtime, and will otherwise panic. Users should make -//! sure the http client is running in the appropriate runtime. -//! -//! Users can always use their own http clients by implementing the `HttpClient` trait. -//! -//! ## Kitchen Sink Full Configuration -//! -//! Example showing how to override all configuration options. See the -//! [`DatadogPipelineBuilder`] docs for details of each option. -//! -//! [`DatadogPipelineBuilder`]: struct.DatadogPipelineBuilder.html -//! -//! ```no_run -//! use opentelemetry::{KeyValue, trace::Tracer}; -//! use opentelemetry_sdk::{trace::{self, RandomIdGenerator, Sampler}, Resource}; -//! use opentelemetry_sdk::export::trace::ExportResult; -//! use opentelemetry::global::shutdown_tracer_provider; -//! use opentelemetry_datadog::{new_pipeline, ApiVersion, Error}; -//! use opentelemetry_http::{HttpClient, HttpError}; -//! use async_trait::async_trait; -//! use bytes::Bytes; -//! use futures_util::io::AsyncReadExt as _; -//! use http::{Request, Response}; -//! use std::convert::TryInto as _; -//! -//! // `reqwest` and `surf` are supported through features; if you prefer an -//! // alternate http client you can add support by implementing `HttpClient` as -//! // shown here. -//! #[derive(Debug)] -//! struct IsahcClient(isahc::HttpClient); -//! -//! #[async_trait] -//! impl HttpClient for IsahcClient { -//! async fn send(&self, request: Request<Vec<u8>>) -> Result<Response<Bytes>, HttpError> { -//! let mut response = self.0.send_async(request).await?; -//! let status = response.status(); -//! let mut bytes = Vec::with_capacity(response.body().len().unwrap_or(0).try_into()?); -//! isahc::AsyncReadResponseExt::copy_to(&mut response, &mut bytes).await?; -//! -//! Ok(Response::builder() -//! .status(status) -//! .body(bytes.into())?) -//! } -//! } -//! -//! fn main() -> Result<(), opentelemetry::trace::TraceError> { -//! let tracer = new_pipeline() -//! .with_service_name("my_app") -//! .with_api_version(ApiVersion::Version05) -//! .with_agent_endpoint("http://localhost:8126") -//! .with_trace_config( -//! trace::config() -//! .with_sampler(Sampler::AlwaysOn) -//! .with_id_generator(RandomIdGenerator::default()) -//! ) -//! .install_batch(opentelemetry_sdk::runtime::Tokio)?; -//! -//!
tracer.in_span("doing_work", |cx| { -//! // Traced app logic here... -//! }); -//! -//! shutdown_tracer_provider(); // sending remaining spans before exit -//! -//! Ok(()) -//! } -//! ``` - -mod exporter; - -pub use exporter::{ - new_pipeline, ApiVersion, DatadogExporter, DatadogPipelineBuilder, Error, FieldMappingFn, - ModelConfig, -}; -pub use propagator::DatadogPropagator; - -mod propagator { - use once_cell::sync::Lazy; - use opentelemetry::{ - propagation::{text_map_propagator::FieldIter, Extractor, Injector, TextMapPropagator}, - trace::{SpanContext, SpanId, TraceContextExt, TraceFlags, TraceId, TraceState}, - Context, - }; - - const DATADOG_TRACE_ID_HEADER: &str = "x-datadog-trace-id"; - const DATADOG_PARENT_ID_HEADER: &str = "x-datadog-parent-id"; - const DATADOG_SAMPLING_PRIORITY_HEADER: &str = "x-datadog-sampling-priority"; - - const TRACE_FLAG_DEFERRED: TraceFlags = TraceFlags::new(0x02); - - static DATADOG_HEADER_FIELDS: Lazy<[String; 3]> = Lazy::new(|| { - [ - DATADOG_TRACE_ID_HEADER.to_string(), - DATADOG_PARENT_ID_HEADER.to_string(), - DATADOG_SAMPLING_PRIORITY_HEADER.to_string(), - ] - }); - - enum SamplingPriority { - UserReject = -1, - AutoReject = 0, - AutoKeep = 1, - UserKeep = 2, - } - - #[derive(Debug)] - enum ExtractError { - TraceId, - SpanId, - SamplingPriority, - } - - /// Extracts and injects `SpanContext`s into `Extractor`s or `Injector`s using Datadog's header format. - /// - /// The Datadog header format does not have an explicit spec, but can be divined from the client libraries, - /// such as [dd-trace-go] - /// - /// ## Example - /// - /// ``` - /// use opentelemetry::global; - /// use opentelemetry_datadog::DatadogPropagator; - /// - /// global::set_text_map_propagator(DatadogPropagator::default()); - /// ``` - /// - /// [dd-trace-go]: https://github.com/DataDog/dd-trace-go/blob/v1.28.0/ddtrace/tracer/textmap.go#L293 - #[derive(Clone, Debug, Default)] - pub struct DatadogPropagator { - _private: (), - } - - impl DatadogPropagator { - /// Creates a new `DatadogPropagator`. 
pub fn new() -> Self { - DatadogPropagator::default() - } - - fn extract_trace_id(&self, trace_id: &str) -> Result<TraceId, ExtractError> { - trace_id - .parse::<u64>() - .map(|id| TraceId::from(id as u128)) - .map_err(|_| ExtractError::TraceId) - } - - fn extract_span_id(&self, span_id: &str) -> Result<SpanId, ExtractError> { - span_id - .parse::<u64>() - .map(SpanId::from) - .map_err(|_| ExtractError::SpanId) - } - - fn extract_sampling_priority( - &self, - sampling_priority: &str, - ) -> Result<SamplingPriority, ExtractError> { - let i = sampling_priority - .parse::<i32>() - .map_err(|_| ExtractError::SamplingPriority)?; - - match i { - -1 => Ok(SamplingPriority::UserReject), - 0 => Ok(SamplingPriority::AutoReject), - 1 => Ok(SamplingPriority::AutoKeep), - 2 => Ok(SamplingPriority::UserKeep), - _ => Err(ExtractError::SamplingPriority), - } - } - - fn extract_span_context( - &self, - extractor: &dyn Extractor, - ) -> Result<SpanContext, ExtractError> { - let trace_id = - self.extract_trace_id(extractor.get(DATADOG_TRACE_ID_HEADER).unwrap_or(""))?; - // If we have a trace_id but can't get the parent span, we default it to invalid instead of completely erroring - // out so that the rest of the spans aren't completely lost - let span_id = self - .extract_span_id(extractor.get(DATADOG_PARENT_ID_HEADER).unwrap_or("")) - .unwrap_or(SpanId::INVALID); - let sampling_priority = self.extract_sampling_priority( - extractor - .get(DATADOG_SAMPLING_PRIORITY_HEADER) - .unwrap_or(""), - ); - let sampled = match sampling_priority { - Ok(SamplingPriority::UserReject) | Ok(SamplingPriority::AutoReject) => { - TraceFlags::default() - } - Ok(SamplingPriority::UserKeep) | Ok(SamplingPriority::AutoKeep) => { - TraceFlags::SAMPLED - } - // Treat the sampling as DEFERRED instead of erroring on extracting the span context - Err(_) => TRACE_FLAG_DEFERRED, - }; - - let trace_state = TraceState::default(); - - Ok(SpanContext::new( - trace_id, - span_id, - sampled, - true, - trace_state, - )) - } - } - - impl TextMapPropagator for DatadogPropagator { - fn inject_context(&self, cx: &Context, injector: &mut dyn Injector) { - let span = cx.span(); - let span_context = span.span_context(); - if span_context.is_valid() { - injector.set( - DATADOG_TRACE_ID_HEADER, - (u128::from_be_bytes(span_context.trace_id().to_bytes()) as u64).to_string(), - ); - injector.set( - DATADOG_PARENT_ID_HEADER, - u64::from_be_bytes(span_context.span_id().to_bytes()).to_string(), - ); - - if span_context.trace_flags() & TRACE_FLAG_DEFERRED != TRACE_FLAG_DEFERRED { - let sampling_priority = if span_context.is_sampled() { - SamplingPriority::AutoKeep - } else { - SamplingPriority::AutoReject - }; - - injector.set( - DATADOG_SAMPLING_PRIORITY_HEADER, - (sampling_priority as i32).to_string(), - ); - } - } - } - - fn extract_with_context(&self, cx: &Context, extractor: &dyn Extractor) -> Context { - self.extract_span_context(extractor) - .map(|sc| cx.with_remote_span_context(sc)) - .unwrap_or_else(|_| cx.clone()) - } - - fn fields(&self) -> FieldIter<'_> { - FieldIter::new(DATADOG_HEADER_FIELDS.as_ref()) - } - } - - #[cfg(test)] - mod tests { - use super::*; - use opentelemetry::trace::TraceState; - use opentelemetry_sdk::testing::trace::TestSpan; - use std::collections::HashMap; - - #[rustfmt::skip] - fn extract_test_data() -> Vec<(Vec<(&'static str, &'static str)>, SpanContext)> { - vec![ - (vec![], SpanContext::empty_context()), - (vec![(DATADOG_SAMPLING_PRIORITY_HEADER, "0")], SpanContext::empty_context()), - (vec![(DATADOG_TRACE_ID_HEADER, "garbage")], SpanContext::empty_context()), - (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER,
"garbage")], SpanContext::new(TraceId::from_u128(1234), SpanId::INVALID, TRACE_FLAG_DEFERRED, true, TraceState::default())), - (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TRACE_FLAG_DEFERRED, true, TraceState::default())), - (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "0")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TraceFlags::default(), true, TraceState::default())), - (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "1")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TraceFlags::SAMPLED, true, TraceState::default())), - ] - } - - #[rustfmt::skip] - fn inject_test_data() -> Vec<(Vec<(&'static str, &'static str)>, SpanContext)> { - vec![ - (vec![], SpanContext::empty_context()), - (vec![], SpanContext::new(TraceId::INVALID, SpanId::INVALID, TRACE_FLAG_DEFERRED, true, TraceState::default())), - (vec![], SpanContext::new(TraceId::from_hex("1234").unwrap(), SpanId::INVALID, TRACE_FLAG_DEFERRED, true, TraceState::default())), - (vec![], SpanContext::new(TraceId::from_hex("1234").unwrap(), SpanId::INVALID, TraceFlags::SAMPLED, true, TraceState::default())), - (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TRACE_FLAG_DEFERRED, true, TraceState::default())), - (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "0")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TraceFlags::default(), true, TraceState::default())), - (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "1")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TraceFlags::SAMPLED, true, TraceState::default())), - ] - } - - #[test] - fn test_extract() { - for (header_list, expected) in extract_test_data() { - let map: HashMap = header_list - .into_iter() - .map(|(k, v)| (k.to_string(), v.to_string())) - .collect(); - - let propagator = DatadogPropagator::default(); - let context = propagator.extract(&map); - assert_eq!(context.span().span_context(), &expected); - } - } - - #[test] - fn test_extract_empty() { - let map: HashMap = HashMap::new(); - let propagator = DatadogPropagator::default(); - let context = propagator.extract(&map); - assert_eq!(context.span().span_context(), &SpanContext::empty_context()) - } - - #[test] - fn test_extract_with_empty_remote_context() { - let map: HashMap = HashMap::new(); - let propagator = DatadogPropagator::default(); - let context = propagator.extract_with_context(&Context::new(), &map); - assert!(!context.has_active_span()) - } - - #[test] - fn test_inject() { - let propagator = DatadogPropagator::default(); - for (header_values, span_context) in inject_test_data() { - let mut injector: HashMap = HashMap::new(); - propagator.inject_context( - &Context::current_with_span(TestSpan(span_context)), - &mut injector, - ); - - if !header_values.is_empty() { - for (k, v) in header_values.into_iter() { - let injected_value: Option<&String> = injector.get(k); - assert_eq!(injected_value, Some(&v.to_string())); - injector.remove(k); - } - } - assert!(injector.is_empty()); - } - } - } -} diff --git a/opentelemetry-dynatrace/CHANGELOG.md b/opentelemetry-dynatrace/CHANGELOG.md deleted file mode 100644 
index 495a55f66c..0000000000 --- a/opentelemetry-dynatrace/CHANGELOG.md +++ /dev/null @@ -1,31 +0,0 @@ -# Changelog - -## v0.4.0 - -### Changed - -- Bump MSRV to 1.65 [#1318](https://github.com/open-telemetry/opentelemetry-rust/pull/1318) -- Add deprecation note to Dynatrace exporter -- Bump MSRV to 1.64 [#1203](https://github.com/open-telemetry/opentelemetry-rust/pull/1203) - -## v0.3.0 - -### Changed - -- Update to `opentelemetry` v0.19.0 -- Update to `opentelemetry-http` v0.8.0 -- Bump MSRV to 1.57 [#953](https://github.com/open-telemetry/opentelemetry-rust/pull/953) -- Update dependencies and bump MSRV to 1.60 [#969](https://github.com/open-telemetry/opentelemetry-rust/pull/969). - -## v0.2.0 - -### Changed - -- Update to opentelemetry v0.18.0 -- Update to opentelemetry-http v0.7.0 - -## v0.1.0 - -### Added - -- Dynatrace metrics exporter diff --git a/opentelemetry-dynatrace/CODEOWNERS b/opentelemetry-dynatrace/CODEOWNERS deleted file mode 100644 index d6962a905a..0000000000 --- a/opentelemetry-dynatrace/CODEOWNERS +++ /dev/null @@ -1,5 +0,0 @@ -# Code owners file. -# This file controls who is tagged for review for any given pull request. - -# For anything not explicitly taken by someone else: -* @open-telemetry/rust-approvers diff --git a/opentelemetry-dynatrace/LICENSE b/opentelemetry-dynatrace/LICENSE deleted file mode 100644 index 23a2acabc4..0000000000 --- a/opentelemetry-dynatrace/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2023 The OpenTelemetry Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/opentelemetry-dynatrace/README.md b/opentelemetry-dynatrace/README.md deleted file mode 100644 index 0f978d024e..0000000000 --- a/opentelemetry-dynatrace/README.md +++ /dev/null @@ -1,16 +0,0 @@ -![OpenTelemetry — An observability framework for cloud-native software.][splash] - -[splash]: https://raw.githubusercontent.com/open-telemetry/opentelemetry-rust/main/assets/logo-text.png - -# Dynatrace - -The final release of this crate was 0.4.0. Dynatrace now recommends using the OTLP exporter. They also provide a [migration guide]. -For an example of how to configure the OTLP exporter in a Rust application, check out the [Rust integration walk-through] page in the Dynatrace documentation.
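To give a flavor of what such a configuration looks like, here is a minimal sketch (editorial, not taken from the migration guide; the endpoint is a placeholder, and it assumes the `http-proto` feature of opentelemetry-otlp plus a Tokio runtime, with authentication headers and the exact Dynatrace ingest URL left to the linked guide):

    use opentelemetry_otlp::WithExportConfig;

    #[tokio::main]
    async fn main() -> Result<(), opentelemetry::trace::TraceError> {
        // Placeholder endpoint; the migration guide documents the real ingest
        // URL and the required authorization header.
        let _tracer = opentelemetry_otlp::new_pipeline()
            .tracing()
            .with_exporter(
                opentelemetry_otlp::new_exporter()
                    .http()
                    .with_endpoint("https://example.live.dynatrace.com/api/v2/otlp"),
            )
            .install_batch(opentelemetry_sdk::runtime::Tokio)?;
        // ... create spans here ...
        opentelemetry::global::shutdown_tracer_provider();
        Ok(())
    }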
- -[migration guide]: https://www.dynatrace.com/support/help/shortlink/migrating-dynatrace-metrics-exporter-otlp-exporter#migrate-applications -[Rust integration walk-through]: https://www.dynatrace.com/support/help/shortlink/otel-wt-rust - - -## Notice Removal - -This README and directory can be removed at any time in 2024, at least 6 months after the last release date of `opentelemetry-dynatrace`. diff --git a/opentelemetry-stackdriver/CHANGELOG.md b/opentelemetry-stackdriver/CHANGELOG.md deleted file mode 100644 index e65cf8f445..0000000000 --- a/opentelemetry-stackdriver/CHANGELOG.md +++ /dev/null @@ -1,105 +0,0 @@ -# Changelog - -## vNext - -## v0.18.0 - -### Changed - -- Bump MSRV to 1.65 [#1318](https://github.com/open-telemetry/opentelemetry-rust/pull/1318) -- Bump MSRV to 1.64 [#1203](https://github.com/open-telemetry/opentelemetry-rust/pull/1203) - -## v0.17.0 - -### Added - -- Send resource along with span attributes and kind/status #1035 - Add option to authenticate with existing GCP Authentication Manager #1128 - -### Changed - -- Update gRPC schemas #992 - Upgrade gcp-auth to 0.9 #1137 - Update to opentelemetry v0.20.0 - -## v0.16.0 - -### Changed - Update to `opentelemetry` v0.19. - Update to `opentelemetry-semantic-conventions` v0.11. - Bump MSRV to 1.57 [#953](https://github.com/open-telemetry/opentelemetry-rust/pull/953). - Update dependencies and bump MSRV to 1.60 [#969](https://github.com/open-telemetry/opentelemetry-rust/pull/969). - -## v0.15.0 - -### Added - -- Added mappings from OTel attributes to Google Cloud Traces #744 - Added `MonitoredResource::CloudRunRevision` #847 - -### Changed - -- Upgrade to opentelemetry v0.18.0 - Upgrade to opentelemetry-semantic-conventions v0.10 - update tonic and prost #825 - -### Fixed - -- Fix `LogEntry.trace` not populated correctly #850 - -## v0.14.0 - -### Changed - -- Upgrade to new gcp_auth version (#722) - Stop leaking dependency error types into public API (#722) - Clarify type of MonitoredResource (#722) - -### Fixed - -- Fixed issue with futures dependency (#722) - Don't set up logging channel if no logging is configured (#722) - -## v0.13.0 - -### Changed - -- Send export errors to global error handler (#705) - Return `impl Future` to avoid spawning inside library (#703) - Implement builder API to simplify configuration (#702) - Use TLS configuration provided by tonic (#702) - Optionally send events to Cloud Logging (#702) - Exclude default `tonic-build` features #635 - Update `gcp_auth` dependency to `0.5.0` #639 - Include the server's message in error display #642 - Update `tonic` to 0.6 #660 - Update gcp_auth and yup-oauth2 to latest versions #700 - Update to opentelemetry v0.17.0 - -### Fixed - -- Avoid calling log from inside exporter #709 - -## v0.12.0 - -### Changed - -- Update to opentelemetry v0.16.0 - -## v0.11.0 - -### Changed - -- Update to opentelemetry v0.15.0 - -## v0.10.0 - -### Changed - -- Update to opentelemetry v0.14.0 - -## v0.9.0 - -### Changed - Move opentelemetry-stackdriver into opentelemetry-rust repo #487 diff --git a/opentelemetry-stackdriver/Cargo.toml b/opentelemetry-stackdriver/Cargo.toml deleted file mode 100644 index b1a3f98e8e..0000000000 --- a/opentelemetry-stackdriver/Cargo.toml +++ /dev/null @@ -1,45 +0,0 @@ -[package] -name = "opentelemetry-stackdriver" -version = "0.18.0" -description = "A Rust opentelemetry exporter that uploads traces to Google Stackdriver trace."
-documentation = "https://docs.rs/opentelemetry-stackdriver/" -repository = "https://github.com/open-telemetry/opentelemetry-rust" -license = "Apache-2.0" -edition = "2021" -exclude = ["/proto"] -rust-version = "1.65" - -[dependencies] -async-trait = "0.1.48" -gcp_auth = { version = "0.9", optional = true } -hex = "0.4" -http = "0.2" -hyper = "0.14.2" -hyper-rustls = { version = "0.24", optional = true } -opentelemetry = { version = "0.21", path = "../opentelemetry" } -opentelemetry_sdk = { version = "0.21", path = "../opentelemetry-sdk" } -opentelemetry-semantic-conventions = { version = "0.13", path = "../opentelemetry-semantic-conventions" } -prost = "0.11.0" -prost-types = "0.11.1" -thiserror = "1.0.30" -tonic = { version = "0.9.0", features = ["gzip", "tls", "transport"] } -yup-oauth2 = { version = "8.1.0", optional = true } - -# Futures -futures-core = "0.3" -futures-util = { version = "0.3", default-features = false, features = ["alloc"] } -futures-channel = { version = "0.3", default-features = false, features = ["std"] } - -[features] -default = ["yup-authorizer", "tls-native-roots"] -yup-authorizer = ["hyper-rustls", "yup-oauth2"] -tls-native-roots = ["tonic/tls-roots"] -tls-webpki-roots = ["tonic/tls-webpki-roots"] - -[dev-dependencies] -reqwest = "0.11.9" -tempfile = "3.3.0" -tokio = "1" -tonic-build = "0.9.0" -walkdir = "2.3.2" -futures-util = {version = "0.3", default-features = false } diff --git a/opentelemetry-stackdriver/LICENSE-APACHE b/opentelemetry-stackdriver/LICENSE-APACHE deleted file mode 100644 index 261eeb9e9f..0000000000 --- a/opentelemetry-stackdriver/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/opentelemetry-stackdriver/README.md b/opentelemetry-stackdriver/README.md deleted file mode 100644 index bce58731f1..0000000000 --- a/opentelemetry-stackdriver/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# opentelemetry-stackdriver - -[Documentation](https://docs.rs/opentelemetry-stackdriver/) - -This crate provides an `opentelemetry` exporter for use with Google StackDriver trace. It uses gRPC to send tracing spans. - -It is licensed under the Apache 2.0 license. Contributions are welcome. diff --git a/opentelemetry-stackdriver/proto/google/api/annotations.proto b/opentelemetry-stackdriver/proto/google/api/annotations.proto deleted file mode 100644 index efdab3db6c..0000000000 --- a/opentelemetry-stackdriver/proto/google/api/annotations.proto +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2015 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.api; - -import "google/api/http.proto"; -import "google/protobuf/descriptor.proto"; - -option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; -option java_multiple_files = true; -option java_outer_classname = "AnnotationsProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -extend google.protobuf.MethodOptions { - // See `HttpRule`. - HttpRule http = 72295728; -} diff --git a/opentelemetry-stackdriver/proto/google/api/client.proto b/opentelemetry-stackdriver/proto/google/api/client.proto deleted file mode 100644 index 227ccf3a53..0000000000 --- a/opentelemetry-stackdriver/proto/google/api/client.proto +++ /dev/null @@ -1,349 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.api; - -import "google/api/launch_stage.proto"; -import "google/protobuf/descriptor.proto"; -import "google/protobuf/duration.proto"; - -option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; -option java_multiple_files = true; -option java_outer_classname = "ClientProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -extend google.protobuf.MethodOptions { - // A definition of a client library method signature. - // - // In client libraries, each proto RPC corresponds to one or more methods - // which the end user is able to call, and calls the underlying RPC. - // Normally, this method receives a single argument (a struct or instance - // corresponding to the RPC request object). Defining this field will - // add one or more overloads providing flattened or simpler method signatures - // in some languages. - // - // The fields on the method signature are provided as a comma-separated - // string. - // - // For example, the proto RPC and annotation: - // - // rpc CreateSubscription(CreateSubscriptionRequest) - // returns (Subscription) { - // option (google.api.method_signature) = "name,topic"; - // } - // - // Would add the following Java overload (in addition to the method accepting - // the request object): - // - // public final Subscription createSubscription(String name, String topic) - // - // The following backwards-compatibility guidelines apply: - // - // * Adding this annotation to an unannotated method is backwards - // compatible. - // * Adding this annotation to a method which already has existing - // method signature annotations is backwards compatible if and only if - // the new method signature annotation is last in the sequence. 
- // * Modifying or removing an existing method signature annotation is - // a breaking change. - // * Re-ordering existing method signature annotations is a breaking - // change. - repeated string method_signature = 1051; -} - -extend google.protobuf.ServiceOptions { - // The hostname for this service. - // This should be specified with no prefix or protocol. - // - // Example: - // - // service Foo { - // option (google.api.default_host) = "foo.googleapi.com"; - // ... - // } - string default_host = 1049; - - // OAuth scopes needed for the client. - // - // Example: - // - // service Foo { - // option (google.api.oauth_scopes) = \ - // "https://www.googleapis.com/auth/cloud-platform"; - // ... - // } - // - // If there is more than one scope, use a comma-separated string: - // - // Example: - // - // service Foo { - // option (google.api.oauth_scopes) = \ - // "https://www.googleapis.com/auth/cloud-platform," - // "https://www.googleapis.com/auth/monitoring"; - // ... - // } - string oauth_scopes = 1050; -} - -// Required information for every language. -message CommonLanguageSettings { - // Link to automatically generated reference documentation. Example: - // https://cloud.google.com/nodejs/docs/reference/asset/latest - string reference_docs_uri = 1 [deprecated = true]; - - // The destination where API teams want this client library to be published. - repeated ClientLibraryDestination destinations = 2; -} - -// Details about how and where to publish client libraries. -message ClientLibrarySettings { - // Version of the API to apply these settings to. - string version = 1; - - // Launch stage of this version of the API. - LaunchStage launch_stage = 2; - - // When using transport=rest, the client request will encode enums as - // numbers rather than strings. - bool rest_numeric_enums = 3; - - // Settings for legacy Java features, supported in the Service YAML. - JavaSettings java_settings = 21; - - // Settings for C++ client libraries. - CppSettings cpp_settings = 22; - - // Settings for PHP client libraries. - PhpSettings php_settings = 23; - - // Settings for Python client libraries. - PythonSettings python_settings = 24; - - // Settings for Node client libraries. - NodeSettings node_settings = 25; - - // Settings for .NET client libraries. - DotnetSettings dotnet_settings = 26; - - // Settings for Ruby client libraries. - RubySettings ruby_settings = 27; - - // Settings for Go client libraries. - GoSettings go_settings = 28; -} - -// This message configures the settings for publishing [Google Cloud Client -// libraries](https://cloud.google.com/apis/docs/cloud-client-libraries) -// generated from the service config. -message Publishing { - // A list of API method settings, e.g. the behavior for methods that use the - // long-running operation pattern. - repeated MethodSettings method_settings = 2; - - // Link to a place that API users can report issues. Example: - // https://issuetracker.google.com/issues/new?component=190865&template=1161103 - string new_issue_uri = 101; - - // Link to product home page. Example: - // https://cloud.google.com/asset-inventory/docs/overview - string documentation_uri = 102; - - // Used as a tracking tag when collecting data about the APIs developer - // relations artifacts like docs, packages delivered to package managers, - // etc. Example: "speech". - string api_short_name = 103; - - // GitHub label to apply to issues and pull requests opened for this API. 
- string github_label = 104;
-
- // GitHub teams to be added to CODEOWNERS in the directory in GitHub
- // containing source code for the client libraries for this API.
- repeated string codeowner_github_teams = 105;
-
- // A prefix used in sample code when demarking regions to be included in
- // documentation.
- string doc_tag_prefix = 106;
-
- // For whom the client library is being published.
- ClientLibraryOrganization organization = 107;
-
- // Client library settings. If the same version string appears multiple
- // times in this list, then the last one wins. Settings from earlier
- // settings with the same version string are discarded.
- repeated ClientLibrarySettings library_settings = 109;
-}
-
-// Settings for Java client libraries.
-message JavaSettings {
- // The package name to use in Java. Clobbers the java_package option
- // set in the protobuf. This should be used **only** by APIs
- // who have already set the language_settings.java.package_name" field
- // in gapic.yaml. API teams should use the protobuf java_package option
- // where possible.
- //
- // Example of a YAML configuration::
- //
- // publishing:
- // java_settings:
- // library_package: com.google.cloud.pubsub.v1
- string library_package = 1;
-
- // Configure the Java class name to use instead of the service's for its
- // corresponding generated GAPIC client. Keys are fully-qualified
- // service names as they appear in the protobuf (including the full
- // package name). This should be used **only** by APIs who have already set
- // the language_settings.java.interface_names" field in gapic.yaml. API
- // teams should otherwise use the service name as it appears in the
- // protobuf.
- //
- // Example of a YAML configuration::
- //
- // publishing:
- // java_settings:
- // service_class_names:
- // - google.pubsub.v1.Publisher: TopicAdmin
- // - google.pubsub.v1.Subscriber: SubscriptionAdmin
- map<string, string> service_class_names = 2;
-
- // Some settings.
- CommonLanguageSettings common = 3;
-}
-
-// Settings for C++ client libraries.
-message CppSettings {
- // Some settings.
- CommonLanguageSettings common = 1;
-}
-
-// Settings for Php client libraries.
-message PhpSettings {
- // Some settings.
- CommonLanguageSettings common = 1;
-}
-
-// Settings for Python client libraries.
-message PythonSettings {
- // Some settings.
- CommonLanguageSettings common = 1;
-}
-
-// Settings for Node client libraries.
-message NodeSettings {
- // Some settings.
- CommonLanguageSettings common = 1;
-}
-
-// Settings for Dotnet client libraries.
-message DotnetSettings {
- // Some settings.
- CommonLanguageSettings common = 1;
-}
-
-// Settings for Ruby client libraries.
-message RubySettings {
- // Some settings.
- CommonLanguageSettings common = 1;
-}
-
-// Settings for Go client libraries.
-message GoSettings {
- // Some settings.
- CommonLanguageSettings common = 1;
-}
-
-// Describes the generator configuration for a method.
-message MethodSettings {
- // Describes settings to use when generating API methods that use the
- // long-running operation pattern.
- // All default values below are from those used in the client library
- // generators (e.g.
- // [Java](https://github.com/googleapis/gapic-generator-java/blob/04c2faa191a9b5a10b92392fe8482279c4404803/src/main/java/com/google/api/generator/gapic/composer/common/RetrySettingsComposer.java)).
- message LongRunning {
- // Initial delay after which the first poll request will be made.
- // Default value: 5 seconds.
- google.protobuf.Duration initial_poll_delay = 1; - - // Multiplier to gradually increase delay between subsequent polls until it - // reaches max_poll_delay. - // Default value: 1.5. - float poll_delay_multiplier = 2; - - // Maximum time between two subsequent poll requests. - // Default value: 45 seconds. - google.protobuf.Duration max_poll_delay = 3; - - // Total polling timeout. - // Default value: 5 minutes. - google.protobuf.Duration total_poll_timeout = 4; - } - - // The fully qualified name of the method, for which the options below apply. - // This is used to find the method to apply the options. - string selector = 1; - - // Describes settings to use for long-running operations when generating - // API methods for RPCs. Complements RPCs that use the annotations in - // google/longrunning/operations.proto. - // - // Example of a YAML configuration:: - // - // publishing: - // method_behavior: - // - selector: CreateAdDomain - // long_running: - // initial_poll_delay: - // seconds: 60 # 1 minute - // poll_delay_multiplier: 1.5 - // max_poll_delay: - // seconds: 360 # 6 minutes - // total_poll_timeout: - // seconds: 54000 # 90 minutes - LongRunning long_running = 2; -} - -// The organization for which the client libraries are being published. -// Affects the url where generated docs are published, etc. -enum ClientLibraryOrganization { - // Not useful. - CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED = 0; - - // Google Cloud Platform Org. - CLOUD = 1; - - // Ads (Advertising) Org. - ADS = 2; - - // Photos Org. - PHOTOS = 3; - - // Street View Org. - STREET_VIEW = 4; -} - -// To where should client libraries be published? -enum ClientLibraryDestination { - // Client libraries will neither be generated nor published to package - // managers. - CLIENT_LIBRARY_DESTINATION_UNSPECIFIED = 0; - - // Generate the client library in a repo under github.com/googleapis, - // but don't publish it to package managers. - GITHUB = 10; - - // Publish the library to package managers like nuget.org and npmjs.com. - PACKAGE_MANAGER = 20; -} diff --git a/opentelemetry-stackdriver/proto/google/api/field_behavior.proto b/opentelemetry-stackdriver/proto/google/api/field_behavior.proto deleted file mode 100644 index c4abe3b670..0000000000 --- a/opentelemetry-stackdriver/proto/google/api/field_behavior.proto +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.api; - -import "google/protobuf/descriptor.proto"; - -option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; -option java_multiple_files = true; -option java_outer_classname = "FieldBehaviorProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -extend google.protobuf.FieldOptions { - // A designation of a specific field behavior (required, output only, etc.) - // in protobuf messages. 
- // - // Examples: - // - // string name = 1 [(google.api.field_behavior) = REQUIRED]; - // State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - // google.protobuf.Duration ttl = 1 - // [(google.api.field_behavior) = INPUT_ONLY]; - // google.protobuf.Timestamp expire_time = 1 - // [(google.api.field_behavior) = OUTPUT_ONLY, - // (google.api.field_behavior) = IMMUTABLE]; - repeated google.api.FieldBehavior field_behavior = 1052; -} - -// An indicator of the behavior of a given field (for example, that a field -// is required in requests, or given as output but ignored as input). -// This **does not** change the behavior in protocol buffers itself; it only -// denotes the behavior and may affect how API tooling handles the field. -// -// Note: This enum **may** receive new values in the future. -enum FieldBehavior { - // Conventional default for enums. Do not use this. - FIELD_BEHAVIOR_UNSPECIFIED = 0; - - // Specifically denotes a field as optional. - // While all fields in protocol buffers are optional, this may be specified - // for emphasis if appropriate. - OPTIONAL = 1; - - // Denotes a field as required. - // This indicates that the field **must** be provided as part of the request, - // and failure to do so will cause an error (usually `INVALID_ARGUMENT`). - REQUIRED = 2; - - // Denotes a field as output only. - // This indicates that the field is provided in responses, but including the - // field in a request does nothing (the server *must* ignore it and - // *must not* throw an error as a result of the field's presence). - OUTPUT_ONLY = 3; - - // Denotes a field as input only. - // This indicates that the field is provided in requests, and the - // corresponding field is not included in output. - INPUT_ONLY = 4; - - // Denotes a field as immutable. - // This indicates that the field may be set once in a request to create a - // resource, but may not be changed thereafter. - IMMUTABLE = 5; - - // Denotes that a (repeated) field is an unordered list. - // This indicates that the service may provide the elements of the list - // in any arbitrary order, rather than the order the user originally - // provided. Additionally, the list's order may or may not be stable. - UNORDERED_LIST = 6; - - // Denotes that this field returns a non-empty default value if not set. - // This indicates that if the user provides the empty value in a request, - // a non-empty value will be returned. The user will not be aware of what - // non-empty value to expect. - NON_EMPTY_DEFAULT = 7; -} diff --git a/opentelemetry-stackdriver/proto/google/api/http.proto b/opentelemetry-stackdriver/proto/google/api/http.proto deleted file mode 100644 index 113fa936a0..0000000000 --- a/opentelemetry-stackdriver/proto/google/api/http.proto +++ /dev/null @@ -1,375 +0,0 @@ -// Copyright 2015 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
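-// A minimal sketch combining the field_behavior values defined above on one
-// message; the message and field names are hypothetical:
-//
-//     message Book {
-//       string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
-//       string title = 2 [(google.api.field_behavior) = REQUIRED];
-//       string isbn = 3 [(google.api.field_behavior) = IMMUTABLE];
-//       repeated string tags = 4 [(google.api.field_behavior) = UNORDERED_LIST];
-//     }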
- -syntax = "proto3"; - -package google.api; - -option cc_enable_arenas = true; -option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; -option java_multiple_files = true; -option java_outer_classname = "HttpProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -// Defines the HTTP configuration for an API service. It contains a list of -// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method -// to one or more HTTP REST API methods. -message Http { - // A list of HTTP configuration rules that apply to individual API methods. - // - // **NOTE:** All service configuration rules follow "last one wins" order. - repeated HttpRule rules = 1; - - // When set to true, URL path parameters will be fully URI-decoded except in - // cases of single segment matches in reserved expansion, where "%2F" will be - // left encoded. - // - // The default behavior is to not decode RFC 6570 reserved characters in multi - // segment matches. - bool fully_decode_reserved_expansion = 2; -} - -// # gRPC Transcoding -// -// gRPC Transcoding is a feature for mapping between a gRPC method and one or -// more HTTP REST endpoints. It allows developers to build a single API service -// that supports both gRPC APIs and REST APIs. Many systems, including [Google -// APIs](https://github.com/googleapis/googleapis), -// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC -// Gateway](https://github.com/grpc-ecosystem/grpc-gateway), -// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature -// and use it for large scale production services. -// -// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies -// how different portions of the gRPC request message are mapped to the URL -// path, URL query parameters, and HTTP request body. It also controls how the -// gRPC response message is mapped to the HTTP response body. `HttpRule` is -// typically specified as an `google.api.http` annotation on the gRPC method. -// -// Each mapping specifies a URL path template and an HTTP method. The path -// template may refer to one or more fields in the gRPC request message, as long -// as each field is a non-repeated field with a primitive (non-message) type. -// The path template controls how fields of the request message are mapped to -// the URL path. -// -// Example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get: "/v1/{name=messages/*}" -// }; -// } -// } -// message GetMessageRequest { -// string name = 1; // Mapped to URL path. -// } -// message Message { -// string text = 1; // The resource content. -// } -// -// This enables an HTTP REST to gRPC mapping as below: -// -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` -// -// Any fields in the request message which are not bound by the path template -// automatically become HTTP query parameters if there is no HTTP request body. -// For example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get:"/v1/messages/{message_id}" -// }; -// } -// } -// message GetMessageRequest { -// message SubMessage { -// string subfield = 1; -// } -// string message_id = 1; // Mapped to URL path. -// int64 revision = 2; // Mapped to URL query parameter `revision`. -// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. 
-// }
-//
-// This enables an HTTP JSON to RPC mapping as below:
-//
-// HTTP | gRPC
-// -----|-----
-// `GET /v1/messages/123456?revision=2&sub.subfield=foo` |
-// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield:
-// "foo"))`
-//
-// Note that fields which are mapped to URL query parameters must have a
-// primitive type or a repeated primitive type or a non-repeated message type.
-// In the case of a repeated type, the parameter can be repeated in the URL
-// as `...?param=A&param=B`. In the case of a message type, each field of the
-// message is mapped to a separate parameter, such as
-// `...?foo.a=A&foo.b=B&foo.c=C`.
-//
-// For HTTP methods that allow a request body, the `body` field
-// specifies the mapping. Consider a REST update method on the
-// message resource collection:
-//
-// service Messaging {
-// rpc UpdateMessage(UpdateMessageRequest) returns (Message) {
-// option (google.api.http) = {
-// patch: "/v1/messages/{message_id}"
-// body: "message"
-// };
-// }
-// }
-// message UpdateMessageRequest {
-// string message_id = 1; // mapped to the URL
-// Message message = 2; // mapped to the body
-// }
-//
-// The following HTTP JSON to RPC mapping is enabled, where the
-// representation of the JSON in the request body is determined by
-// protos JSON encoding:
-//
-// HTTP | gRPC
-// -----|-----
-// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id:
-// "123456" message { text: "Hi!" })`
-//
-// The special name `*` can be used in the body mapping to define that
-// every field not bound by the path template should be mapped to the
-// request body. This enables the following alternative definition of
-// the update method:
-//
-// service Messaging {
-// rpc UpdateMessage(Message) returns (Message) {
-// option (google.api.http) = {
-// patch: "/v1/messages/{message_id}"
-// body: "*"
-// };
-// }
-// }
-// message Message {
-// string message_id = 1;
-// string text = 2;
-// }
-//
-//
-// The following HTTP JSON to RPC mapping is enabled:
-//
-// HTTP | gRPC
-// -----|-----
-// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id:
-// "123456" text: "Hi!")`
-//
-// Note that when using `*` in the body mapping, it is not possible to
-// have HTTP parameters, as all fields not bound by the path end in
-// the body. This makes this option more rarely used in practice when
-// defining REST APIs. The common usage of `*` is in custom methods
-// which don't use the URL at all for transferring data.
-//
-// It is possible to define multiple HTTP methods for one RPC by using
-// the `additional_bindings` option. Example:
-//
-// service Messaging {
-// rpc GetMessage(GetMessageRequest) returns (Message) {
-// option (google.api.http) = {
-// get: "/v1/messages/{message_id}"
-// additional_bindings {
-// get: "/v1/users/{user_id}/messages/{message_id}"
-// }
-// };
-// }
-// }
-// message GetMessageRequest {
-// string message_id = 1;
-// string user_id = 2;
-// }
-//
-// This enables the following two alternative HTTP JSON to RPC mappings:
-//
-// HTTP | gRPC
-// -----|-----
-// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")`
-// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id:
-// "123456")`
-//
-// ## Rules for HTTP mapping
-//
-// 1. Leaf request fields (recursive expansion nested messages in the request
-// message) are classified into three categories:
-// - Fields referred by the path template. They are passed via the URL path.
-// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They are passed via the HTTP -// request body. -// - All other fields are passed via the URL query parameters, and the -// parameter name is the field path in the request message. A repeated -// field can be represented as multiple query parameters under the same -// name. -// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL query parameter, all fields -// are passed via URL path and HTTP request body. -// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP request body, all -// fields are passed via URL path and URL query parameters. -// -// ### Path template syntax -// -// Template = "/" Segments [ Verb ] ; -// Segments = Segment { "/" Segment } ; -// Segment = "*" | "**" | LITERAL | Variable ; -// Variable = "{" FieldPath [ "=" Segments ] "}" ; -// FieldPath = IDENT { "." IDENT } ; -// Verb = ":" LITERAL ; -// -// The syntax `*` matches a single URL path segment. The syntax `**` matches -// zero or more URL path segments, which must be the last part of the URL path -// except the `Verb`. -// -// The syntax `Variable` matches part of the URL path as specified by its -// template. A variable template must not contain other variables. If a variable -// matches a single path segment, its template may be omitted, e.g. `{var}` -// is equivalent to `{var=*}`. -// -// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` -// contains any reserved character, such characters should be percent-encoded -// before the matching. -// -// If a variable contains exactly one path segment, such as `"{var}"` or -// `"{var=*}"`, when such a variable is expanded into a URL path on the client -// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The -// server side does the reverse decoding. Such variables show up in the -// [Discovery -// Document](https://developers.google.com/discovery/v1/reference/apis) as -// `{var}`. -// -// If a variable contains multiple path segments, such as `"{var=foo/*}"` -// or `"{var=**}"`, when such a variable is expanded into a URL path on the -// client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. -// The server side does the reverse decoding, except "%2F" and "%2f" are left -// unchanged. Such variables show up in the -// [Discovery -// Document](https://developers.google.com/discovery/v1/reference/apis) as -// `{+var}`. -// -// ## Using gRPC API Service Configuration -// -// gRPC API Service Configuration (service config) is a configuration language -// for configuring a gRPC service to become a user-facing product. The -// service config is simply the YAML representation of the `google.api.Service` -// proto message. -// -// As an alternative to annotating your proto file, you can configure gRPC -// transcoding in your service config YAML files. You do this by specifying a -// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same -// effect as the proto annotation. This can be particularly useful if you -// have a proto that is reused in multiple services. Note that any transcoding -// specified in the service config will override any matching transcoding -// configuration in the proto. -// -// Example: -// -// http: -// rules: -// # Selects a gRPC method and applies HttpRule to it. 
-// - selector: example.v1.Messaging.GetMessage -// get: /v1/messages/{message_id}/{sub.subfield} -// -// ## Special notes -// -// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the -// proto to JSON conversion must follow the [proto3 -// specification](https://developers.google.com/protocol-buffers/docs/proto3#json). -// -// While the single segment variable follows the semantics of -// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String -// Expansion, the multi segment variable **does not** follow RFC 6570 Section -// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion -// does not expand special characters like `?` and `#`, which would lead -// to invalid URLs. As the result, gRPC Transcoding uses a custom encoding -// for multi segment variables. -// -// The path variables **must not** refer to any repeated or mapped field, -// because client libraries are not capable of handling such variable expansion. -// -// The path variables **must not** capture the leading "/" character. The reason -// is that the most common use case "{var}" does not capture the leading "/" -// character. For consistency, all path variables must share the same behavior. -// -// Repeated message fields must not be mapped to URL query parameters, because -// no client library can support such complicated mapping. -// -// If an API needs to use a JSON array for request or response body, it can map -// the request or response body to a repeated field. However, some gRPC -// Transcoding implementations may not support this feature. -message HttpRule { - // Selects a method to which this rule applies. - // - // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. - string selector = 1; - - // Determines the URL pattern is matched by this rules. This pattern can be - // used with any of the {get|put|post|delete|patch} methods. A custom method - // can be defined using the 'custom' field. - oneof pattern { - // Maps to HTTP GET. Used for listing and getting information about - // resources. - string get = 2; - - // Maps to HTTP PUT. Used for replacing a resource. - string put = 3; - - // Maps to HTTP POST. Used for creating a resource or performing an action. - string post = 4; - - // Maps to HTTP DELETE. Used for deleting a resource. - string delete = 5; - - // Maps to HTTP PATCH. Used for updating a resource. - string patch = 6; - - // The custom pattern is used for specifying an HTTP method that is not - // included in the `pattern` field, such as HEAD, or "*" to leave the - // HTTP method unspecified for this rule. The wild-card rule is useful - // for services that provide content to Web (HTML) clients. - CustomHttpPattern custom = 8; - } - - // The name of the request field whose value is mapped to the HTTP request - // body, or `*` for mapping all request fields not captured by the path - // pattern to the HTTP body, or omitted for not having any HTTP request body. - // - // NOTE: the referred field must be present at the top-level of the request - // message type. - string body = 7; - - // Optional. The name of the response field whose value is mapped to the HTTP - // response body. When omitted, the entire response message will be used - // as the HTTP response body. - // - // NOTE: The referred field must be present at the top-level of the response - // message type. - string response_body = 12; - - // Additional HTTP bindings for the selector. 
Nested bindings must - // not contain an `additional_bindings` field themselves (that is, - // the nesting may only be one level deep). - repeated HttpRule additional_bindings = 11; -} - -// A custom pattern is used for defining custom HTTP verb. -message CustomHttpPattern { - // The name of this custom HTTP verb. - string kind = 1; - - // The path matched by this custom verb. - string path = 2; -} diff --git a/opentelemetry-stackdriver/proto/google/api/label.proto b/opentelemetry-stackdriver/proto/google/api/label.proto deleted file mode 100644 index af294c91ca..0000000000 --- a/opentelemetry-stackdriver/proto/google/api/label.proto +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2015 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.api; - -option cc_enable_arenas = true; -option go_package = "google.golang.org/genproto/googleapis/api/label;label"; -option java_multiple_files = true; -option java_outer_classname = "LabelProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -// A description of a label. -message LabelDescriptor { - // Value types that can be used as label values. - enum ValueType { - // A variable-length string. This is the default. - STRING = 0; - - // Boolean; true or false. - BOOL = 1; - - // A 64-bit signed integer. - INT64 = 2; - } - - // The label key. - string key = 1; - - // The type of data that can be assigned to the label. - ValueType value_type = 2; - - // A human-readable description for the label. - string description = 3; -} diff --git a/opentelemetry-stackdriver/proto/google/api/launch_stage.proto b/opentelemetry-stackdriver/proto/google/api/launch_stage.proto deleted file mode 100644 index 6524db5756..0000000000 --- a/opentelemetry-stackdriver/proto/google/api/launch_stage.proto +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2015 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.api; - -option go_package = "google.golang.org/genproto/googleapis/api;api"; -option java_multiple_files = true; -option java_outer_classname = "LaunchStageProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -// The launch stage as defined by [Google Cloud Platform -// Launch Stages](https://cloud.google.com/terms/launch-stages). -enum LaunchStage { - // Do not use this default value. - LAUNCH_STAGE_UNSPECIFIED = 0; - - // The feature is not yet implemented. Users can not use it. 
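- // A minimal sketch of the CustomHttpPattern defined in http.proto above,
- // declaring a verb outside the standard set; the RPC name, verb, and path
- // are hypothetical. Note the `body: "*"` form, which the documentation
- // above recommends for custom methods that do not transfer data in the URL:
- //
- //     rpc Watch(WatchRequest) returns (WatchResponse) {
- //       option (google.api.http) = {
- //         custom: { kind: "WATCH" path: "/v1/{name=streams/*}" }
- //         body: "*"
- //       };
- //     }
- //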
- UNIMPLEMENTED = 6; - - // Prelaunch features are hidden from users and are only visible internally. - PRELAUNCH = 7; - - // Early Access features are limited to a closed group of testers. To use - // these features, you must sign up in advance and sign a Trusted Tester - // agreement (which includes confidentiality provisions). These features may - // be unstable, changed in backward-incompatible ways, and are not - // guaranteed to be released. - EARLY_ACCESS = 1; - - // Alpha is a limited availability test for releases before they are cleared - // for widespread use. By Alpha, all significant design issues are resolved - // and we are in the process of verifying functionality. Alpha customers - // need to apply for access, agree to applicable terms, and have their - // projects allowlisted. Alpha releases don't have to be feature complete, - // no SLAs are provided, and there are no technical support obligations, but - // they will be far enough along that customers can actually use them in - // test environments or for limited-use tests -- just like they would in - // normal production cases. - ALPHA = 2; - - // Beta is the point at which we are ready to open a release for any - // customer to use. There are no SLA or technical support obligations in a - // Beta release. Products will be complete from a feature perspective, but - // may have some open outstanding issues. Beta releases are suitable for - // limited production use cases. - BETA = 3; - - // GA features are open to all developers and are considered stable and - // fully qualified for production use. - GA = 4; - - // Deprecated features are scheduled to be shut down and removed. For more - // information, see the "Deprecation Policy" section of our [Terms of - // Service](https://cloud.google.com/terms/) - // and the [Google Cloud Platform Subject to the Deprecation - // Policy](https://cloud.google.com/terms/deprecation) documentation. - DEPRECATED = 5; -} diff --git a/opentelemetry-stackdriver/proto/google/api/monitored_resource.proto b/opentelemetry-stackdriver/proto/google/api/monitored_resource.proto deleted file mode 100644 index 8b97baa104..0000000000 --- a/opentelemetry-stackdriver/proto/google/api/monitored_resource.proto +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2015 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.api; - -import "google/api/label.proto"; -import "google/api/launch_stage.proto"; -import "google/protobuf/struct.proto"; - -option cc_enable_arenas = true; -option go_package = "google.golang.org/genproto/googleapis/api/monitoredres;monitoredres"; -option java_multiple_files = true; -option java_outer_classname = "MonitoredResourceProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -// An object that describes the schema of a [MonitoredResource][google.api.MonitoredResource] object using a -// type name and a set of labels. 
For example, the monitored resource -// descriptor for Google Compute Engine VM instances has a type of -// `"gce_instance"` and specifies the use of the labels `"instance_id"` and -// `"zone"` to identify particular VM instances. -// -// Different APIs can support different monitored resource types. APIs generally -// provide a `list` method that returns the monitored resource descriptors used -// by the API. -// -message MonitoredResourceDescriptor { - // Optional. The resource name of the monitored resource descriptor: - // `"projects/{project_id}/monitoredResourceDescriptors/{type}"` where - // {type} is the value of the `type` field in this object and - // {project_id} is a project ID that provides API-specific context for - // accessing the type. APIs that do not use project information can use the - // resource name format `"monitoredResourceDescriptors/{type}"`. - string name = 5; - - // Required. The monitored resource type. For example, the type - // `"cloudsql_database"` represents databases in Google Cloud SQL. - // For a list of types, see [Monitoring resource - // types](https://cloud.google.com/monitoring/api/resources) - // and [Logging resource - // types](https://cloud.google.com/logging/docs/api/v2/resource-list). - string type = 1; - - // Optional. A concise name for the monitored resource type that might be - // displayed in user interfaces. It should be a Title Cased Noun Phrase, - // without any article or other determiners. For example, - // `"Google Cloud SQL Database"`. - string display_name = 2; - - // Optional. A detailed description of the monitored resource type that might - // be used in documentation. - string description = 3; - - // Required. A set of labels used to describe instances of this monitored - // resource type. For example, an individual Google Cloud SQL database is - // identified by values for the labels `"database_id"` and `"zone"`. - repeated LabelDescriptor labels = 4; - - // Optional. The launch stage of the monitored resource definition. - LaunchStage launch_stage = 7; -} - -// An object representing a resource that can be used for monitoring, logging, -// billing, or other purposes. Examples include virtual machine instances, -// databases, and storage devices such as disks. The `type` field identifies a -// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object that describes the resource's -// schema. Information in the `labels` field identifies the actual resource and -// its attributes according to the schema. For example, a particular Compute -// Engine VM instance could be represented by the following object, because the -// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] for `"gce_instance"` has labels -// `"project_id"`, `"instance_id"` and `"zone"`: -// -// { "type": "gce_instance", -// "labels": { "project_id": "my-project", -// "instance_id": "12345678901234", -// "zone": "us-central1-a" }} -message MonitoredResource { - // Required. The monitored resource type. This field must match - // the `type` field of a [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object. For - // example, the type of a Compute Engine VM instance is `gce_instance`. - // Some descriptors include the service name in the type; for example, - // the type of a Datastream stream is `datastream.googleapis.com/Stream`. - string type = 1; - - // Required. Values for all of the labels listed in the associated monitored - // resource descriptor. 
For example, Compute Engine VM instances use the
- // labels `"project_id"`, `"instance_id"`, and `"zone"`.
- map<string, string> labels = 2;
-}
-
-// Auxiliary metadata for a [MonitoredResource][google.api.MonitoredResource] object.
-// [MonitoredResource][google.api.MonitoredResource] objects contain the minimum set of information to
-// uniquely identify a monitored resource instance. There is some other useful
-// auxiliary metadata. Monitoring and Logging use an ingestion
-// pipeline to extract metadata for cloud resources of all types, and store
-// the metadata in this message.
-message MonitoredResourceMetadata {
- // Output only. Values for predefined system metadata labels.
- // System labels are a kind of metadata extracted by Google, including
- // "machine_image", "vpc", "subnet_id",
- // "security_group", "name", etc.
- // System label values can be only strings, Boolean values, or a list of
- // strings. For example:
- //
- // { "name": "my-test-instance",
- // "security_group": ["a", "b", "c"],
- // "spot_instance": false }
- google.protobuf.Struct system_labels = 1;
-
- // Output only. A map of user-defined metadata labels.
- map<string, string> user_labels = 2;
-}
diff --git a/opentelemetry-stackdriver/proto/google/api/resource.proto b/opentelemetry-stackdriver/proto/google/api/resource.proto
deleted file mode 100644
index 0ce0344f57..0000000000
--- a/opentelemetry-stackdriver/proto/google/api/resource.proto
+++ /dev/null
@@ -1,238 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.api;
-
-import "google/protobuf/descriptor.proto";
-
-option cc_enable_arenas = true;
-option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations";
-option java_multiple_files = true;
-option java_outer_classname = "ResourceProto";
-option java_package = "com.google.api";
-option objc_class_prefix = "GAPI";
-
-extend google.protobuf.FieldOptions {
- // An annotation that describes a resource reference, see
- // [ResourceReference][].
- google.api.ResourceReference resource_reference = 1055;
-}
-
-extend google.protobuf.FileOptions {
- // An annotation that describes a resource definition without a corresponding
- // message; see [ResourceDescriptor][].
- repeated google.api.ResourceDescriptor resource_definition = 1053;
-}
-
-extend google.protobuf.MessageOptions {
- // An annotation that describes a resource definition, see
- // [ResourceDescriptor][].
- google.api.ResourceDescriptor resource = 1053;
-}
-
-// A simple descriptor of a resource type.
-//
-// ResourceDescriptor annotates a resource message (either by means of a
-// protobuf annotation or use in the service config), and associates the
-// resource's schema, the resource type, and the pattern of the resource name.
-//
-// Example:
-//
-// message Topic {
-// // Indicates this message defines a resource schema.
-// // Declares the resource type in the format of {service}/{kind}.
-// // For Kubernetes resources, the format is {api group}/{kind}.
-// option (google.api.resource) = { -// type: "pubsub.googleapis.com/Topic" -// pattern: "projects/{project}/topics/{topic}" -// }; -// } -// -// The ResourceDescriptor Yaml config will look like: -// -// resources: -// - type: "pubsub.googleapis.com/Topic" -// pattern: "projects/{project}/topics/{topic}" -// -// Sometimes, resources have multiple patterns, typically because they can -// live under multiple parents. -// -// Example: -// -// message LogEntry { -// option (google.api.resource) = { -// type: "logging.googleapis.com/LogEntry" -// pattern: "projects/{project}/logs/{log}" -// pattern: "folders/{folder}/logs/{log}" -// pattern: "organizations/{organization}/logs/{log}" -// pattern: "billingAccounts/{billing_account}/logs/{log}" -// }; -// } -// -// The ResourceDescriptor Yaml config will look like: -// -// resources: -// - type: 'logging.googleapis.com/LogEntry' -// pattern: "projects/{project}/logs/{log}" -// pattern: "folders/{folder}/logs/{log}" -// pattern: "organizations/{organization}/logs/{log}" -// pattern: "billingAccounts/{billing_account}/logs/{log}" -message ResourceDescriptor { - // A description of the historical or future-looking state of the - // resource pattern. - enum History { - // The "unset" value. - HISTORY_UNSPECIFIED = 0; - - // The resource originally had one pattern and launched as such, and - // additional patterns were added later. - ORIGINALLY_SINGLE_PATTERN = 1; - - // The resource has one pattern, but the API owner expects to add more - // later. (This is the inverse of ORIGINALLY_SINGLE_PATTERN, and prevents - // that from being necessary once there are multiple patterns.) - FUTURE_MULTI_PATTERN = 2; - } - - // A flag representing a specific style that a resource claims to conform to. - enum Style { - // The unspecified value. Do not use. - STYLE_UNSPECIFIED = 0; - - // This resource is intended to be "declarative-friendly". - // - // Declarative-friendly resources must be more strictly consistent, and - // setting this to true communicates to tools that this resource should - // adhere to declarative-friendly expectations. - // - // Note: This is used by the API linter (linter.aip.dev) to enable - // additional checks. - DECLARATIVE_FRIENDLY = 1; - } - - // The resource type. It must be in the format of - // {service_name}/{resource_type_kind}. The `resource_type_kind` must be - // singular and must not include version numbers. - // - // Example: `storage.googleapis.com/Bucket` - // - // The value of the resource_type_kind must follow the regular expression - // /[A-Za-z][a-zA-Z0-9]+/. It should start with an upper case character and - // should use PascalCase (UpperCamelCase). The maximum number of - // characters allowed for the `resource_type_kind` is 100. - string type = 1; - - // Optional. The relative resource name pattern associated with this resource - // type. The DNS prefix of the full resource name shouldn't be specified here. - // - // The path pattern must follow the syntax, which aligns with HTTP binding - // syntax: - // - // Template = Segment { "/" Segment } ; - // Segment = LITERAL | Variable ; - // Variable = "{" LITERAL "}" ; - // - // Examples: - // - // - "projects/{project}/topics/{topic}" - // - "projects/{project}/knowledgeBases/{knowledge_base}" - // - // The components in braces correspond to the IDs for each resource in the - // hierarchy. It is expected that, if multiple patterns are provided, - // the same component name (e.g. "project") refers to IDs of the same - // type of resource. 
- repeated string pattern = 2; - - // Optional. The field on the resource that designates the resource name - // field. If omitted, this is assumed to be "name". - string name_field = 3; - - // Optional. The historical or future-looking state of the resource pattern. - // - // Example: - // - // // The InspectTemplate message originally only supported resource - // // names with organization, and project was added later. - // message InspectTemplate { - // option (google.api.resource) = { - // type: "dlp.googleapis.com/InspectTemplate" - // pattern: - // "organizations/{organization}/inspectTemplates/{inspect_template}" - // pattern: "projects/{project}/inspectTemplates/{inspect_template}" - // history: ORIGINALLY_SINGLE_PATTERN - // }; - // } - History history = 4; - - // The plural name used in the resource name and permission names, such as - // 'projects' for the resource name of 'projects/{project}' and the permission - // name of 'cloudresourcemanager.googleapis.com/projects.get'. It is the same - // concept of the `plural` field in k8s CRD spec - // https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/ - // - // Note: The plural form is required even for singleton resources. See - // https://aip.dev/156 - string plural = 5; - - // The same concept of the `singular` field in k8s CRD spec - // https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/ - // Such as "project" for the `resourcemanager.googleapis.com/Project` type. - string singular = 6; - - // Style flag(s) for this resource. - // These indicate that a resource is expected to conform to a given - // style. See the specific style flags for additional information. - repeated Style style = 10; -} - -// Defines a proto annotation that describes a string field that refers to -// an API resource. -message ResourceReference { - // The resource type that the annotated field references. - // - // Example: - // - // message Subscription { - // string topic = 2 [(google.api.resource_reference) = { - // type: "pubsub.googleapis.com/Topic" - // }]; - // } - // - // Occasionally, a field may reference an arbitrary resource. In this case, - // APIs use the special value * in their resource reference. - // - // Example: - // - // message GetIamPolicyRequest { - // string resource = 2 [(google.api.resource_reference) = { - // type: "*" - // }]; - // } - string type = 1; - - // The resource type of a child collection that the annotated field - // references. This is useful for annotating the `parent` field that - // doesn't have a fixed resource type. - // - // Example: - // - // message ListLogEntriesRequest { - // string parent = 1 [(google.api.resource_reference) = { - // child_type: "logging.googleapis.com/LogEntry" - // }; - // } - string child_type = 2; -} diff --git a/opentelemetry-stackdriver/proto/google/devtools/cloudtrace/v2/trace.proto b/opentelemetry-stackdriver/proto/google/devtools/cloudtrace/v2/trace.proto deleted file mode 100644 index f2b4b481c6..0000000000 --- a/opentelemetry-stackdriver/proto/google/devtools/cloudtrace/v2/trace.proto +++ /dev/null @@ -1,387 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
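-// A minimal sketch tying together the resource and resource_reference
-// annotations defined in resource.proto above; the type and pattern below
-// are hypothetical:
-//
-//     message Shelf {
-//       option (google.api.resource) = {
-//         type: "library.googleapis.com/Shelf"
-//         pattern: "shelves/{shelf}"
-//       };
-//       string name = 1;
-//     }
-//
-//     message GetShelfRequest {
-//       string name = 1 [(google.api.resource_reference) = {
-//         type: "library.googleapis.com/Shelf"
-//       }];
-//     }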
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.devtools.cloudtrace.v2;
-
-import "google/api/field_behavior.proto";
-import "google/api/resource.proto";
-import "google/protobuf/timestamp.proto";
-import "google/protobuf/wrappers.proto";
-import "google/rpc/status.proto";
-
-option csharp_namespace = "Google.Cloud.Trace.V2";
-option go_package = "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2;cloudtrace";
-option java_multiple_files = true;
-option java_outer_classname = "TraceProto";
-option java_package = "com.google.devtools.cloudtrace.v2";
-option php_namespace = "Google\\Cloud\\Trace\\V2";
-option ruby_package = "Google::Cloud::Trace::V2";
-
-// A span represents a single operation within a trace. Spans can be
-// nested to form a trace tree. Often, a trace contains a root span
-// that describes the end-to-end latency, and one or more subspans for
-// its sub-operations.
-//
-// A trace can also contain multiple root spans, or none at all.
-// Spans do not need to be contiguous. There might be
-// gaps or overlaps between spans in a trace.
-message Span {
- option (google.api.resource) = {
- type: "cloudtrace.googleapis.com/Span"
- pattern: "projects/{project}/traces/{trace}/spans/{span}"
- };
-
- // A set of attributes as key-value pairs.
- message Attributes {
- // A set of attributes. Each attribute's key can be up to 128 bytes
- // long. The value can be a string up to 256 bytes, a signed 64-bit integer,
- // or the boolean values `true` or `false`. For example:
- //
- // "/instance_id": { "string_value": { "value": "my-instance" } }
- // "/http/request_bytes": { "int_value": 300 }
- // "abc.com/myattribute": { "bool_value": false }
- map<string, AttributeValue> attribute_map = 1;
-
- // The number of attributes that were discarded. Attributes can be discarded
- // because their keys are too long or because there are too many attributes.
- // If this value is 0 then all attributes are valid.
- int32 dropped_attributes_count = 2;
- }
-
- // A time-stamped annotation or message event in the Span.
- message TimeEvent {
- // Text annotation with a set of attributes.
- message Annotation {
- // A user-supplied message describing the event. The maximum length for
- // the description is 256 bytes.
- TruncatableString description = 1;
-
- // A set of attributes on the annotation. You can have up to 4 attributes
- // per Annotation.
- Attributes attributes = 2;
- }
-
- // An event describing a message sent/received between Spans.
- message MessageEvent {
- // Indicates whether the message was sent or received.
- enum Type {
- // Unknown event type.
- TYPE_UNSPECIFIED = 0;
-
- // Indicates a sent message.
- SENT = 1;
-
- // Indicates a received message.
- RECEIVED = 2;
- }
-
- // Type of MessageEvent. Indicates whether the message was sent or
- // received.
- Type type = 1;
-
- // An identifier for the MessageEvent's message that can be used to match
- // `SENT` and `RECEIVED` MessageEvents.
- int64 id = 2;
-
- // The number of uncompressed bytes sent or received.
- int64 uncompressed_size_bytes = 3;
-
- // The number of compressed bytes sent or received.
If missing, the - // compressed size is assumed to be the same size as the uncompressed - // size. - int64 compressed_size_bytes = 4; - } - - // The timestamp indicating the time the event occurred. - google.protobuf.Timestamp time = 1; - - // A `TimeEvent` can contain either an `Annotation` object or a - // `MessageEvent` object, but not both. - oneof value { - // Text annotation with a set of attributes. - Annotation annotation = 2; - - // An event describing a message sent/received between Spans. - MessageEvent message_event = 3; - } - } - - // A collection of `TimeEvent`s. A `TimeEvent` is a time-stamped annotation - // on the span, consisting of either user-supplied key:value pairs, or - // details of a message sent/received between Spans. - message TimeEvents { - // A collection of `TimeEvent`s. - repeated TimeEvent time_event = 1; - - // The number of dropped annotations in all the included time events. - // If the value is 0, then no annotations were dropped. - int32 dropped_annotations_count = 2; - - // The number of dropped message events in all the included time events. - // If the value is 0, then no message events were dropped. - int32 dropped_message_events_count = 3; - } - - // A pointer from the current span to another span in the same trace or in a - // different trace. For example, this can be used in batching operations, - // where a single batch handler processes multiple requests from different - // traces or when the handler receives a request from a different project. - message Link { - // The relationship of the current span relative to the linked span: child, - // parent, or unspecified. - enum Type { - // The relationship of the two spans is unknown. - TYPE_UNSPECIFIED = 0; - - // The linked span is a child of the current span. - CHILD_LINKED_SPAN = 1; - - // The linked span is a parent of the current span. - PARENT_LINKED_SPAN = 2; - } - - // The `[TRACE_ID]` for a trace within a project. - string trace_id = 1; - - // The `[SPAN_ID]` for a span within a trace. - string span_id = 2; - - // The relationship of the current span relative to the linked span. - Type type = 3; - - // A set of attributes on the link. Up to 32 attributes can be - // specified per link. - Attributes attributes = 4; - } - - // A collection of links, which are references from this span to a span - // in the same or different trace. - message Links { - // A collection of links. - repeated Link link = 1; - - // The number of dropped links after the maximum size was enforced. If - // this value is 0, then no links were dropped. - int32 dropped_links_count = 2; - } - - // Type of span. Can be used to specify additional relationships between spans - // in addition to a parent/child relationship. - enum SpanKind { - // Unspecified. Do NOT use as default. - // Implementations MAY assume SpanKind.INTERNAL to be default. - SPAN_KIND_UNSPECIFIED = 0; - - // Indicates that the span is used internally. Default value. - INTERNAL = 1; - - // Indicates that the span covers server-side handling of an RPC or other - // remote network request. - SERVER = 2; - - // Indicates that the span covers the client-side wrapper around an RPC or - // other remote request. - CLIENT = 3; - - // Indicates that the span describes producer sending a message to a broker. - // Unlike client and server, there is no direct critical path latency - // relationship between producer and consumer spans (e.g. publishing a - // message to a pubsub service). 
- PRODUCER = 4; - - // Indicates that the span describes consumer receiving a message from a - // broker. Unlike client and server, there is no direct critical path - // latency relationship between producer and consumer spans (e.g. receiving - // a message from a pubsub service subscription). - CONSUMER = 5; - } - - // Required. The resource name of the span in the following format: - // - // * `projects/[PROJECT_ID]/traces/[TRACE_ID]/spans/[SPAN_ID]` - // - // `[TRACE_ID]` is a unique identifier for a trace within a project; - // it is a 32-character hexadecimal encoding of a 16-byte array. It should - // not be zero. - // - // `[SPAN_ID]` is a unique identifier for a span within a trace; it - // is a 16-character hexadecimal encoding of an 8-byte array. It should not - // be zero. - // . - string name = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. The `[SPAN_ID]` portion of the span's resource name. - string span_id = 2 [(google.api.field_behavior) = REQUIRED]; - - // The `[SPAN_ID]` of this span's parent span. If this is a root span, - // then this field must be empty. - string parent_span_id = 3; - - // Required. A description of the span's operation (up to 128 bytes). - // Cloud Trace displays the description in the - // Cloud console. - // For example, the display name can be a qualified method name or a file name - // and a line number where the operation is called. A best practice is to use - // the same display name within an application and at the same call point. - // This makes it easier to correlate spans in different traces. - TruncatableString display_name = 4 [(google.api.field_behavior) = REQUIRED]; - - // Required. The start time of the span. On the client side, this is the time - // kept by the local machine where the span execution starts. On the server - // side, this is the time when the server's application handler starts - // running. - google.protobuf.Timestamp start_time = 5 - [(google.api.field_behavior) = REQUIRED]; - - // Required. The end time of the span. On the client side, this is the time - // kept by the local machine where the span execution ends. On the server - // side, this is the time when the server application handler stops running. - google.protobuf.Timestamp end_time = 6 - [(google.api.field_behavior) = REQUIRED]; - - // A set of attributes on the span. You can have up to 32 attributes per - // span. - Attributes attributes = 7; - - // Stack trace captured at the start of the span. - StackTrace stack_trace = 8; - - // A set of time events. You can have up to 32 annotations and 128 message - // events per span. - TimeEvents time_events = 9; - - // Links associated with the span. You can have up to 128 links per Span. - Links links = 10; - - // Optional. The final status for this span. - google.rpc.Status status = 11 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Set this parameter to indicate whether this span is in - // the same process as its parent. If you do not set this parameter, - // Trace is unable to take advantage of this helpful information. - google.protobuf.BoolValue same_process_as_parent_span = 12 - [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The number of child spans that were generated while this span - // was active. If set, allows implementation to detect missing child spans. - google.protobuf.Int32Value child_span_count = 13 - [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Distinguishes between spans generated in a particular context. 
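- // As a concrete illustration of the resource-name format described above
- // for the `name` field, with arbitrary example identifiers of the required
- // lengths (32 and 16 hex characters):
- //
- //     projects/my-project/traces/4bf92f3577b34da6a3ce929d0e0e4736/spans/00f067aa0ba902b7
- //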
- // For example, two spans with the same name may be distinguished using - // `CLIENT` (caller) and `SERVER` (callee) to identify an RPC call. - SpanKind span_kind = 14 [(google.api.field_behavior) = OPTIONAL]; -} - -// The allowed types for `[VALUE]` in a `[KEY]:[VALUE]` attribute. -message AttributeValue { - // The type of the value. - oneof value { - // A string up to 256 bytes long. - TruncatableString string_value = 1; - - // A 64-bit signed integer. - int64 int_value = 2; - - // A Boolean value represented by `true` or `false`. - bool bool_value = 3; - } -} - -// A call stack appearing in a trace. -message StackTrace { - // Represents a single stack frame in a stack trace. - message StackFrame { - // The fully-qualified name that uniquely identifies the function or - // method that is active in this frame (up to 1024 bytes). - TruncatableString function_name = 1; - - // An un-mangled function name, if `function_name` is mangled. - // To get information about name mangling, run - // [this search](https://www.google.com/search?q=cxx+name+mangling). - // The name can be fully-qualified (up to 1024 bytes). - TruncatableString original_function_name = 2; - - // The name of the source file where the function call appears (up to 256 - // bytes). - TruncatableString file_name = 3; - - // The line number in `file_name` where the function call appears. - int64 line_number = 4; - - // The column number where the function call appears, if available. - // This is important in JavaScript because of its anonymous functions. - int64 column_number = 5; - - // The binary module from where the code was loaded. - Module load_module = 6; - - // The version of the deployed source code (up to 128 bytes). - TruncatableString source_version = 7; - } - - // A collection of stack frames, which can be truncated. - message StackFrames { - // Stack frames in this call stack. - repeated StackFrame frame = 1; - - // The number of stack frames that were dropped because there - // were too many stack frames. - // If this value is 0, then no stack frames were dropped. - int32 dropped_frames_count = 2; - } - - // Stack frames in this stack trace. A maximum of 128 frames are allowed. - StackFrames stack_frames = 1; - - // The hash ID is used to conserve network bandwidth for duplicate - // stack traces within a single trace. - // - // Often multiple spans will have identical stack traces. - // The first occurrence of a stack trace should contain both the - // `stackFrame` content and a value in `stackTraceHashId`. - // - // Subsequent spans within the same request can refer - // to that stack trace by only setting `stackTraceHashId`. - int64 stack_trace_hash_id = 2; -} - -// Binary module. -message Module { - // For example: main binary, kernel modules, and dynamic libraries - // such as libc.so, sharedlib.so (up to 256 bytes). - TruncatableString module = 1; - - // A unique identifier for the module, usually a hash of its - // contents (up to 128 bytes). - TruncatableString build_id = 2; -} - -// Represents a string that might be shortened to a specified length. -message TruncatableString { - // The shortened string. For example, if the original string is 500 - // bytes long and the limit of the string is 128 bytes, then - // `value` contains the first 128 bytes of the 500-byte string. - // - // Truncation always happens on a UTF8 character boundary. If there - // are multi-byte characters in the string, then the length of the - // shortened string might be less than the size limit. 
- string value = 1; - - // The number of bytes removed from the original string. If this - // value is 0, then the string was not shortened. - int32 truncated_byte_count = 2; -} diff --git a/opentelemetry-stackdriver/proto/google/devtools/cloudtrace/v2/tracing.proto b/opentelemetry-stackdriver/proto/google/devtools/cloudtrace/v2/tracing.proto deleted file mode 100644 index 0aac221df2..0000000000 --- a/opentelemetry-stackdriver/proto/google/devtools/cloudtrace/v2/tracing.proto +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.devtools.cloudtrace.v2; - -import "google/api/annotations.proto"; -import "google/api/client.proto"; -import "google/api/field_behavior.proto"; -import "google/api/resource.proto"; -import "google/devtools/cloudtrace/v2/trace.proto"; -import "google/protobuf/empty.proto"; - -option csharp_namespace = "Google.Cloud.Trace.V2"; -option go_package = "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2;cloudtrace"; -option java_multiple_files = true; -option java_outer_classname = "TracingProto"; -option java_package = "com.google.devtools.cloudtrace.v2"; -option php_namespace = "Google\\Cloud\\Trace\\V2"; -option ruby_package = "Google::Cloud::Trace::V2"; - -// Service for collecting and viewing traces and spans within a trace. -// -// A trace is a collection of spans corresponding to a single -// operation or a set of operations in an application. -// -// A span is an individual timed event which forms a node of the trace tree. -// A single trace can contain spans from multiple services. -service TraceService { - option (google.api.default_host) = "cloudtrace.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/cloud-platform," - "https://www.googleapis.com/auth/trace.append"; - - // Batch writes new spans to new or existing traces. You cannot update - // existing spans. - rpc BatchWriteSpans(BatchWriteSpansRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - post: "/v2/{name=projects/*}/traces:batchWrite" - body: "*" - }; - option (google.api.method_signature) = "name,spans"; - } - - // Creates a new span. - rpc CreateSpan(Span) returns (Span) { - option (google.api.http) = { - post: "/v2/{name=projects/*/traces/*/spans/*}" - body: "*" - }; - } -} - -// The request message for the `BatchWriteSpans` method. -message BatchWriteSpansRequest { - // Required. The name of the project where the spans belong. The format is - // `projects/[PROJECT_ID]`. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "cloudresourcemanager.googleapis.com/Project" - } - ]; - - // Required. A list of new spans. The span names must not match existing - // spans, otherwise the results are undefined. 
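The truncation rule documented for `TruncatableString` (cut at the byte limit, but never inside a multi-byte UTF-8 character) is easy to get wrong. A small Rust sketch of those semantics; the type and function names are illustrative, not from any Google crate:

// Truncates on a UTF-8 character boundary and reports the number of bytes
// removed, mirroring TruncatableString's documented fields.
struct TruncatableString {
    value: String,
    truncated_byte_count: i32,
}

fn truncate(original: &str, limit: usize) -> TruncatableString {
    let mut end = original.len().min(limit);
    // Back up until `end` falls on a character boundary, so multi-byte
    // characters are never split (the shortened string may be < limit).
    while !original.is_char_boundary(end) {
        end -= 1;
    }
    TruncatableString {
        value: original[..end].to_string(),
        truncated_byte_count: (original.len() - end) as i32,
    }
}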
- repeated Span spans = 2 [(google.api.field_behavior) = REQUIRED]; -} diff --git a/opentelemetry-stackdriver/proto/google/logging/type/http_request.proto b/opentelemetry-stackdriver/proto/google/logging/type/http_request.proto deleted file mode 100644 index b878d60dce..0000000000 --- a/opentelemetry-stackdriver/proto/google/logging/type/http_request.proto +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.logging.type; - -import "google/protobuf/duration.proto"; - -option csharp_namespace = "Google.Cloud.Logging.Type"; -option go_package = "google.golang.org/genproto/googleapis/logging/type;ltype"; -option java_multiple_files = true; -option java_outer_classname = "HttpRequestProto"; -option java_package = "com.google.logging.type"; -option php_namespace = "Google\\Cloud\\Logging\\Type"; -option ruby_package = "Google::Cloud::Logging::Type"; - -// A common proto for logging HTTP requests. Only contains semantics -// defined by the HTTP specification. Product-specific logging -// information MUST be defined in a separate message. -message HttpRequest { - // The request method. Examples: `"GET"`, `"HEAD"`, `"PUT"`, `"POST"`. - string request_method = 1; - - // The scheme (http, https), the host name, the path and the query - // portion of the URL that was requested. - // Example: `"http://example.com/some/info?color=red"`. - string request_url = 2; - - // The size of the HTTP request message in bytes, including the request - // headers and the request body. - int64 request_size = 3; - - // The response code indicating the status of response. - // Examples: 200, 404. - int32 status = 4; - - // The size of the HTTP response message sent back to the client, in bytes, - // including the response headers and the response body. - int64 response_size = 5; - - // The user agent sent by the client. Example: - // `"Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; Q312461; .NET - // CLR 1.0.3705)"`. - string user_agent = 6; - - // The IP address (IPv4 or IPv6) of the client that issued the HTTP - // request. This field can include port information. Examples: - // `"192.168.1.1"`, `"10.0.0.1:80"`, `"FE80::0202:B3FF:FE1E:8329"`. - string remote_ip = 7; - - // The IP address (IPv4 or IPv6) of the origin server that the request was - // sent to. This field can include port information. Examples: - // `"192.168.1.1"`, `"10.0.0.1:80"`, `"FE80::0202:B3FF:FE1E:8329"`. - string server_ip = 13; - - // The referer URL of the request, as defined in - // [HTTP/1.1 Header Field - // Definitions](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html). - string referer = 8; - - // The request processing latency on the server, from the time the request was - // received until the response was sent. - google.protobuf.Duration latency = 14; - - // Whether or not a cache lookup was attempted. - bool cache_lookup = 11; - - // Whether or not an entity was served from cache - // (with or without validation). 
- bool cache_hit = 9; - - // Whether or not the response was validated with the origin server before - // being served from cache. This field is only meaningful if `cache_hit` is - // True. - bool cache_validated_with_origin_server = 10; - - // The number of HTTP response bytes inserted into cache. Set only when a - // cache fill was attempted. - int64 cache_fill_bytes = 12; - - // Protocol used for the request. Examples: "HTTP/1.1", "HTTP/2", "websocket" - string protocol = 15; -} diff --git a/opentelemetry-stackdriver/proto/google/logging/type/log_severity.proto b/opentelemetry-stackdriver/proto/google/logging/type/log_severity.proto deleted file mode 100644 index bed71935f9..0000000000 --- a/opentelemetry-stackdriver/proto/google/logging/type/log_severity.proto +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.logging.type; - -option csharp_namespace = "Google.Cloud.Logging.Type"; -option go_package = "google.golang.org/genproto/googleapis/logging/type;ltype"; -option java_multiple_files = true; -option java_outer_classname = "LogSeverityProto"; -option java_package = "com.google.logging.type"; -option objc_class_prefix = "GLOG"; -option php_namespace = "Google\\Cloud\\Logging\\Type"; -option ruby_package = "Google::Cloud::Logging::Type"; - -// The severity of the event described in a log entry, expressed as one of the -// standard severity levels listed below. For your reference, the levels are -// assigned the listed numeric values. The effect of using numeric values other -// than those listed is undefined. -// -// You can filter for log entries by severity. For example, the following -// filter expression will match log entries with severities `INFO`, `NOTICE`, -// and `WARNING`: -// -// severity > DEBUG AND severity <= WARNING -// -// If you are writing log entries, you should map other severity encodings to -// one of these standard levels. For example, you might map all of Java's FINE, -// FINER, and FINEST levels to `LogSeverity.DEBUG`. You can preserve the -// original severity level in the log entry payload if you wish. -enum LogSeverity { - // (0) The log entry has no assigned severity level. - DEFAULT = 0; - - // (100) Debug or trace information. - DEBUG = 100; - - // (200) Routine information, such as ongoing status or performance. - INFO = 200; - - // (300) Normal but significant events, such as start up, shut down, or - // a configuration change. - NOTICE = 300; - - // (400) Warning events might cause problems. - WARNING = 400; - - // (500) Error events are likely to cause problems. - ERROR = 500; - - // (600) Critical events cause more severe problems or outages. - CRITICAL = 600; - - // (700) A person must take an action immediately. - ALERT = 700; - - // (800) One or more systems are unusable. 
- EMERGENCY = 800; -} diff --git a/opentelemetry-stackdriver/proto/google/logging/v2/log_entry.proto b/opentelemetry-stackdriver/proto/google/logging/v2/log_entry.proto deleted file mode 100644 index 9971293698..0000000000 --- a/opentelemetry-stackdriver/proto/google/logging/v2/log_entry.proto +++ /dev/null @@ -1,241 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.logging.v2; - -import "google/api/field_behavior.proto"; -import "google/api/monitored_resource.proto"; -import "google/api/resource.proto"; -import "google/logging/type/http_request.proto"; -import "google/logging/type/log_severity.proto"; -import "google/protobuf/any.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/timestamp.proto"; - -option cc_enable_arenas = true; -option csharp_namespace = "Google.Cloud.Logging.V2"; -option go_package = "google.golang.org/genproto/googleapis/logging/v2;logging"; -option java_multiple_files = true; -option java_outer_classname = "LogEntryProto"; -option java_package = "com.google.logging.v2"; -option php_namespace = "Google\\Cloud\\Logging\\V2"; -option ruby_package = "Google::Cloud::Logging::V2"; - -// An individual entry in a log. -message LogEntry { - option (google.api.resource) = { - type: "logging.googleapis.com/Log" - pattern: "projects/{project}/logs/{log}" - pattern: "organizations/{organization}/logs/{log}" - pattern: "folders/{folder}/logs/{log}" - pattern: "billingAccounts/{billing_account}/logs/{log}" - name_field: "log_name" - }; - - // Required. The resource name of the log to which this log entry belongs: - // - // "projects/[PROJECT_ID]/logs/[LOG_ID]" - // "organizations/[ORGANIZATION_ID]/logs/[LOG_ID]" - // "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]" - // "folders/[FOLDER_ID]/logs/[LOG_ID]" - // - // A project number may be used in place of PROJECT_ID. The project number is - // translated to its corresponding PROJECT_ID internally and the `log_name` - // field will contain PROJECT_ID in queries and exports. - // - // `[LOG_ID]` must be URL-encoded within `log_name`. Example: - // `"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity"`. - // - // `[LOG_ID]` must be less than 512 characters long and can only include the - // following characters: upper and lower case alphanumeric characters, - // forward-slash, underscore, hyphen, and period. - // - // For backward compatibility, if `log_name` begins with a forward-slash, such - // as `/projects/...`, then the log entry is ingested as usual, but the - // forward-slash is removed. Listing the log entry will not show the leading - // slash and filtering for a log name with a leading slash will never return - // any results. - string log_name = 12 [(google.api.field_behavior) = REQUIRED]; - - // Required. The monitored resource that produced this log entry. 
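Because the severity levels above carry explicit numeric values, a filter such as the documented `severity > DEBUG AND severity <= WARNING` reduces to an integer range check. A Rust sketch (the enum mirrors the proto values; the filter function is illustrative):

#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum LogSeverity {
    Default = 0,
    Debug = 100,
    Info = 200,
    Notice = 300,
    Warning = 400,
    Error = 500,
    Critical = 600,
    Alert = 700,
    Emergency = 800,
}

// Matches INFO, NOTICE, and WARNING, exactly as the example filter describes.
fn matches_example_filter(severity: LogSeverity) -> bool {
    severity > LogSeverity::Debug && severity <= LogSeverity::Warning
}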
- // - // Example: a log entry that reports a database error would be associated with - // the monitored resource designating the particular database that reported - // the error. - google.api.MonitoredResource resource = 8 [(google.api.field_behavior) = REQUIRED]; - - // The log entry payload, which can be one of multiple types. - oneof payload { - // The log entry payload, represented as a protocol buffer. Some Google - // Cloud Platform services use this field for their log entry payloads. - // - // The following protocol buffer types are supported; user-defined types - // are not supported: - // - // "type.googleapis.com/google.cloud.audit.AuditLog" - // "type.googleapis.com/google.appengine.logging.v1.RequestLog" - google.protobuf.Any proto_payload = 2; - - // The log entry payload, represented as a Unicode string (UTF-8). - string text_payload = 3; - - // The log entry payload, represented as a structure that is - // expressed as a JSON object. - google.protobuf.Struct json_payload = 6; - } - - // Optional. The time the event described by the log entry occurred. This time is used - // to compute the log entry's age and to enforce the logs retention period. - // If this field is omitted in a new log entry, then Logging assigns it the - // current time. Timestamps have nanosecond accuracy, but trailing zeros in - // the fractional seconds might be omitted when the timestamp is displayed. - // - // Incoming log entries must have timestamps that don't exceed the - // [logs retention - // period](https://cloud.google.com/logging/quotas#logs_retention_periods) in - // the past, and that don't exceed 24 hours in the future. Log entries outside - // those time boundaries aren't ingested by Logging. - google.protobuf.Timestamp timestamp = 9 [(google.api.field_behavior) = OPTIONAL]; - - // Output only. The time the log entry was received by Logging. - google.protobuf.Timestamp receive_timestamp = 24 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Optional. The severity of the log entry. The default value is `LogSeverity.DEFAULT`. - google.logging.type.LogSeverity severity = 10 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. A unique identifier for the log entry. If you provide a value, then - // Logging considers other log entries in the same project, with the same - // `timestamp`, and with the same `insert_id` to be duplicates which are - // removed in a single query result. However, there are no guarantees of - // de-duplication in the export of logs. - // - // If the `insert_id` is omitted when writing a log entry, the Logging API - // assigns its own unique identifier in this field. - // - // In queries, the `insert_id` is also used to order log entries that have - // the same `log_name` and `timestamp` values. - string insert_id = 4 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Information about the HTTP request associated with this log entry, if - // applicable. - google.logging.type.HttpRequest http_request = 7 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. A map of key, value pairs that provides additional information about the - // log entry. The labels can be user-defined or system-defined. - // - // User-defined labels are arbitrary key, value pairs that you can use to - // classify logs. - // - // System-defined labels are defined by GCP services for platform logs. - // They have two components - a service namespace component and the - // attribute name. For example: `compute.googleapis.com/resource_name`. 
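The `payload` oneof above is the usual proto pattern for mutually exclusive fields, and in Rust it maps naturally onto an enum (code generators such as prost represent oneofs this way). A hand-written sketch with illustrative types:

// One LogEntry carries exactly one payload variant, mirroring the oneof.
enum Payload {
    // Protocol-buffer payload (e.g. an AuditLog), kept here as raw bytes plus
    // its type URL to stay dependency-free.
    Proto { type_url: String, value: Vec<u8> },
    // Unicode (UTF-8) text payload.
    Text(String),
    // Structured payload expressed as a JSON object, held as a string here.
    Json(String),
}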
- // - // Cloud Logging truncates label keys that exceed 512 B and label - // values that exceed 64 KB upon their associated log entry being - // written. The truncation is indicated by an ellipsis at the - // end of the character string. - map labels = 11 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Information about an operation associated with the log entry, if - // applicable. - LogEntryOperation operation = 15 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Resource name of the trace associated with the log entry, if any. If it - // contains a relative resource name, the name is assumed to be relative to - // `//tracing.googleapis.com`. Example: - // `projects/my-projectid/traces/06796866738c859f2f19b7cfb3214824` - string trace = 22 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The span ID within the trace associated with the log entry. - // - // For Trace spans, this is the same format that the Trace API v2 uses: a - // 16-character hexadecimal encoding of an 8-byte array, such as - // `000000000000004a`. - string span_id = 27 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The sampling decision of the trace associated with the log entry. - // - // True means that the trace resource name in the `trace` field was sampled - // for storage in a trace backend. False means that the trace was not sampled - // for storage when this log entry was written, or the sampling decision was - // unknown at the time. A non-sampled `trace` value is still useful as a - // request correlation identifier. The default is False. - bool trace_sampled = 30 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Source code location information associated with the log entry, if any. - LogEntrySourceLocation source_location = 23 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Information indicating this LogEntry is part of a sequence of multiple log - // entries split from a single LogEntry. - LogSplit split = 35 [(google.api.field_behavior) = OPTIONAL]; -} - -// Additional information about a potentially long-running operation with which -// a log entry is associated. -message LogEntryOperation { - // Optional. An arbitrary operation identifier. Log entries with the same - // identifier are assumed to be part of the same operation. - string id = 1 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. An arbitrary producer identifier. The combination of `id` and - // `producer` must be globally unique. Examples for `producer`: - // `"MyDivision.MyBigCompany.com"`, `"github.com/MyProject/MyApplication"`. - string producer = 2 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Set this to True if this is the first log entry in the operation. - bool first = 3 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Set this to True if this is the last log entry in the operation. - bool last = 4 [(google.api.field_behavior) = OPTIONAL]; -} - -// Additional information about the source code location that produced the log -// entry. -message LogEntrySourceLocation { - // Optional. Source file name. Depending on the runtime environment, this - // might be a simple name or a fully-qualified name. - string file = 1 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Line within the source file. 1-based; 0 indicates no line number - // available. - int64 line = 2 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. 
Human-readable name of the function or method being invoked, with - // optional context such as the class or package name. This information may be - // used in contexts such as the logs viewer, where a file and line number are - // less meaningful. The format can vary by language. For example: - // `qual.if.ied.Class.method` (Java), `dir/package.func` (Go), `function` - // (Python). - string function = 3 [(google.api.field_behavior) = OPTIONAL]; -} - -// Additional information used to correlate multiple log entries. Used when a -// single LogEntry would exceed the Google Cloud Logging size limit and is -// split across multiple log entries. -message LogSplit { - // A globally unique identifier for all log entries in a sequence of split log - // entries. All log entries with the same |LogSplit.uid| are assumed to be - // part of the same sequence of split log entries. - string uid = 1; - - // The index of this LogEntry in the sequence of split log entries. Log - // entries are given |index| values 0, 1, ..., n-1 for a sequence of n log - // entries. - int32 index = 2; - - // The total number of log entries that the original LogEntry was split into. - int32 total_splits = 3; -} diff --git a/opentelemetry-stackdriver/proto/google/logging/v2/logging.proto b/opentelemetry-stackdriver/proto/google/logging/v2/logging.proto deleted file mode 100644 index b7f4f189d2..0000000000 --- a/opentelemetry-stackdriver/proto/google/logging/v2/logging.proto +++ /dev/null @@ -1,487 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.logging.v2; - -import "google/api/annotations.proto"; -import "google/api/client.proto"; -import "google/api/field_behavior.proto"; -import "google/api/monitored_resource.proto"; -import "google/api/resource.proto"; -import "google/logging/v2/log_entry.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/empty.proto"; -import "google/rpc/status.proto"; - -option cc_enable_arenas = true; -option csharp_namespace = "Google.Cloud.Logging.V2"; -option go_package = "google.golang.org/genproto/googleapis/logging/v2;logging"; -option java_multiple_files = true; -option java_outer_classname = "LoggingProto"; -option java_package = "com.google.logging.v2"; -option php_namespace = "Google\\Cloud\\Logging\\V2"; -option ruby_package = "Google::Cloud::Logging::V2"; - -// Service for ingesting and querying logs. -service LoggingServiceV2 { - option (google.api.default_host) = "logging.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/cloud-platform," - "https://www.googleapis.com/auth/cloud-platform.read-only," - "https://www.googleapis.com/auth/logging.admin," - "https://www.googleapis.com/auth/logging.read," - "https://www.googleapis.com/auth/logging.write"; - - // Deletes all the log entries in a log for the _Default Log Bucket. The log - // reappears if it receives new entries. 
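`LogSplit` above gives a consumer everything needed to reassemble an oversized entry: group by `uid`, order by `index`, and check `total_splits`. A hedged Rust sketch under those assumptions:

// Reassembles the text of entries that share one `uid` (the caller is assumed
// to have grouped by uid already). Returns None if pieces are missing.
struct SplitEntry {
    index: i32,
    total_splits: i32,
    text: String,
}

fn reassemble(mut parts: Vec<SplitEntry>) -> Option<String> {
    let total = parts.first()?.total_splits;
    if parts.len() as i32 != total {
        return None; // some pieces of the sequence are missing
    }
    // Entries carry index values 0, 1, ..., n-1; sorting restores order.
    parts.sort_by_key(|p| p.index);
    Some(parts.into_iter().map(|p| p.text).collect())
}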
Log entries written shortly before - // the delete operation might not be deleted. Entries received after the - // delete operation with a timestamp before the operation will be deleted. - rpc DeleteLog(DeleteLogRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v2/{log_name=projects/*/logs/*}" - additional_bindings { - delete: "/v2/{log_name=*/*/logs/*}" - } - additional_bindings { - delete: "/v2/{log_name=organizations/*/logs/*}" - } - additional_bindings { - delete: "/v2/{log_name=folders/*/logs/*}" - } - additional_bindings { - delete: "/v2/{log_name=billingAccounts/*/logs/*}" - } - }; - option (google.api.method_signature) = "log_name"; - } - - // Writes log entries to Logging. This API method is the - // only way to send log entries to Logging. This method - // is used, directly or indirectly, by the Logging agent - // (fluentd) and all logging libraries configured to use Logging. - // A single request may contain log entries for a maximum of 1000 - // different resources (projects, organizations, billing accounts or - // folders) - rpc WriteLogEntries(WriteLogEntriesRequest) returns (WriteLogEntriesResponse) { - option (google.api.http) = { - post: "/v2/entries:write" - body: "*" - }; - option (google.api.method_signature) = "log_name,resource,labels,entries"; - } - - // Lists log entries. Use this method to retrieve log entries that originated - // from a project/folder/organization/billing account. For ways to export log - // entries, see [Exporting - // Logs](https://cloud.google.com/logging/docs/export). - rpc ListLogEntries(ListLogEntriesRequest) returns (ListLogEntriesResponse) { - option (google.api.http) = { - post: "/v2/entries:list" - body: "*" - }; - option (google.api.method_signature) = "resource_names,filter,order_by"; - } - - // Lists the descriptors for monitored resource types used by Logging. - rpc ListMonitoredResourceDescriptors(ListMonitoredResourceDescriptorsRequest) returns (ListMonitoredResourceDescriptorsResponse) { - option (google.api.http) = { - get: "/v2/monitoredResourceDescriptors" - }; - } - - // Lists the logs in projects, organizations, folders, or billing accounts. - // Only logs that have entries are listed. - rpc ListLogs(ListLogsRequest) returns (ListLogsResponse) { - option (google.api.http) = { - get: "/v2/{parent=*/*}/logs" - additional_bindings { - get: "/v2/{parent=projects/*}/logs" - } - additional_bindings { - get: "/v2/{parent=organizations/*}/logs" - } - additional_bindings { - get: "/v2/{parent=folders/*}/logs" - } - additional_bindings { - get: "/v2/{parent=billingAccounts/*}/logs" - } - }; - option (google.api.method_signature) = "parent"; - } - - // Streaming read of log entries as they are ingested. Until the stream is - // terminated, it will continue reading logs. - rpc TailLogEntries(stream TailLogEntriesRequest) returns (stream TailLogEntriesResponse) { - option (google.api.http) = { - post: "/v2/entries:tail" - body: "*" - }; - } -} - -// The parameters to DeleteLog. -message DeleteLogRequest { - // Required. The resource name of the log to delete: - // - // * `projects/[PROJECT_ID]/logs/[LOG_ID]` - // * `organizations/[ORGANIZATION_ID]/logs/[LOG_ID]` - // * `billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]` - // * `folders/[FOLDER_ID]/logs/[LOG_ID]` - // - // `[LOG_ID]` must be URL-encoded. For example, - // `"projects/my-project-id/logs/syslog"`, - // `"organizations/123/logs/cloudaudit.googleapis.com%2Factivity"`. 
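The URL-encoding requirement above only matters for LOG_IDs containing `/` and similar characters; percent-encoding reproduces the `%2F` in the example. A dependency-free Rust sketch (a real client would use a URL-encoding crate; only a minimal literal set is kept here):

// Percent-encodes a LOG_ID for embedding in a log_name, turning e.g.
// "cloudaudit.googleapis.com/activity" into
// "cloudaudit.googleapis.com%2Factivity". Not a full RFC 3986 encoder.
fn encode_log_id(log_id: &str) -> String {
    log_id
        .bytes()
        .map(|b| match b {
            b'A'..=b'Z' | b'a'..=b'z' | b'0'..=b'9' | b'_' | b'-' | b'.' => {
                (b as char).to_string()
            }
            other => format!("%{:02X}", other),
        })
        .collect()
}

fn log_name(project_id: &str, log_id: &str) -> String {
    format!("projects/{}/logs/{}", project_id, encode_log_id(log_id))
}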
- // - // For more information about log names, see - // [LogEntry][google.logging.v2.LogEntry]. - string log_name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "logging.googleapis.com/Log" - } - ]; -} - -// The parameters to WriteLogEntries. -message WriteLogEntriesRequest { - // Optional. A default log resource name that is assigned to all log entries - // in `entries` that do not specify a value for `log_name`: - // - // * `projects/[PROJECT_ID]/logs/[LOG_ID]` - // * `organizations/[ORGANIZATION_ID]/logs/[LOG_ID]` - // * `billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]` - // * `folders/[FOLDER_ID]/logs/[LOG_ID]` - // - // `[LOG_ID]` must be URL-encoded. For example: - // - // "projects/my-project-id/logs/syslog" - // "organizations/123/logs/cloudaudit.googleapis.com%2Factivity" - // - // The permission `logging.logEntries.create` is needed on each project, - // organization, billing account, or folder that is receiving new log - // entries, whether the resource is specified in `logName` or in an - // individual log entry. - string log_name = 1 [ - (google.api.field_behavior) = OPTIONAL, - (google.api.resource_reference) = { - type: "logging.googleapis.com/Log" - } - ]; - - // Optional. A default monitored resource object that is assigned to all log - // entries in `entries` that do not specify a value for `resource`. Example: - // - // { "type": "gce_instance", - // "labels": { - // "zone": "us-central1-a", "instance_id": "00000000000000000000" }} - // - // See [LogEntry][google.logging.v2.LogEntry]. - google.api.MonitoredResource resource = 2 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Default labels that are added to the `labels` field of all log - // entries in `entries`. If a log entry already has a label with the same key - // as a label in this parameter, then the log entry's label is not changed. - // See [LogEntry][google.logging.v2.LogEntry]. - map labels = 3 [(google.api.field_behavior) = OPTIONAL]; - - // Required. The log entries to send to Logging. The order of log - // entries in this list does not matter. Values supplied in this method's - // `log_name`, `resource`, and `labels` fields are copied into those log - // entries in this list that do not include values for their corresponding - // fields. For more information, see the - // [LogEntry][google.logging.v2.LogEntry] type. - // - // If the `timestamp` or `insert_id` fields are missing in log entries, then - // this method supplies the current time or a unique identifier, respectively. - // The supplied values are chosen so that, among the log entries that did not - // supply their own values, the entries earlier in the list will sort before - // the entries later in the list. See the `entries.list` method. - // - // Log entries with timestamps that are more than the - // [logs retention period](https://cloud.google.com/logging/quotas) in - // the past or more than 24 hours in the future will not be available when - // calling `entries.list`. However, those log entries can still be [exported - // with - // LogSinks](https://cloud.google.com/logging/docs/api/tasks/exporting-logs). - // - // To improve throughput and to avoid exceeding the - // [quota limit](https://cloud.google.com/logging/quotas) for calls to - // `entries.write`, you should try to include several log entries in this - // list, rather than calling this method for each individual log entry. - repeated LogEntry entries = 4 [(google.api.field_behavior) = REQUIRED]; - - // Optional. 
Whether valid entries should be written even if some other - // entries fail due to INVALID_ARGUMENT or PERMISSION_DENIED errors. If any - // entry is not written, then the response status is the error associated - // with one of the failed entries and the response includes error details - // keyed by the entries' zero-based index in the `entries.write` method. - bool partial_success = 5 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. If true, the request should expect normal response, but the - // entries won't be persisted nor exported. Useful for checking whether the - // logging API endpoints are working properly before sending valuable data. - bool dry_run = 6 [(google.api.field_behavior) = OPTIONAL]; -} - -// Result returned from WriteLogEntries. -message WriteLogEntriesResponse { - -} - -// Error details for WriteLogEntries with partial success. -message WriteLogEntriesPartialErrors { - // When `WriteLogEntriesRequest.partial_success` is true, records the error - // status for entries that were not written due to a permanent error, keyed - // by the entry's zero-based index in `WriteLogEntriesRequest.entries`. - // - // Failed requests for which no entries are written will not include - // per-entry errors. - map log_entry_errors = 1; -} - -// The parameters to `ListLogEntries`. -message ListLogEntriesRequest { - // Required. Names of one or more parent resources from which to - // retrieve log entries: - // - // * `projects/[PROJECT_ID]` - // * `organizations/[ORGANIZATION_ID]` - // * `billingAccounts/[BILLING_ACCOUNT_ID]` - // * `folders/[FOLDER_ID]` - // - // May alternatively be one or more views: - // - // * `projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` - // * `organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` - // * `billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` - // * `folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` - // - // Projects listed in the `project_ids` field are added to this list. - repeated string resource_names = 8 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - child_type: "logging.googleapis.com/Log" - } - ]; - - // Optional. A filter that chooses which log entries to return. See [Advanced - // Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries). - // Only log entries that match the filter are returned. An empty filter - // matches all log entries in the resources listed in `resource_names`. - // Referencing a parent resource that is not listed in `resource_names` will - // cause the filter to return no results. The maximum length of the filter is - // 20000 characters. - string filter = 2 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. How the results should be sorted. Presently, the only permitted - // values are `"timestamp asc"` (default) and `"timestamp desc"`. The first - // option returns entries in order of increasing values of - // `LogEntry.timestamp` (oldest first), and the second option returns entries - // in order of decreasing timestamps (newest first). Entries with equal - // timestamps are returned in order of their `insert_id` values. - string order_by = 3 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The maximum number of results to return from this request. Default is 50. - // If the value is negative or exceeds 1000, the request is rejected. 
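As the comments above note, throughput and quota both favor sending many entries per `entries.write` call rather than one call per entry. A sketch of client-side batching; `LogEntry` and `write_log_entries` are placeholders standing in for the generated message type and RPC stub:

struct LogEntry;

fn write_log_entries(batch: &[LogEntry], partial_success: bool) {
    // Placeholder for the real RPC. With partial_success set, a real client
    // would inspect per-entry errors keyed by zero-based index in the batch.
    let _ = (batch, partial_success);
}

// One RPC per batch instead of one per entry. `batch_size` must be nonzero.
fn flush_in_batches(entries: Vec<LogEntry>, batch_size: usize) {
    for batch in entries.chunks(batch_size) {
        write_log_entries(batch, true);
    }
}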
The - // presence of `next_page_token` in the response indicates that more results - // might be available. - int32 page_size = 4 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. If present, then retrieve the next batch of results from the - // preceding call to this method. `page_token` must be the value of - // `next_page_token` from the previous response. The values of other method - // parameters should be identical to those in the previous call. - string page_token = 5 [(google.api.field_behavior) = OPTIONAL]; -} - -// Result returned from `ListLogEntries`. -message ListLogEntriesResponse { - // A list of log entries. If `entries` is empty, `nextPageToken` may still be - // returned, indicating that more entries may exist. See `nextPageToken` for - // more information. - repeated LogEntry entries = 1; - - // If there might be more results than those appearing in this response, then - // `nextPageToken` is included. To get the next set of results, call this - // method again using the value of `nextPageToken` as `pageToken`. - // - // If a value for `next_page_token` appears and the `entries` field is empty, - // it means that the search found no log entries so far but it did not have - // time to search all the possible log entries. Retry the method with this - // value for `page_token` to continue the search. Alternatively, consider - // speeding up the search by changing your filter to specify a single log name - // or resource type, or to narrow the time range of the search. - string next_page_token = 2; -} - -// The parameters to ListMonitoredResourceDescriptors -message ListMonitoredResourceDescriptorsRequest { - // Optional. The maximum number of results to return from this request. - // Non-positive values are ignored. The presence of `nextPageToken` in the - // response indicates that more results might be available. - int32 page_size = 1 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. If present, then retrieve the next batch of results from the - // preceding call to this method. `pageToken` must be the value of - // `nextPageToken` from the previous response. The values of other method - // parameters should be identical to those in the previous call. - string page_token = 2 [(google.api.field_behavior) = OPTIONAL]; -} - -// Result returned from ListMonitoredResourceDescriptors. -message ListMonitoredResourceDescriptorsResponse { - // A list of resource descriptors. - repeated google.api.MonitoredResourceDescriptor resource_descriptors = 1; - - // If there might be more results than those appearing in this response, then - // `nextPageToken` is included. To get the next set of results, call this - // method again using the value of `nextPageToken` as `pageToken`. - string next_page_token = 2; -} - -// The parameters to ListLogs. -message ListLogsRequest { - // Required. The resource name that owns the logs: - // - // * `projects/[PROJECT_ID]` - // * `organizations/[ORGANIZATION_ID]` - // * `billingAccounts/[BILLING_ACCOUNT_ID]` - // * `folders/[FOLDER_ID]` - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - child_type: "logging.googleapis.com/Log" - } - ]; - - // Optional. The maximum number of results to return from this request. - // Non-positive values are ignored. The presence of `nextPageToken` in the - // response indicates that more results might be available. - int32 page_size = 2 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. 
If present, then retrieve the next batch of results from the - // preceding call to this method. `pageToken` must be the value of - // `nextPageToken` from the previous response. The values of other method - // parameters should be identical to those in the previous call. - string page_token = 3 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The resource name that owns the logs: - // - // * `projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` - // * `organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` - // * `billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` - // * `folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` - // - // To support legacy queries, it could also be: - // - // * `projects/[PROJECT_ID]` - // * `organizations/[ORGANIZATION_ID]` - // * `billingAccounts/[BILLING_ACCOUNT_ID]` - // * `folders/[FOLDER_ID]` - repeated string resource_names = 8 [ - (google.api.field_behavior) = OPTIONAL, - (google.api.resource_reference) = { - child_type: "logging.googleapis.com/Log" - } - ]; -} - -// Result returned from ListLogs. -message ListLogsResponse { - // A list of log names. For example, - // `"projects/my-project/logs/syslog"` or - // `"organizations/123/logs/cloudresourcemanager.googleapis.com%2Factivity"`. - repeated string log_names = 3; - - // If there might be more results than those appearing in this response, then - // `nextPageToken` is included. To get the next set of results, call this - // method again using the value of `nextPageToken` as `pageToken`. - string next_page_token = 2; -} - -// The parameters to `TailLogEntries`. -message TailLogEntriesRequest { - // Required. Name of a parent resource from which to retrieve log entries: - // - // * `projects/[PROJECT_ID]` - // * `organizations/[ORGANIZATION_ID]` - // * `billingAccounts/[BILLING_ACCOUNT_ID]` - // * `folders/[FOLDER_ID]` - // - // May alternatively be one or more views: - // - // * `projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` - // * `organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` - // * `billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` - // * `folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` - repeated string resource_names = 1 [(google.api.field_behavior) = REQUIRED]; - - // Optional. A filter that chooses which log entries to return. See [Advanced - // Logs Filters](https://cloud.google.com/logging/docs/view/advanced_filters). - // Only log entries that match the filter are returned. An empty filter - // matches all log entries in the resources listed in `resource_names`. - // Referencing a parent resource that is not in `resource_names` will cause - // the filter to return no results. The maximum length of the filter is 20000 - // characters. - string filter = 2 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The amount of time to buffer log entries at the server before - // being returned to prevent out of order results due to late arriving log - // entries. Valid values are between 0-60000 milliseconds. Defaults to 2000 - // milliseconds. - google.protobuf.Duration buffer_window = 3 [(google.api.field_behavior) = OPTIONAL]; -} - -// Result returned from `TailLogEntries`. -message TailLogEntriesResponse { - // Information about entries that were omitted from the session. 
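The `page_token`/`next_page_token` contract above, including the documented case where a response carries an empty `entries` list but still returns a token, implies the standard drain loop. A sketch against a hypothetical `list_log_entries` stub:

// Drains all pages. Note the subtlety from the docs: an empty page with a
// token means "keep going", not "done".
struct Page {
    entries: Vec<String>,    // stand-in for Vec<LogEntry>
    next_page_token: String, // empty string means no more pages
}

fn list_log_entries(page_token: &str) -> Page {
    // Placeholder for the real RPC call.
    let _ = page_token;
    Page { entries: Vec::new(), next_page_token: String::new() }
}

fn collect_all_entries() -> Vec<String> {
    let mut all = Vec::new();
    let mut token = String::new();
    loop {
        let page = list_log_entries(&token);
        all.extend(page.entries);
        if page.next_page_token.is_empty() {
            break;
        }
        token = page.next_page_token;
    }
    all
}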
- message SuppressionInfo { - // An indicator of why entries were omitted. - enum Reason { - // Unexpected default. - REASON_UNSPECIFIED = 0; - - // Indicates suppression occurred due to relevant entries being - // received in excess of rate limits. For quotas and limits, see - // [Logging API quotas and - // limits](https://cloud.google.com/logging/quotas#api-limits). - RATE_LIMIT = 1; - - // Indicates suppression occurred due to the client not consuming - // responses quickly enough. - NOT_CONSUMED = 2; - } - - // The reason that entries were omitted from the session. - Reason reason = 1; - - // A lower bound on the count of entries omitted due to `reason`. - int32 suppressed_count = 2; - } - - // A list of log entries. Each response in the stream will order entries with - // increasing values of `LogEntry.timestamp`. Ordering is not guaranteed - // between separate responses. - repeated LogEntry entries = 1; - - // If entries that otherwise would have been included in the session were not - // sent back to the client, counts of relevant entries omitted from the - // session with the reason that they were not included. There will be at most - // one of each reason per response. The counts represent the number of - // suppressed entries since the last streamed response. - repeated SuppressionInfo suppression_info = 2; -} diff --git a/opentelemetry-stackdriver/proto/google/logging/v2/logging_config.proto b/opentelemetry-stackdriver/proto/google/logging/v2/logging_config.proto deleted file mode 100644 index ef0024063d..0000000000 --- a/opentelemetry-stackdriver/proto/google/logging/v2/logging_config.proto +++ /dev/null @@ -1,1957 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -syntax = "proto3"; - -package google.logging.v2; - -import "google/api/annotations.proto"; -import "google/api/client.proto"; -import "google/api/field_behavior.proto"; -import "google/api/resource.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/field_mask.proto"; -import "google/protobuf/timestamp.proto"; - -option cc_enable_arenas = true; -option csharp_namespace = "Google.Cloud.Logging.V2"; -option go_package = "google.golang.org/genproto/googleapis/logging/v2;logging"; -option java_multiple_files = true; -option java_outer_classname = "LoggingConfigProto"; -option java_package = "com.google.logging.v2"; -option php_namespace = "Google\\Cloud\\Logging\\V2"; -option ruby_package = "Google::Cloud::Logging::V2"; -option (google.api.resource_definition) = { - type: "logging.googleapis.com/OrganizationLocation" - pattern: "organizations/{organization}/locations/{location}" -}; -option (google.api.resource_definition) = { - type: "logging.googleapis.com/FolderLocation" - pattern: "folders/{folder}/locations/{location}" -}; -option (google.api.resource_definition) = { - type: "logging.googleapis.com/BillingAccountLocation" - pattern: "billingAccounts/{billing_account}/locations/{location}" -}; - -// Service for configuring sinks used to route log entries. -service ConfigServiceV2 { - option (google.api.default_host) = "logging.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/cloud-platform," - "https://www.googleapis.com/auth/cloud-platform.read-only," - "https://www.googleapis.com/auth/logging.admin," - "https://www.googleapis.com/auth/logging.read"; - - // Lists log buckets. - rpc ListBuckets(ListBucketsRequest) returns (ListBucketsResponse) { - option (google.api.http) = { - get: "/v2/{parent=*/*/locations/*}/buckets" - additional_bindings { - get: "/v2/{parent=projects/*/locations/*}/buckets" - } - additional_bindings { - get: "/v2/{parent=organizations/*/locations/*}/buckets" - } - additional_bindings { - get: "/v2/{parent=folders/*/locations/*}/buckets" - } - additional_bindings { - get: "/v2/{parent=billingAccounts/*/locations/*}/buckets" - } - }; - option (google.api.method_signature) = "parent"; - } - - // Gets a log bucket. - rpc GetBucket(GetBucketRequest) returns (LogBucket) { - option (google.api.http) = { - get: "/v2/{name=*/*/locations/*/buckets/*}" - additional_bindings { - get: "/v2/{name=projects/*/locations/*/buckets/*}" - } - additional_bindings { - get: "/v2/{name=organizations/*/locations/*/buckets/*}" - } - additional_bindings { - get: "/v2/{name=folders/*/locations/*/buckets/*}" - } - additional_bindings { - get: "/v2/{name=billingAccounts/*/buckets/*}" - } - }; - } - - // Creates a log bucket that can be used to store log entries. After a bucket - // has been created, the bucket's location cannot be changed. - rpc CreateBucket(CreateBucketRequest) returns (LogBucket) { - option (google.api.http) = { - post: "/v2/{parent=*/*/locations/*}/buckets" - body: "bucket" - additional_bindings { - post: "/v2/{parent=projects/*/locations/*}/buckets" - body: "bucket" - } - additional_bindings { - post: "/v2/{parent=organizations/*/locations/*}/buckets" - body: "bucket" - } - additional_bindings { - post: "/v2/{parent=folders/*/locations/*}/buckets" - body: "bucket" - } - additional_bindings { - post: "/v2/{parent=billingAccounts/*/locations/*}/buckets" - body: "bucket" - } - }; - } - - // Updates a log bucket. 
This method replaces the following fields in the - // existing bucket with values from the new bucket: `retention_period` - // - // If the retention period is decreased and the bucket is locked, - // `FAILED_PRECONDITION` will be returned. - // - // If the bucket has a `lifecycle_state` of `DELETE_REQUESTED`, then - // `FAILED_PRECONDITION` will be returned. - // - // After a bucket has been created, the bucket's location cannot be changed. - rpc UpdateBucket(UpdateBucketRequest) returns (LogBucket) { - option (google.api.http) = { - patch: "/v2/{name=*/*/locations/*/buckets/*}" - body: "bucket" - additional_bindings { - patch: "/v2/{name=projects/*/locations/*/buckets/*}" - body: "bucket" - } - additional_bindings { - patch: "/v2/{name=organizations/*/locations/*/buckets/*}" - body: "bucket" - } - additional_bindings { - patch: "/v2/{name=folders/*/locations/*/buckets/*}" - body: "bucket" - } - additional_bindings { - patch: "/v2/{name=billingAccounts/*/locations/*/buckets/*}" - body: "bucket" - } - }; - } - - // Deletes a log bucket. - // - // Changes the bucket's `lifecycle_state` to the `DELETE_REQUESTED` state. - // After 7 days, the bucket will be purged and all log entries in the bucket - // will be permanently deleted. - rpc DeleteBucket(DeleteBucketRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v2/{name=*/*/locations/*/buckets/*}" - additional_bindings { - delete: "/v2/{name=projects/*/locations/*/buckets/*}" - } - additional_bindings { - delete: "/v2/{name=organizations/*/locations/*/buckets/*}" - } - additional_bindings { - delete: "/v2/{name=folders/*/locations/*/buckets/*}" - } - additional_bindings { - delete: "/v2/{name=billingAccounts/*/locations/*/buckets/*}" - } - }; - } - - // Undeletes a log bucket. A bucket that has been deleted can be undeleted - // within the grace period of 7 days. - rpc UndeleteBucket(UndeleteBucketRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - post: "/v2/{name=*/*/locations/*/buckets/*}:undelete" - body: "*" - additional_bindings { - post: "/v2/{name=projects/*/locations/*/buckets/*}:undelete" - body: "*" - } - additional_bindings { - post: "/v2/{name=organizations/*/locations/*/buckets/*}:undelete" - body: "*" - } - additional_bindings { - post: "/v2/{name=folders/*/locations/*/buckets/*}:undelete" - body: "*" - } - additional_bindings { - post: "/v2/{name=billingAccounts/*/locations/*/buckets/*}:undelete" - body: "*" - } - }; - } - - // Lists views on a log bucket. - rpc ListViews(ListViewsRequest) returns (ListViewsResponse) { - option (google.api.http) = { - get: "/v2/{parent=*/*/locations/*/buckets/*}/views" - additional_bindings { - get: "/v2/{parent=projects/*/locations/*/buckets/*}/views" - } - additional_bindings { - get: "/v2/{parent=organizations/*/locations/*/buckets/*}/views" - } - additional_bindings { - get: "/v2/{parent=folders/*/locations/*/buckets/*}/views" - } - additional_bindings { - get: "/v2/{parent=billingAccounts/*/locations/*/buckets/*}/views" - } - }; - option (google.api.method_signature) = "parent"; - } - - // Gets a view on a log bucket.. 
- rpc GetView(GetViewRequest) returns (LogView) { - option (google.api.http) = { - get: "/v2/{name=*/*/locations/*/buckets/*/views/*}" - additional_bindings { - get: "/v2/{name=projects/*/locations/*/buckets/*/views/*}" - } - additional_bindings { - get: "/v2/{name=organizations/*/locations/*/buckets/*/views/*}" - } - additional_bindings { - get: "/v2/{name=folders/*/locations/*/buckets/*/views/*}" - } - additional_bindings { - get: "/v2/{name=billingAccounts/*/buckets/*/views/*}" - } - }; - } - - // Creates a view over log entries in a log bucket. A bucket may contain a - // maximum of 30 views. - rpc CreateView(CreateViewRequest) returns (LogView) { - option (google.api.http) = { - post: "/v2/{parent=*/*/locations/*/buckets/*}/views" - body: "view" - additional_bindings { - post: "/v2/{parent=projects/*/locations/*/buckets/*}/views" - body: "view" - } - additional_bindings { - post: "/v2/{parent=organizations/*/locations/*/buckets/*}/views" - body: "view" - } - additional_bindings { - post: "/v2/{parent=folders/*/locations/*/buckets/*}/views" - body: "view" - } - additional_bindings { - post: "/v2/{parent=billingAccounts/*/locations/*/buckets/*}/views" - body: "view" - } - }; - } - - // Updates a view on a log bucket. This method replaces the following fields - // in the existing view with values from the new view: `filter`. - // If an `UNAVAILABLE` error is returned, this indicates that system is not in - // a state where it can update the view. If this occurs, please try again in a - // few minutes. - rpc UpdateView(UpdateViewRequest) returns (LogView) { - option (google.api.http) = { - patch: "/v2/{name=*/*/locations/*/buckets/*/views/*}" - body: "view" - additional_bindings { - patch: "/v2/{name=projects/*/locations/*/buckets/*/views/*}" - body: "view" - } - additional_bindings { - patch: "/v2/{name=organizations/*/locations/*/buckets/*/views/*}" - body: "view" - } - additional_bindings { - patch: "/v2/{name=folders/*/locations/*/buckets/*/views/*}" - body: "view" - } - additional_bindings { - patch: "/v2/{name=billingAccounts/*/locations/*/buckets/*/views/*}" - body: "view" - } - }; - } - - // Deletes a view on a log bucket. - // If an `UNAVAILABLE` error is returned, this indicates that system is not in - // a state where it can delete the view. If this occurs, please try again in a - // few minutes. - rpc DeleteView(DeleteViewRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v2/{name=*/*/locations/*/buckets/*/views/*}" - additional_bindings { - delete: "/v2/{name=projects/*/locations/*/buckets/*/views/*}" - } - additional_bindings { - delete: "/v2/{name=organizations/*/locations/*/buckets/*/views/*}" - } - additional_bindings { - delete: "/v2/{name=folders/*/locations/*/buckets/*/views/*}" - } - additional_bindings { - delete: "/v2/{name=billingAccounts/*/locations/*/buckets/*/views/*}" - } - }; - } - - // Lists sinks. - rpc ListSinks(ListSinksRequest) returns (ListSinksResponse) { - option (google.api.http) = { - get: "/v2/{parent=*/*}/sinks" - additional_bindings { - get: "/v2/{parent=projects/*}/sinks" - } - additional_bindings { - get: "/v2/{parent=organizations/*}/sinks" - } - additional_bindings { - get: "/v2/{parent=folders/*}/sinks" - } - additional_bindings { - get: "/v2/{parent=billingAccounts/*}/sinks" - } - }; - option (google.api.method_signature) = "parent"; - } - - // Gets a sink. 
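For `UpdateView` and `DeleteView` above, the docs say an `UNAVAILABLE` error means the system cannot modify the view right now and the call should simply be retried a few minutes later. A minimal retry sketch; the error type and the pause length are assumptions, not part of any real client:

use std::{thread, time::Duration};

#[derive(Debug)]
enum RpcError {
    Unavailable,
    Other(String),
}

// Retries only on UNAVAILABLE, with a fixed pause between attempts, since
// the docs above suggest trying again "in a few minutes".
fn with_unavailable_retry<T>(
    mut call: impl FnMut() -> Result<T, RpcError>,
    attempts: u32,
) -> Result<T, RpcError> {
    for _ in 1..attempts {
        match call() {
            Err(RpcError::Unavailable) => thread::sleep(Duration::from_secs(60)),
            other => return other,
        }
    }
    call()
}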
- rpc GetSink(GetSinkRequest) returns (LogSink) { - option (google.api.http) = { - get: "/v2/{sink_name=*/*/sinks/*}" - additional_bindings { - get: "/v2/{sink_name=projects/*/sinks/*}" - } - additional_bindings { - get: "/v2/{sink_name=organizations/*/sinks/*}" - } - additional_bindings { - get: "/v2/{sink_name=folders/*/sinks/*}" - } - additional_bindings { - get: "/v2/{sink_name=billingAccounts/*/sinks/*}" - } - }; - option (google.api.method_signature) = "sink_name"; - } - - // Creates a sink that exports specified log entries to a destination. The - // export of newly-ingested log entries begins immediately, unless the sink's - // `writer_identity` is not permitted to write to the destination. A sink can - // export log entries only from the resource owning the sink. - rpc CreateSink(CreateSinkRequest) returns (LogSink) { - option (google.api.http) = { - post: "/v2/{parent=*/*}/sinks" - body: "sink" - additional_bindings { - post: "/v2/{parent=projects/*}/sinks" - body: "sink" - } - additional_bindings { - post: "/v2/{parent=organizations/*}/sinks" - body: "sink" - } - additional_bindings { - post: "/v2/{parent=folders/*}/sinks" - body: "sink" - } - additional_bindings { - post: "/v2/{parent=billingAccounts/*}/sinks" - body: "sink" - } - }; - option (google.api.method_signature) = "parent,sink"; - } - - // Updates a sink. This method replaces the following fields in the existing - // sink with values from the new sink: `destination`, and `filter`. - // - // The updated sink might also have a new `writer_identity`; see the - // `unique_writer_identity` field. - rpc UpdateSink(UpdateSinkRequest) returns (LogSink) { - option (google.api.http) = { - put: "/v2/{sink_name=*/*/sinks/*}" - body: "sink" - additional_bindings { - put: "/v2/{sink_name=projects/*/sinks/*}" - body: "sink" - } - additional_bindings { - put: "/v2/{sink_name=organizations/*/sinks/*}" - body: "sink" - } - additional_bindings { - put: "/v2/{sink_name=folders/*/sinks/*}" - body: "sink" - } - additional_bindings { - put: "/v2/{sink_name=billingAccounts/*/sinks/*}" - body: "sink" - } - additional_bindings { - patch: "/v2/{sink_name=projects/*/sinks/*}" - body: "sink" - } - additional_bindings { - patch: "/v2/{sink_name=organizations/*/sinks/*}" - body: "sink" - } - additional_bindings { - patch: "/v2/{sink_name=folders/*/sinks/*}" - body: "sink" - } - additional_bindings { - patch: "/v2/{sink_name=billingAccounts/*/sinks/*}" - body: "sink" - } - }; - option (google.api.method_signature) = "sink_name,sink,update_mask"; - option (google.api.method_signature) = "sink_name,sink"; - } - - // Deletes a sink. If the sink has a unique `writer_identity`, then that - // service account is also deleted. - rpc DeleteSink(DeleteSinkRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v2/{sink_name=*/*/sinks/*}" - additional_bindings { - delete: "/v2/{sink_name=projects/*/sinks/*}" - } - additional_bindings { - delete: "/v2/{sink_name=organizations/*/sinks/*}" - } - additional_bindings { - delete: "/v2/{sink_name=folders/*/sinks/*}" - } - additional_bindings { - delete: "/v2/{sink_name=billingAccounts/*/sinks/*}" - } - }; - option (google.api.method_signature) = "sink_name"; - } - - // Lists all the exclusions on the _Default sink in a parent resource. 
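`UpdateSink` above replaces a fixed set of fields (`destination`, `filter`) with values from the request body, optionally narrowed by `update_mask`. A sketch of that merge semantic on a toy struct; the field names come from the proto, the helper itself is illustrative:

#[derive(Clone)]
struct LogSink {
    destination: String,
    filter: String,
}

// Copies only the masked fields from `new` into a copy of `existing`;
// everything else in the stored sink is left untouched.
fn apply_update(existing: &LogSink, new: &LogSink, update_mask: &[&str]) -> LogSink {
    let mut updated = existing.clone();
    for field in update_mask {
        match *field {
            "destination" => updated.destination = new.destination.clone(),
            "filter" => updated.filter = new.filter.clone(),
            _ => {} // unknown field paths are ignored in this sketch
        }
    }
    updated
}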
- rpc ListExclusions(ListExclusionsRequest) returns (ListExclusionsResponse) { - option (google.api.http) = { - get: "/v2/{parent=*/*}/exclusions" - additional_bindings { - get: "/v2/{parent=projects/*}/exclusions" - } - additional_bindings { - get: "/v2/{parent=organizations/*}/exclusions" - } - additional_bindings { - get: "/v2/{parent=folders/*}/exclusions" - } - additional_bindings { - get: "/v2/{parent=billingAccounts/*}/exclusions" - } - }; - option (google.api.method_signature) = "parent"; - } - - // Gets the description of an exclusion in the _Default sink. - rpc GetExclusion(GetExclusionRequest) returns (LogExclusion) { - option (google.api.http) = { - get: "/v2/{name=*/*/exclusions/*}" - additional_bindings { - get: "/v2/{name=projects/*/exclusions/*}" - } - additional_bindings { - get: "/v2/{name=organizations/*/exclusions/*}" - } - additional_bindings { - get: "/v2/{name=folders/*/exclusions/*}" - } - additional_bindings { - get: "/v2/{name=billingAccounts/*/exclusions/*}" - } - }; - option (google.api.method_signature) = "name"; - } - - // Creates a new exclusion in the _Default sink in a specified parent - // resource. Only log entries belonging to that resource can be excluded. You - // can have up to 10 exclusions in a resource. - rpc CreateExclusion(CreateExclusionRequest) returns (LogExclusion) { - option (google.api.http) = { - post: "/v2/{parent=*/*}/exclusions" - body: "exclusion" - additional_bindings { - post: "/v2/{parent=projects/*}/exclusions" - body: "exclusion" - } - additional_bindings { - post: "/v2/{parent=organizations/*}/exclusions" - body: "exclusion" - } - additional_bindings { - post: "/v2/{parent=folders/*}/exclusions" - body: "exclusion" - } - additional_bindings { - post: "/v2/{parent=billingAccounts/*}/exclusions" - body: "exclusion" - } - }; - option (google.api.method_signature) = "parent,exclusion"; - } - - // Changes one or more properties of an existing exclusion in the _Default - // sink. - rpc UpdateExclusion(UpdateExclusionRequest) returns (LogExclusion) { - option (google.api.http) = { - patch: "/v2/{name=*/*/exclusions/*}" - body: "exclusion" - additional_bindings { - patch: "/v2/{name=projects/*/exclusions/*}" - body: "exclusion" - } - additional_bindings { - patch: "/v2/{name=organizations/*/exclusions/*}" - body: "exclusion" - } - additional_bindings { - patch: "/v2/{name=folders/*/exclusions/*}" - body: "exclusion" - } - additional_bindings { - patch: "/v2/{name=billingAccounts/*/exclusions/*}" - body: "exclusion" - } - }; - option (google.api.method_signature) = "name,exclusion,update_mask"; - } - - // Deletes an exclusion in the _Default sink. - rpc DeleteExclusion(DeleteExclusionRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v2/{name=*/*/exclusions/*}" - additional_bindings { - delete: "/v2/{name=projects/*/exclusions/*}" - } - additional_bindings { - delete: "/v2/{name=organizations/*/exclusions/*}" - } - additional_bindings { - delete: "/v2/{name=folders/*/exclusions/*}" - } - additional_bindings { - delete: "/v2/{name=billingAccounts/*/exclusions/*}" - } - }; - option (google.api.method_signature) = "name"; - } - - // Gets the Logging CMEK settings for the given resource. - // - // Note: CMEK for the Log Router can be configured for Google Cloud projects, - // folders, organizations and billing accounts. Once configured for an - // organization, it applies to all projects and folders in the Google Cloud - // organization. 
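A sketch of the exclusion lifecycle described above, again using the hypothetical `proto::logging::v2` module and illustrative names; recall the documented limit of 10 exclusions per resource:

    use proto::logging::v2::{
        config_service_v2_client::ConfigServiceV2Client, CreateExclusionRequest, LogExclusion,
    };
    use tonic::transport::Channel;

    // Exclude low-severity entries from the _Default sink to cut chargeable volume.
    async fn exclude_debug_logs(channel: Channel) -> Result<LogExclusion, tonic::Status> {
        let mut client = ConfigServiceV2Client::new(channel);
        let response = client
            .create_exclusion(CreateExclusionRequest {
                parent: "projects/my-project".to_owned(),
                exclusion: Some(LogExclusion {
                    name: "no-debug".to_owned(),
                    description: "Drop sub-INFO entries".to_owned(),
                    filter: "severity<INFO".to_owned(),
                    ..Default::default()
                }),
            })
            .await?;
        Ok(response.into_inner())
    }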
- // - // See [Enabling CMEK for Log - // Router](https://cloud.google.com/logging/docs/routing/managed-encryption) - // for more information. - rpc GetCmekSettings(GetCmekSettingsRequest) returns (CmekSettings) { - option (google.api.http) = { - get: "/v2/{name=*/*}/cmekSettings" - additional_bindings { - get: "/v2/{name=projects/*}/cmekSettings" - } - additional_bindings { - get: "/v2/{name=organizations/*}/cmekSettings" - } - additional_bindings { - get: "/v2/{name=folders/*}/cmekSettings" - } - additional_bindings { - get: "/v2/{name=billingAccounts/*}/cmekSettings" - } - }; - } - - // Updates the Log Router CMEK settings for the given resource. - // - // Note: CMEK for the Log Router can currently only be configured for Google - // Cloud organizations. Once configured, it applies to all projects and - // folders in the Google Cloud organization. - // - // [UpdateCmekSettings][google.logging.v2.ConfigServiceV2.UpdateCmekSettings] - // will fail if 1) `kms_key_name` is invalid, or 2) the associated service - // account does not have the required - // `roles/cloudkms.cryptoKeyEncrypterDecrypter` role assigned for the key, or - // 3) access to the key is disabled. - // - // See [Enabling CMEK for Log - // Router](https://cloud.google.com/logging/docs/routing/managed-encryption) - // for more information. - rpc UpdateCmekSettings(UpdateCmekSettingsRequest) returns (CmekSettings) { - option (google.api.http) = { - patch: "/v2/{name=*/*}/cmekSettings" - body: "cmek_settings" - additional_bindings { - patch: "/v2/{name=organizations/*}/cmekSettings" - body: "cmek_settings" - } - }; - } - - // Gets the Log Router settings for the given resource. - // - // Note: Settings for the Log Router can be get for Google Cloud projects, - // folders, organizations and billing accounts. Currently it can only be - // configured for organizations. Once configured for an organization, it - // applies to all projects and folders in the Google Cloud organization. - // - // See [Enabling CMEK for Log - // Router](https://cloud.google.com/logging/docs/routing/managed-encryption) - // for more information. - rpc GetSettings(GetSettingsRequest) returns (Settings) { - option (google.api.http) = { - get: "/v2/{name=*/*}/settings" - additional_bindings { - get: "/v2/{name=projects/*}/settings" - } - additional_bindings { - get: "/v2/{name=organizations/*}/settings" - } - additional_bindings { - get: "/v2/{name=folders/*}/settings" - } - additional_bindings { - get: "/v2/{name=billingAccounts/*}/settings" - } - }; - option (google.api.method_signature) = "name"; - } - - // Updates the Log Router settings for the given resource. - // - // Note: Settings for the Log Router can currently only be configured for - // Google Cloud organizations. Once configured, it applies to all projects and - // folders in the Google Cloud organization. - // - // [UpdateSettings][google.logging.v2.ConfigServiceV2.UpdateSettings] - // will fail if 1) `kms_key_name` is invalid, or 2) the associated service - // account does not have the required - // `roles/cloudkms.cryptoKeyEncrypterDecrypter` role assigned for the key, or - // 3) access to the key is disabled. 4) `location_id` is not supported by - // Logging. 5) `location_id` violate OrgPolicy. - // - // See [Enabling CMEK for Log - // Router](https://cloud.google.com/logging/docs/routing/managed-encryption) - // for more information. 
- rpc UpdateSettings(UpdateSettingsRequest) returns (Settings) { - option (google.api.http) = { - patch: "/v2/{name=*/*}/settings" - body: "settings" - additional_bindings { - patch: "/v2/{name=organizations/*}/settings" - body: "settings" - } - additional_bindings { - patch: "/v2/{name=folders/*}/settings" - body: "settings" - } - }; - option (google.api.method_signature) = "settings,update_mask"; - } - - // Copies a set of log entries from a log bucket to a Cloud Storage bucket. - rpc CopyLogEntries(CopyLogEntriesRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v2/entries:copy" - body: "*" - }; - option (google.longrunning.operation_info) = { - response_type: "CopyLogEntriesResponse" - metadata_type: "CopyLogEntriesMetadata" - }; - } -} - -// Describes a repository in which log entries are stored. -message LogBucket { - option (google.api.resource) = { - type: "logging.googleapis.com/LogBucket" - pattern: "projects/{project}/locations/{location}/buckets/{bucket}" - pattern: "organizations/{organization}/locations/{location}/buckets/{bucket}" - pattern: "folders/{folder}/locations/{location}/buckets/{bucket}" - pattern: "billingAccounts/{billing_account}/locations/{location}/buckets/{bucket}" - }; - - // Output only. The resource name of the bucket. - // - // For example: - // - // `projects/my-project/locations/global/buckets/my-bucket` - // - // For a list of supported locations, see [Supported - // Regions](https://cloud.google.com/logging/docs/region-support) - // - // For the location of `global` it is unspecified where log entries are - // actually stored. - // - // After a bucket has been created, the location cannot be changed. - string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Describes this bucket. - string description = 3; - - // Output only. The creation timestamp of the bucket. This is not set for any of the - // default buckets. - google.protobuf.Timestamp create_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. The last update timestamp of the bucket. - google.protobuf.Timestamp update_time = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Logs will be retained by default for this amount of time, after which they - // will automatically be deleted. The minimum retention period is 1 day. If - // this value is set to zero at bucket creation time, the default time of 30 - // days will be used. - int32 retention_days = 11; - - // Whether the bucket is locked. - // - // The retention period on a locked bucket cannot be changed. Locked buckets - // may only be deleted if they are empty. - bool locked = 9; - - // Output only. The bucket lifecycle state. - LifecycleState lifecycle_state = 12 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Log entry field paths that are denied access in this bucket. - // - // The following fields and their children are eligible: `textPayload`, - // `jsonPayload`, `protoPayload`, `httpRequest`, `labels`, `sourceLocation`. - // - // Restricting a repeated field will restrict all values. Adding a parent will - // block all child fields. (e.g. `foo.bar` will block `foo.bar.baz`) - repeated string restricted_fields = 15; - - // The CMEK settings of the log bucket. If present, new log entries written to - // this log bucket are encrypted using the CMEK key provided in this - // configuration. If a log bucket has CMEK settings, the CMEK settings cannot - // be disabled later by updating the log bucket. Changing the KMS key is - // allowed. 
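`CopyLogEntries` above is the one long-running method in this service: instead of the copied data it returns a `google.longrunning.Operation` to poll, with `CopyLogEntriesResponse` as the eventual response type. A sketch, with both module paths assumed rather than taken from this patch:

    use proto::logging::v2::{config_service_v2_client::ConfigServiceV2Client, CopyLogEntriesRequest};
    // Hypothetical path for the generated google.longrunning types.
    use proto::longrunning::Operation;
    use tonic::transport::Channel;

    // Start copying matching entries from a log bucket to a Cloud Storage bucket.
    async fn copy_entries(channel: Channel) -> Result<Operation, tonic::Status> {
        let mut client = ConfigServiceV2Client::new(channel);
        let response = client
            .copy_log_entries(CopyLogEntriesRequest {
                name: "projects/my-project/locations/global/buckets/my-bucket".to_owned(),
                filter: r#"timestamp < "2023-01-01T00:00:00Z""#.to_owned(),
                destination: "storage.googleapis.com/my-archive-bucket".to_owned(),
            })
            .await?;
        Ok(response.into_inner())
    }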
- CmekSettings cmek_settings = 19; -} - -// Describes a view over log entries in a bucket. -message LogView { - option (google.api.resource) = { - type: "logging.googleapis.com/LogView" - pattern: "projects/{project}/locations/{location}/buckets/{bucket}/views/{view}" - pattern: "organizations/{organization}/locations/{location}/buckets/{bucket}/views/{view}" - pattern: "folders/{folder}/locations/{location}/buckets/{bucket}/views/{view}" - pattern: "billingAccounts/{billing_account}/locations/{location}/buckets/{bucket}/views/{view}" - }; - - // The resource name of the view. - // - // For example: - // - // `projects/my-project/locations/global/buckets/my-bucket/views/my-view` - string name = 1; - - // Describes this view. - string description = 3; - - // Output only. The creation timestamp of the view. - google.protobuf.Timestamp create_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. The last update timestamp of the view. - google.protobuf.Timestamp update_time = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Filter that restricts which log entries in a bucket are visible in this - // view. - // - // Filters are restricted to be a logical AND of ==/!= of any of the - // following: - // - // - originating project/folder/organization/billing account. - // - resource type - // - log id - // - // For example: - // - // SOURCE("projects/myproject") AND resource.type = "gce_instance" - // AND LOG_ID("stdout") - string filter = 7; -} - -// Describes a sink used to export log entries to one of the following -// destinations in any project: a Cloud Storage bucket, a BigQuery dataset, a -// Pub/Sub topic or a Cloud Logging log bucket. A logs filter controls which log -// entries are exported. The sink must be created within a project, -// organization, billing account, or folder. -message LogSink { - option (google.api.resource) = { - type: "logging.googleapis.com/LogSink" - pattern: "projects/{project}/sinks/{sink}" - pattern: "organizations/{organization}/sinks/{sink}" - pattern: "folders/{folder}/sinks/{sink}" - pattern: "billingAccounts/{billing_account}/sinks/{sink}" - }; - - // Deprecated. This is unused. - enum VersionFormat { - // An unspecified format version that will default to V2. - VERSION_FORMAT_UNSPECIFIED = 0; - - // `LogEntry` version 2 format. - V2 = 1; - - // `LogEntry` version 1 format. - V1 = 2; - } - - // Required. The client-assigned sink identifier, unique within the project. - // - // For example: `"my-syslog-errors-to-pubsub"`. Sink identifiers are limited - // to 100 characters and can include only the following characters: upper and - // lower-case alphanumeric characters, underscores, hyphens, and periods. - // First character has to be alphanumeric. - string name = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. The export destination: - // - // "storage.googleapis.com/[GCS_BUCKET]" - // "bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]" - // "pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]" - // - // The sink's `writer_identity`, set when the sink is created, must have - // permission to write to the destination or else the log entries are not - // exported. For more information, see - // [Exporting Logs with - // Sinks](https://cloud.google.com/logging/docs/api/tasks/exporting-logs). - string destination = 3 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "*" - } - ]; - - // Optional. 
An [advanced logs - // filter](https://cloud.google.com/logging/docs/view/advanced-queries). The - // only exported log entries are those that are in the resource owning the - // sink and that match the filter. - // - // For example: - // - // `logName="projects/[PROJECT_ID]/logs/[LOG_ID]" AND severity>=ERROR` - string filter = 5 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. A description of this sink. - // - // The maximum length of the description is 8000 characters. - string description = 18 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. If set to true, then this sink is disabled and it does not export any log - // entries. - bool disabled = 19 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Log entries that match any of these exclusion filters will not be exported. - // - // If a log entry is matched by both `filter` and one of `exclusion_filters` - // it will not be exported. - repeated LogExclusion exclusions = 16 [(google.api.field_behavior) = OPTIONAL]; - - // Deprecated. This field is unused. - VersionFormat output_version_format = 6 [deprecated = true]; - - // Output only. An IAM identity—a service account or group—under which Cloud - // Logging writes the exported log entries to the sink's destination. This - // field is set by - // [sinks.create][google.logging.v2.ConfigServiceV2.CreateSink] and - // [sinks.update][google.logging.v2.ConfigServiceV2.UpdateSink] based on the - // value of `unique_writer_identity` in those methods. - // - // Until you grant this identity write-access to the destination, log entry - // exports from this sink will fail. For more information, see [Granting - // Access for a - // Resource](https://cloud.google.com/iam/docs/granting-roles-to-service-accounts#granting_access_to_a_service_account_for_a_resource). - // Consult the destination service's documentation to determine the - // appropriate IAM roles to assign to the identity. - // - // Sinks that have a destination that is a log bucket in the same project as - // the sink do not have a writer_identity and no additional permissions are - // required. - string writer_identity = 8 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Optional. This field applies only to sinks owned by organizations and folders. If the - // field is false, the default, only the logs owned by the sink's parent - // resource are available for export. If the field is true, then log entries - // from all the projects, folders, and billing accounts contained in the - // sink's parent resource are also available for export. Whether a particular - // log entry from the children is exported depends on the sink's filter - // expression. - // - // For example, if this field is true, then the filter - // `resource.type=gce_instance` would export all Compute Engine VM instance - // log entries from all projects in the sink's parent. - // - // To only export entries from certain child projects, filter on the project - // part of the log name: - // - // logName:("projects/test-project1/" OR "projects/test-project2/") AND - // resource.type=gce_instance - bool include_children = 9 [(google.api.field_behavior) = OPTIONAL]; - - // Destination dependent options. - oneof options { - // Optional. Options that affect sinks exporting data to BigQuery. - BigQueryOptions bigquery_options = 12 [(google.api.field_behavior) = OPTIONAL]; - } - - // Output only. The creation timestamp of the sink. - // - // This field may not be present for older sinks. 
- google.protobuf.Timestamp create_time = 13 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. The last update timestamp of the sink. - // - // This field may not be present for older sinks. - google.protobuf.Timestamp update_time = 14 [(google.api.field_behavior) = OUTPUT_ONLY]; -} - -// Options that change functionality of a sink exporting data to BigQuery. -message BigQueryOptions { - // Optional. Whether to use [BigQuery's partition - // tables](https://cloud.google.com/bigquery/docs/partitioned-tables). By - // default, Cloud Logging creates dated tables based on the log entries' - // timestamps, e.g. syslog_20170523. With partitioned tables the date suffix - // is no longer present and [special query - // syntax](https://cloud.google.com/bigquery/docs/querying-partitioned-tables) - // has to be used instead. In both cases, tables are sharded based on UTC - // timezone. - bool use_partitioned_tables = 1 [(google.api.field_behavior) = OPTIONAL]; - - // Output only. True if new timestamp column based partitioning is in use, false if legacy - // ingestion-time partitioning is in use. - // - // All new sinks will have this field set true and will use timestamp column - // based partitioning. If use_partitioned_tables is false, this value has no - // meaning and will be false. Legacy sinks using partitioned tables will have - // this field set to false. - bool uses_timestamp_column_partitioning = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; -} - -// The parameters to `ListBuckets`. -message ListBucketsRequest { - // Required. The parent resource whose buckets are to be listed: - // - // "projects/[PROJECT_ID]/locations/[LOCATION_ID]" - // "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]" - // "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]" - // "folders/[FOLDER_ID]/locations/[LOCATION_ID]" - // - // Note: The locations portion of the resource must be specified, but - // supplying the character `-` in place of [LOCATION_ID] will return all - // buckets. - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - child_type: "logging.googleapis.com/LogBucket" - } - ]; - - // Optional. If present, then retrieve the next batch of results from the preceding call - // to this method. `pageToken` must be the value of `nextPageToken` from the - // previous response. The values of other method parameters should be - // identical to those in the previous call. - string page_token = 2 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The maximum number of results to return from this request. Non-positive - // values are ignored. The presence of `nextPageToken` in the response - // indicates that more results might be available. - int32 page_size = 3 [(google.api.field_behavior) = OPTIONAL]; -} - -// The response from ListBuckets. -message ListBucketsResponse { - // A list of buckets. - repeated LogBucket buckets = 1; - - // If there might be more results than appear in this response, then - // `nextPageToken` is included. To get the next set of results, call the same - // method again using the value of `nextPageToken` as `pageToken`. - string next_page_token = 2; -} - -// The parameters to `CreateBucket`. -message CreateBucketRequest { - // Required. 
The resource in which to create the log bucket: - // - // "projects/[PROJECT_ID]/locations/[LOCATION_ID]" - // - // For example: - // - // `"projects/my-project/locations/global"` - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - child_type: "logging.googleapis.com/LogBucket" - } - ]; - - // Required. A client-assigned identifier such as `"my-bucket"`. Identifiers are limited - // to 100 characters and can include only letters, digits, underscores, - // hyphens, and periods. - string bucket_id = 2 [(google.api.field_behavior) = REQUIRED]; - - // Required. The new bucket. The region specified in the new bucket must be compliant - // with any Location Restriction Org Policy. The name field in the bucket is - // ignored. - LogBucket bucket = 3 [(google.api.field_behavior) = REQUIRED]; -} - -// The parameters to `UpdateBucket`. -message UpdateBucketRequest { - // Required. The full resource name of the bucket to update. - // - // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" - // "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" - // "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" - // "folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" - // - // For example: - // - // `"projects/my-project/locations/global/buckets/my-bucket"` - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "logging.googleapis.com/LogBucket" - } - ]; - - // Required. The updated bucket. - LogBucket bucket = 2 [(google.api.field_behavior) = REQUIRED]; - - // Required. Field mask that specifies the fields in `bucket` that need an update. A - // bucket field will be overwritten if, and only if, it is in the update mask. - // `name` and output only fields cannot be updated. - // - // For a detailed `FieldMask` definition, see: - // https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMask - // - // For example: `updateMask=retention_days` - google.protobuf.FieldMask update_mask = 4 [(google.api.field_behavior) = REQUIRED]; -} - -// The parameters to `GetBucket`. -message GetBucketRequest { - // Required. The resource name of the bucket: - // - // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" - // "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" - // "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" - // "folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" - // - // For example: - // - // `"projects/my-project/locations/global/buckets/my-bucket"` - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "logging.googleapis.com/LogBucket" - } - ]; -} - -// The parameters to `DeleteBucket`. -message DeleteBucketRequest { - // Required. The full resource name of the bucket to delete. 
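The `pageToken`/`nextPageToken` contract spelled out for `ListBuckets` above can be driven to exhaustion with a simple loop. This sketch uses the same hypothetical module path and the documented `-` wildcard in place of [LOCATION_ID]:

    use proto::logging::v2::{
        config_service_v2_client::ConfigServiceV2Client, ListBucketsRequest, LogBucket,
    };
    use tonic::transport::Channel;

    async fn list_all_buckets(channel: Channel) -> Result<Vec<LogBucket>, tonic::Status> {
        let mut client = ConfigServiceV2Client::new(channel);
        let mut buckets = Vec::new();
        let mut page_token = String::new();
        loop {
            let response = client
                .list_buckets(ListBucketsRequest {
                    parent: "projects/my-project/locations/-".to_owned(),
                    page_token: page_token.clone(),
                    page_size: 100,
                })
                .await?
                .into_inner();
            buckets.extend(response.buckets);
            // An empty next_page_token means there are no further pages.
            if response.next_page_token.is_empty() {
                break;
            }
            page_token = response.next_page_token;
        }
        Ok(buckets)
    }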
- // - // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" - // "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" - // "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" - // "folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" - // - // For example: - // - // `"projects/my-project/locations/global/buckets/my-bucket"` - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "logging.googleapis.com/LogBucket" - } - ]; -} - -// The parameters to `UndeleteBucket`. -message UndeleteBucketRequest { - // Required. The full resource name of the bucket to undelete. - // - // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" - // "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" - // "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" - // "folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" - // - // For example: - // - // `"projects/my-project/locations/global/buckets/my-bucket"` - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "logging.googleapis.com/LogBucket" - } - ]; -} - -// The parameters to `ListViews`. -message ListViewsRequest { - // Required. The bucket whose views are to be listed: - // - // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" - string parent = 1 [(google.api.field_behavior) = REQUIRED]; - - // Optional. If present, then retrieve the next batch of results from the preceding call - // to this method. `pageToken` must be the value of `nextPageToken` from the - // previous response. The values of other method parameters should be - // identical to those in the previous call. - string page_token = 2 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The maximum number of results to return from this request. - // - // Non-positive values are ignored. The presence of `nextPageToken` in the - // response indicates that more results might be available. - int32 page_size = 3 [(google.api.field_behavior) = OPTIONAL]; -} - -// The response from ListViews. -message ListViewsResponse { - // A list of views. - repeated LogView views = 1; - - // If there might be more results than appear in this response, then - // `nextPageToken` is included. To get the next set of results, call the same - // method again using the value of `nextPageToken` as `pageToken`. - string next_page_token = 2; -} - -// The parameters to `CreateView`. -message CreateViewRequest { - // Required. The bucket in which to create the view - // - // `"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]"` - // - // For example: - // - // `"projects/my-project/locations/global/buckets/my-bucket"` - string parent = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. The id to use for this view. - string view_id = 2 [(google.api.field_behavior) = REQUIRED]; - - // Required. The new view. - LogView view = 3 [(google.api.field_behavior) = REQUIRED]; -} - -// The parameters to `UpdateView`. -message UpdateViewRequest { - // Required. The full resource name of the view to update - // - // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]" - // - // For example: - // - // `"projects/my-project/locations/global/buckets/my-bucket/views/my-view"` - string name = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. The updated view. 
- LogView view = 2 [(google.api.field_behavior) = REQUIRED]; - - // Optional. Field mask that specifies the fields in `view` that need - // an update. A field will be overwritten if, and only if, it is - // in the update mask. `name` and output only fields cannot be updated. - // - // For a detailed `FieldMask` definition, see - // https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMask - // - // For example: `updateMask=filter` - google.protobuf.FieldMask update_mask = 4 [(google.api.field_behavior) = OPTIONAL]; -} - -// The parameters to `GetView`. -message GetViewRequest { - // Required. The resource name of the policy: - // - // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]" - // - // For example: - // - // `"projects/my-project/locations/global/buckets/my-bucket/views/my-view"` - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "logging.googleapis.com/LogView" - } - ]; -} - -// The parameters to `DeleteView`. -message DeleteViewRequest { - // Required. The full resource name of the view to delete: - // - // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]" - // - // For example: - // - // `"projects/my-project/locations/global/buckets/my-bucket/views/my-view"` - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "logging.googleapis.com/LogView" - } - ]; -} - -// The parameters to `ListSinks`. -message ListSinksRequest { - // Required. The parent resource whose sinks are to be listed: - // - // "projects/[PROJECT_ID]" - // "organizations/[ORGANIZATION_ID]" - // "billingAccounts/[BILLING_ACCOUNT_ID]" - // "folders/[FOLDER_ID]" - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - child_type: "logging.googleapis.com/LogSink" - } - ]; - - // Optional. If present, then retrieve the next batch of results from the - // preceding call to this method. `pageToken` must be the value of - // `nextPageToken` from the previous response. The values of other method - // parameters should be identical to those in the previous call. - string page_token = 2 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The maximum number of results to return from this request. - // Non-positive values are ignored. The presence of `nextPageToken` in the - // response indicates that more results might be available. - int32 page_size = 3 [(google.api.field_behavior) = OPTIONAL]; -} - -// Result returned from `ListSinks`. -message ListSinksResponse { - // A list of sinks. - repeated LogSink sinks = 1; - - // If there might be more results than appear in this response, then - // `nextPageToken` is included. To get the next set of results, call the same - // method again using the value of `nextPageToken` as `pageToken`. - string next_page_token = 2; -} - -// The parameters to `GetSink`. -message GetSinkRequest { - // Required. The resource name of the sink: - // - // "projects/[PROJECT_ID]/sinks/[SINK_ID]" - // "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]" - // "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]" - // "folders/[FOLDER_ID]/sinks/[SINK_ID]" - // - // For example: - // - // `"projects/my-project/sinks/my-sink"` - string sink_name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "logging.googleapis.com/LogSink" - } - ]; -} - -// The parameters to `CreateSink`. 
-message CreateSinkRequest { - // Required. The resource in which to create the sink: - // - // "projects/[PROJECT_ID]" - // "organizations/[ORGANIZATION_ID]" - // "billingAccounts/[BILLING_ACCOUNT_ID]" - // "folders/[FOLDER_ID]" - // - // For examples: - // - // `"projects/my-project"` - // `"organizations/123456789"` - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - child_type: "logging.googleapis.com/LogSink" - } - ]; - - // Required. The new sink, whose `name` parameter is a sink identifier that - // is not already in use. - LogSink sink = 2 [(google.api.field_behavior) = REQUIRED]; - - // Optional. Determines the kind of IAM identity returned as `writer_identity` - // in the new sink. If this value is omitted or set to false, and if the - // sink's parent is a project, then the value returned as `writer_identity` is - // the same group or service account used by Cloud Logging before the addition - // of writer identities to this API. The sink's destination must be in the - // same project as the sink itself. - // - // If this field is set to true, or if the sink is owned by a non-project - // resource such as an organization, then the value of `writer_identity` will - // be a unique service account used only for exports from the new sink. For - // more information, see `writer_identity` in [LogSink][google.logging.v2.LogSink]. - bool unique_writer_identity = 3 [(google.api.field_behavior) = OPTIONAL]; -} - -// The parameters to `UpdateSink`. -message UpdateSinkRequest { - // Required. The full resource name of the sink to update, including the parent - // resource and the sink identifier: - // - // "projects/[PROJECT_ID]/sinks/[SINK_ID]" - // "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]" - // "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]" - // "folders/[FOLDER_ID]/sinks/[SINK_ID]" - // - // For example: - // - // `"projects/my-project/sinks/my-sink"` - string sink_name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "logging.googleapis.com/LogSink" - } - ]; - - // Required. The updated sink, whose name is the same identifier that appears as part - // of `sink_name`. - LogSink sink = 2 [(google.api.field_behavior) = REQUIRED]; - - // Optional. See [sinks.create][google.logging.v2.ConfigServiceV2.CreateSink] - // for a description of this field. When updating a sink, the effect of this - // field on the value of `writer_identity` in the updated sink depends on both - // the old and new values of this field: - // - // + If the old and new values of this field are both false or both true, - // then there is no change to the sink's `writer_identity`. - // + If the old value is false and the new value is true, then - // `writer_identity` is changed to a unique service account. - // + It is an error if the old value is true and the new value is - // set to false or defaulted to false. - bool unique_writer_identity = 3 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Field mask that specifies the fields in `sink` that need - // an update. A sink field will be overwritten if, and only if, it is - // in the update mask. `name` and output only fields cannot be updated. - // - // An empty `updateMask` is temporarily treated as using the following mask - // for backwards compatibility purposes: - // - // `destination,filter,includeChildren` - // - // At some point in the future, behavior will be removed and specifying an - // empty `updateMask` will be an error. 
-  //
-  // For a detailed `FieldMask` definition, see
-  // https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMask
-  //
-  // For example: `updateMask=filter`
-  google.protobuf.FieldMask update_mask = 4 [(google.api.field_behavior) = OPTIONAL];
-}
-
-// The parameters to `DeleteSink`.
-message DeleteSinkRequest {
-  // Required. The full resource name of the sink to delete, including the parent
-  // resource and the sink identifier:
-  //
-  //     "projects/[PROJECT_ID]/sinks/[SINK_ID]"
-  //     "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]"
-  //     "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]"
-  //     "folders/[FOLDER_ID]/sinks/[SINK_ID]"
-  //
-  // For example:
-  //
-  //   `"projects/my-project/sinks/my-sink"`
-  string sink_name = 1 [
-    (google.api.field_behavior) = REQUIRED,
-    (google.api.resource_reference) = {
-      type: "logging.googleapis.com/LogSink"
-    }
-  ];
-}
-
-// Specifies a set of log entries that are filtered out by a sink. If
-// your Google Cloud resource receives a large volume of log entries, you can
-// use exclusions to reduce your chargeable logs. Note that exclusions on
-// organization-level and folder-level sinks don't apply to child resources.
-// Note also that you cannot modify the _Required sink or exclude logs from it.
-message LogExclusion {
-  option (google.api.resource) = {
-    type: "logging.googleapis.com/LogExclusion"
-    pattern: "projects/{project}/exclusions/{exclusion}"
-    pattern: "organizations/{organization}/exclusions/{exclusion}"
-    pattern: "folders/{folder}/exclusions/{exclusion}"
-    pattern: "billingAccounts/{billing_account}/exclusions/{exclusion}"
-  };
-
-  // Required. A client-assigned identifier, such as `"load-balancer-exclusion"`.
-  // Identifiers are limited to 100 characters and can include only letters,
-  // digits, underscores, hyphens, and periods. First character has to be
-  // alphanumeric.
-  string name = 1 [(google.api.field_behavior) = REQUIRED];
-
-  // Optional. A description of this exclusion.
-  string description = 2 [(google.api.field_behavior) = OPTIONAL];
-
-  // Required. An [advanced logs
-  // filter](https://cloud.google.com/logging/docs/view/advanced-queries) that
-  // matches the log entries to be excluded. By using the [sample
-  // function](https://cloud.google.com/logging/docs/view/advanced-queries#sample),
-  // you can exclude less than 100% of the matching log entries.
-  //
-  // For example, the following query matches 99% of low-severity log entries
-  // from Google Cloud Storage buckets:
-  //
-  //   `resource.type=gcs_bucket severity<ERROR sample(insertId, 0.99)`
-  string filter = 3 [(google.api.field_behavior) = REQUIRED];

-pub struct StackDriverExporter {
-    tx: futures_channel::mpsc::Sender<Vec<SpanData>>,
-    pending_count: Arc<AtomicUsize>,
-    maximum_shutdown_duration: Duration,
-}
-
-impl StackDriverExporter {
-    pub fn builder() -> Builder {
-        Builder::default()
-    }
-
-    pub fn pending_count(&self) -> usize {
-        self.pending_count.load(Ordering::Relaxed)
-    }
-}
-
-impl SpanExporter for StackDriverExporter {
-    fn export(&mut self, batch: Vec<SpanData>) -> BoxFuture<'static, ExportResult> {
-        match self.tx.try_send(batch) {
-            Err(e) => Box::pin(std::future::ready(Err(TraceError::Other(Box::new(
-                e.into_send_error(),
-            ))))),
-            Ok(()) => {
-                self.pending_count.fetch_add(1, Ordering::Relaxed);
-                Box::pin(std::future::ready(Ok(())))
-            }
-        }
-    }
-
-    fn shutdown(&mut self) {
-        let start = Instant::now();
-        while (Instant::now() - start) < self.maximum_shutdown_duration && self.pending_count() > 0
-        {
-            std::thread::yield_now();
-            // Spin for a bit and give the inner export some time to upload, with a timeout.
-        }
-    }
-}
-
-impl fmt::Debug for StackDriverExporter {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        #[allow(clippy::unneeded_field_pattern)]
-        let Self {
-            tx: _,
-            pending_count,
-            maximum_shutdown_duration,
-        } = self;
-        f.debug_struct("StackDriverExporter")
-            .field("tx", &"(elided)")
-            .field("pending_count", pending_count)
-            .field("maximum_shutdown_duration", maximum_shutdown_duration)
-            .finish()
-    }
-}
-
-/// Helper type to build a `StackDriverExporter`.
-#[derive(Clone, Default)]
-pub struct Builder {
-    maximum_shutdown_duration: Option<Duration>,
-    num_concurrent_requests: Option<usize>,
-    log_context: Option<LogContext>,
-}
-
-impl Builder {
-    /// Set the maximum duration `shutdown` will wait for pending exports to finish.
-    pub fn maximum_shutdown_duration(mut self, duration: Duration) -> Self {
-        self.maximum_shutdown_duration = Some(duration);
-        self
-    }
-
-    /// Set the number of concurrent requests.
-    ///
-    /// If `num_concurrent_requests` is set to `0` or `None` then no limit is enforced.
-    pub fn num_concurrent_requests(mut self, num_concurrent_requests: usize) -> Self {
-        self.num_concurrent_requests = Some(num_concurrent_requests);
-        self
-    }
-
-    /// Enable writing log entries with the given `log_context`.
-    pub fn log_context(mut self, log_context: LogContext) -> Self {
-        self.log_context = Some(log_context);
-        self
-    }
-
-    pub async fn build<A: Authorizer>(
-        self,
-        authenticator: A,
-    ) -> Result<(StackDriverExporter, impl Future<Output = ()>), Error>
-    where
-        Error: From<A::Error>,
-    {
-        let Self {
-            maximum_shutdown_duration,
-            num_concurrent_requests,
-            log_context,
-        } = self;
-        let uri = http::uri::Uri::from_static("https://cloudtrace.googleapis.com:443");
-
-        let trace_channel = Channel::builder(uri)
-            .tls_config(ClientTlsConfig::new())
-            .map_err(|e| Error::Transport(e.into()))?
-            .connect()
-            .await
-            .map_err(|e| Error::Transport(e.into()))?;
-
-        let log_client = match log_context {
-            Some(log_context) => {
-                let log_channel = Channel::builder(http::uri::Uri::from_static(
-                    "https://logging.googleapis.com:443",
-                ))
-                .tls_config(ClientTlsConfig::new())
-                .map_err(|e| Error::Transport(e.into()))?
-                .connect()
-                .await
-                .map_err(|e| Error::Transport(e.into()))?;
-
-                Some(LogClient {
-                    client: LoggingServiceV2Client::new(log_channel),
-                    context: Arc::new(InternalLogContext::from(log_context)),
-                })
-            }
-            None => None,
-        };
-
-        let (tx, rx) = futures_channel::mpsc::channel(64);
-        let pending_count = Arc::new(AtomicUsize::new(0));
-        let scopes = Arc::new(match log_client {
-            Some(_) => vec![TRACE_APPEND, LOGGING_WRITE],
-            None => vec![TRACE_APPEND],
-        });
-
-        let count_clone = pending_count.clone();
-        let future = async move {
-            let trace_client = TraceServiceClient::new(trace_channel);
-            let authorizer = &authenticator;
-            let log_client = log_client.clone();
-            rx.for_each_concurrent(num_concurrent_requests, move |batch| {
-                let trace_client = trace_client.clone();
-                let log_client = log_client.clone();
-                let pending_count = count_clone.clone();
-                let scopes = scopes.clone();
-                ExporterContext {
-                    trace_client,
-                    log_client,
-                    authorizer,
-                    pending_count,
-                    scopes,
-                }
-                .export(batch)
-            })
-            .await
-        };
-
-        let exporter = StackDriverExporter {
-            tx,
-            pending_count,
-            maximum_shutdown_duration: maximum_shutdown_duration
-                .unwrap_or_else(|| Duration::from_secs(5)),
-        };
-
-        Ok((exporter, future))
-    }
-}
-
-struct ExporterContext<'a, A> {
-    trace_client: TraceServiceClient<Channel>,
-    log_client: Option<LogClient>,
-    authorizer: &'a A,
-    pending_count: Arc<AtomicUsize>,
-    scopes: Arc<Vec<&'static str>>,
-}
-
-impl<A: Authorizer> ExporterContext<'_, A>
-where
-    Error: From<A::Error>,
-{
-    async fn export(mut self, batch: Vec<SpanData>) {
-        use proto::devtools::cloudtrace::v2::span::time_event::Value;
-
-        let mut entries = Vec::new();
-        let mut spans = Vec::with_capacity(batch.len());
-        for span in batch {
-            let trace_id = hex::encode(span.span_context.trace_id().to_bytes());
-            let span_id = hex::encode(span.span_context.span_id().to_bytes());
-            let time_event = match &self.log_client {
-                None => span
-                    .events
-                    .into_iter()
-                    .map(|event| TimeEvent {
-                        time: Some(event.timestamp.into()),
-                        value: Some(Value::Annotation(Annotation {
-                            description: Some(to_truncate(event.name.into_owned())),
-                            ..Default::default()
-                        })),
-                    })
-                    .collect(),
-                Some(client) => {
-                    entries.extend(span.events.into_iter().map(|event| {
-                        let (mut level, mut target, mut labels) =
-                            (LogSeverity::Default, None, HashMap::default());
-                        for kv in event.attributes {
-                            match kv.key.as_str() {
-                                "level" => {
-                                    level = match kv.value.as_str().as_ref() {
-                                        "DEBUG" | "TRACE" => LogSeverity::Debug,
-                                        "INFO" => LogSeverity::Info,
-                                        "WARN" => LogSeverity::Warning,
-                                        "ERROR" => LogSeverity::Error,
-                                        _ => LogSeverity::Default, // tracing::Level is limited to the above 5
-                                    }
-                                }
-                                "target" => target = Some(kv.value.as_str().into_owned()),
-                                key => {
-                                    labels.insert(key.to_owned(), kv.value.as_str().into_owned());
-                                }
-                            }
-                        }
-                        let project_id = self.authorizer.project_id();
-                        let log_id = &client.context.log_id;
-                        LogEntry {
-                            log_name: format!("projects/{project_id}/logs/{log_id}"),
-                            resource: Some(client.context.resource.clone()),
-                            severity: level as i32,
-                            timestamp: Some(event.timestamp.into()),
-                            labels,
-                            trace: format!("projects/{project_id}/traces/{trace_id}"),
-                            span_id: span_id.clone(),
-                            source_location: target.map(|target| LogEntrySourceLocation {
-                                file: String::new(),
-                                line: 0,
-                                function: target,
-                            }),
-                            payload: Some(Payload::TextPayload(event.name.into_owned())),
-                            // severity, source_location, text_payload
-                            ..Default::default()
-                        }
-                    }));
-
-                    vec![]
-                }
-            };
-
-            spans.push(Span {
-                name: format!(
-                    "projects/{}/traces/{}/spans/{}",
-                    self.authorizer.project_id(),
hex::encode(span.span_context.trace_id().to_bytes()), - hex::encode(span.span_context.span_id().to_bytes()) - ), - display_name: Some(to_truncate(span.name.into_owned())), - span_id: hex::encode(span.span_context.span_id().to_bytes()), - // From the API docs: If this is a root span, - // then this field must be empty. - parent_span_id: match span.parent_span_id { - SpanId::INVALID => "".to_owned(), - _ => hex::encode(span.parent_span_id.to_bytes()), - }, - start_time: Some(span.start_time.into()), - end_time: Some(span.end_time.into()), - attributes: Some((span.attributes, span.resource.as_ref()).into()), - time_events: Some(TimeEvents { - time_event, - ..Default::default() - }), - links: transform_links(&span.links), - status: status(span.status), - span_kind: SpanKind::from(span.span_kind) as i32, - ..Default::default() - }); - } - - let mut req = Request::new(BatchWriteSpansRequest { - name: format!("projects/{}", self.authorizer.project_id()), - spans, - }); - - self.pending_count.fetch_sub(1, Ordering::Relaxed); - if let Err(e) = self.authorizer.authorize(&mut req, &self.scopes).await { - handle_error(TraceError::from(Error::Authorizer(e.into()))); - } else if let Err(e) = self.trace_client.batch_write_spans(req).await { - handle_error(TraceError::from(Error::Transport(e.into()))); - } - - let client = match &mut self.log_client { - Some(client) => client, - None => return, - }; - - let mut req = Request::new(WriteLogEntriesRequest { - log_name: format!( - "projects/{}/logs/{}", - self.authorizer.project_id(), - client.context.log_id, - ), - entries, - dry_run: false, - labels: HashMap::default(), - partial_success: true, - resource: None, - }); - - if let Err(e) = self.authorizer.authorize(&mut req, &self.scopes).await { - handle_error(TraceError::from(Error::from(e))); - } else if let Err(e) = client.client.write_log_entries(req).await { - handle_error(TraceError::from(Error::Transport(e.into()))); - } - } -} - -#[cfg(feature = "yup-authorizer")] -pub struct YupAuthorizer { - authenticator: Authenticator>, - project_id: String, -} - -#[cfg(feature = "yup-authorizer")] -impl YupAuthorizer { - pub async fn new( - credentials_path: impl AsRef, - persistent_token_file: impl Into>, - ) -> Result { - let service_account_key = yup_oauth2::read_service_account_key(&credentials_path).await?; - let project_id = service_account_key - .project_id - .as_ref() - .ok_or_else(|| Error::Other("project_id is missing".into()))? 
- .clone(); - let mut authenticator = - yup_oauth2::ServiceAccountAuthenticator::builder(service_account_key); - if let Some(persistent_token_file) = persistent_token_file.into() { - authenticator = authenticator.persist_tokens_to_disk(persistent_token_file); - } - - Ok(Self { - authenticator: authenticator.build().await?, - project_id, - }) - } -} - -#[cfg(feature = "yup-authorizer")] -#[async_trait] -impl Authorizer for YupAuthorizer { - type Error = Error; - - fn project_id(&self) -> &str { - &self.project_id - } - - async fn authorize( - &self, - req: &mut Request, - scopes: &[&str], - ) -> Result<(), Self::Error> { - let token = self - .authenticator - .token(scopes) - .await - .map_err(|e| Error::Authorizer(e.into()))?; - - let token = match token.token() { - Some(token) => token, - None => return Err(Error::Other("unable to access token contents".into())), - }; - - req.metadata_mut().insert( - "authorization", - MetadataValue::try_from(format!("Bearer {}", token)).unwrap(), - ); - Ok(()) - } -} - -#[cfg(feature = "gcp_auth")] -pub struct GcpAuthorizer { - manager: gcp_auth::AuthenticationManager, - project_id: String, -} - -#[cfg(feature = "gcp_auth")] -impl GcpAuthorizer { - pub async fn new() -> Result { - let manager = gcp_auth::AuthenticationManager::new() - .await - .map_err(|e| Error::Authorizer(e.into()))?; - - let project_id = manager - .project_id() - .await - .map_err(|e| Error::Authorizer(e.into()))?; - - Ok(Self { - manager, - project_id, - }) - } - pub fn from_gcp_auth(manager: gcp_auth::AuthenticationManager, project_id: String) -> Self { - Self { - manager, - project_id, - } - } -} - -#[cfg(feature = "gcp_auth")] -#[async_trait] -impl Authorizer for GcpAuthorizer { - type Error = Error; - - fn project_id(&self) -> &str { - &self.project_id - } - - async fn authorize( - &self, - req: &mut Request, - scopes: &[&str], - ) -> Result<(), Self::Error> { - let token = self - .manager - .get_token(scopes) - .await - .map_err(|e| Error::Authorizer(e.into()))?; - - req.metadata_mut().insert( - "authorization", - MetadataValue::try_from(format!("Bearer {}", token.as_str())).unwrap(), - ); - - Ok(()) - } -} - -#[async_trait] -pub trait Authorizer: Sync + Send + 'static { - type Error: std::error::Error + fmt::Debug + Send + Sync; - - fn project_id(&self) -> &str; - async fn authorize( - &self, - request: &mut Request, - scopes: &[&str], - ) -> Result<(), Self::Error>; -} - -impl From for AttributeValue { - fn from(v: Value) -> AttributeValue { - use proto::devtools::cloudtrace::v2::attribute_value; - let new_value = match v { - Value::Bool(v) => attribute_value::Value::BoolValue(v), - Value::F64(v) => attribute_value::Value::StringValue(to_truncate(v.to_string())), - Value::I64(v) => attribute_value::Value::IntValue(v), - Value::String(v) => attribute_value::Value::StringValue(to_truncate(v.to_string())), - Value::Array(_) => attribute_value::Value::StringValue(to_truncate(v.to_string())), - }; - AttributeValue { - value: Some(new_value), - } - } -} - -fn to_truncate(s: String) -> TruncatableString { - TruncatableString { - value: s, - ..Default::default() - } -} - -#[derive(Debug, Error)] -pub enum Error { - #[error("authorizer error: {0}")] - Authorizer(#[source] Box), - #[error("I/O error: {0}")] - Io(#[from] std::io::Error), - #[error("{0}")] - Other(#[from] Box), - #[error("tonic error: {0}")] - Transport(#[source] Box), -} - -impl ExportError for Error { - fn exporter_name(&self) -> &'static str { - "stackdriver" - } -} - -/// As defined in 
https://cloud.google.com/logging/docs/reference/v2/rpc/google.logging.type#google.logging.type.LogSeverity. -enum LogSeverity { - Default = 0, - Debug = 100, - Info = 200, - Warning = 400, - Error = 500, -} - -#[derive(Clone)] -struct LogClient { - client: LoggingServiceV2Client, - context: Arc, -} - -struct InternalLogContext { - log_id: String, - resource: proto::api::MonitoredResource, -} - -#[derive(Clone)] -pub struct LogContext { - pub log_id: String, - pub resource: MonitoredResource, -} - -impl From for InternalLogContext { - fn from(cx: LogContext) -> Self { - let mut labels = HashMap::default(); - let resource = match cx.resource { - MonitoredResource::CloudRunRevision { - project_id, - service_name, - revision_name, - location, - configuration_name, - } => { - labels.insert("project_id".to_string(), project_id); - if let Some(service_name) = service_name { - labels.insert("service_name".to_string(), service_name); - } - if let Some(revision_name) = revision_name { - labels.insert("revision_name".to_string(), revision_name); - } - if let Some(location) = location { - labels.insert("location".to_string(), location); - } - if let Some(configuration_name) = configuration_name { - labels.insert("configuration_name".to_string(), configuration_name); - } - - proto::api::MonitoredResource { - r#type: "cloud_run_revision".to_owned(), - labels, - } - } - MonitoredResource::GenericNode { - project_id, - location, - namespace, - node_id, - } => { - labels.insert("project_id".to_string(), project_id); - if let Some(location) = location { - labels.insert("location".to_string(), location); - } - if let Some(namespace) = namespace { - labels.insert("namespace".to_string(), namespace); - } - if let Some(node_id) = node_id { - labels.insert("node_id".to_string(), node_id); - } - - proto::api::MonitoredResource { - r#type: "generic_node".to_owned(), - labels, - } - } - MonitoredResource::GenericTask { - project_id, - location, - namespace, - job, - task_id, - } => { - labels.insert("project_id".to_owned(), project_id); - if let Some(location) = location { - labels.insert("location".to_owned(), location); - } - if let Some(namespace) = namespace { - labels.insert("namespace".to_owned(), namespace); - } - if let Some(job) = job { - labels.insert("job".to_owned(), job); - } - if let Some(task_id) = task_id { - labels.insert("task_id".to_owned(), task_id); - } - - proto::api::MonitoredResource { - r#type: "generic_task".to_owned(), - labels, - } - } - MonitoredResource::Global { project_id } => { - labels.insert("project_id".to_owned(), project_id); - proto::api::MonitoredResource { - r#type: "global".to_owned(), - labels, - } - } - }; - - Self { - log_id: cx.log_id, - resource, - } - } -} - -/// A description of a `MonitoredResource`. -/// -/// Possible values are listed in the [API documentation](https://cloud.google.com/logging/docs/api/v2/resource-list). -/// Please submit an issue or pull request if you want to use a resource type not listed here. 
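Putting the pieces above together, a sketch of end-to-end wiring under the `gcp_auth` feature, assuming a Tokio runtime and the `rt-tokio` feature of opentelemetry_sdk; the log id and project wiring are illustrative:

    use opentelemetry_sdk::{runtime::Tokio, trace::TracerProvider};

    // Must run inside a Tokio runtime: build() dials the Google endpoints, and the
    // returned driver future is what actually performs the uploads.
    async fn init_tracing() -> Result<TracerProvider, Error> {
        let authorizer = GcpAuthorizer::new().await?;
        let log_context = LogContext {
            log_id: "my-service".to_owned(),
            resource: MonitoredResource::Global {
                project_id: authorizer.project_id().to_owned(),
            },
        };
        let (exporter, driver) = StackDriverExporter::builder()
            .log_context(log_context)
            .build(authorizer)
            .await?;
        // Exports make no progress unless the driver future is polled.
        tokio::spawn(driver);
        Ok(TracerProvider::builder()
            .with_batch_exporter(exporter, Tokio)
            .build())
    }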
-#[derive(Clone)] -pub enum MonitoredResource { - Global { - project_id: String, - }, - GenericNode { - project_id: String, - location: Option, - namespace: Option, - node_id: Option, - }, - GenericTask { - project_id: String, - location: Option, - namespace: Option, - job: Option, - task_id: Option, - }, - CloudRunRevision { - project_id: String, - service_name: Option, - revision_name: Option, - location: Option, - configuration_name: Option, - }, -} - -impl From<(Vec, &Resource)> for Attributes { - /// Combines `EvictedHashMap` and `Resource` attributes into a maximum of 32. - /// - /// The `Resource` takes precedence over the `EvictedHashMap` attributes. - fn from((attributes, resource): (Vec, &Resource)) -> Self { - let mut dropped_attributes_count: i32 = 0; - let num_resource_attributes = resource.len(); - let num_attributes = attributes.len(); - - let attributes_as_key_value_tuples: Vec<(Key, Value)> = attributes - .into_iter() - .map(|kv| (kv.key, kv.value)) - .collect(); - - let attribute_map = resource - .into_iter() - .map(|(k, v)| (k.clone(), v.clone())) - .chain(attributes_as_key_value_tuples) - .flat_map(|(k, v)| { - let key = k.as_str(); - if key.len() > 128 { - dropped_attributes_count += 1; - return None; - } - - if k.as_str() == semconv::resource::SERVICE_NAME { - return Some((GCP_SERVICE_NAME.to_owned(), v.into())); - } else if key == HTTP_PATH { - return Some((GCP_HTTP_PATH.to_owned(), v.into())); - } - - for (otel_key, gcp_key) in KEY_MAP { - if otel_key == k.as_str() { - return Some((gcp_key.to_owned(), v.into())); - } - } - - Some((key.to_owned(), v.into())) - }) - .take(MAX_ATTRIBUTES_PER_SPAN) - .collect(); - - Attributes { - attribute_map, - dropped_attributes_count: dropped_attributes_count - + (num_resource_attributes + num_attributes).saturating_sub(MAX_ATTRIBUTES_PER_SPAN) - as i32, - } - } -} - -fn transform_links(links: &opentelemetry_sdk::trace::SpanLinks) -> Option { - if links.is_empty() { - return None; - } - - Some(Links { - dropped_links_count: links.dropped_count as i32, - link: links - .iter() - .map(|link| Link { - trace_id: hex::encode(link.span_context.trace_id().to_bytes()), - span_id: hex::encode(link.span_context.span_id().to_bytes()), - ..Default::default() - }) - .collect(), - }) -} - -// Map conventional OpenTelemetry keys to their GCP counterparts. 
-const KEY_MAP: [(&str, &str); 8] = [ - (HTTP_HOST, "/http/host"), - (semconv::trace::HTTP_METHOD, "/http/method"), - (semconv::trace::HTTP_TARGET, "/http/path"), - (semconv::trace::HTTP_URL, "/http/url"), - (HTTP_USER_AGENT, "/http/user_agent"), - (semconv::trace::HTTP_STATUS_CODE, "/http/status_code"), - (semconv::trace::HTTP_ROUTE, "/http/route"), - (HTTP_PATH, GCP_HTTP_PATH), -]; - -impl From for SpanKind { - fn from(span_kind: opentelemetry::trace::SpanKind) -> Self { - match span_kind { - opentelemetry::trace::SpanKind::Client => SpanKind::Client, - opentelemetry::trace::SpanKind::Server => SpanKind::Server, - opentelemetry::trace::SpanKind::Producer => SpanKind::Producer, - opentelemetry::trace::SpanKind::Consumer => SpanKind::Consumer, - opentelemetry::trace::SpanKind::Internal => SpanKind::Internal, - } - } -} - -fn status(value: opentelemetry::trace::Status) -> Option { - match value { - opentelemetry::trace::Status::Ok => Some(Status { - code: Code::Ok as i32, - message: "".to_owned(), - details: vec![], - }), - opentelemetry::trace::Status::Unset => None, - opentelemetry::trace::Status::Error { description } => Some(Status { - code: Code::Unknown as i32, - message: description.into(), - details: vec![], - }), - } -} -const TRACE_APPEND: &str = "https://www.googleapis.com/auth/trace.append"; -const LOGGING_WRITE: &str = "https://www.googleapis.com/auth/logging.write"; -const GCP_SERVICE_NAME: &str = "g.co/gae/app/module"; -const MAX_ATTRIBUTES_PER_SPAN: usize = 32; - -#[cfg(test)] -mod tests { - use super::*; - use opentelemetry::{KeyValue, Value}; - use opentelemetry_semantic_conventions as semcov; - - #[test] - fn test_attributes_mapping() { - let capacity = 10; - let mut attributes = Vec::with_capacity(capacity); - - // hostAttribute = "http.host" - attributes.push(KeyValue::new(HTTP_HOST, "example.com:8080")); - - // methodAttribute = "http.method" - attributes.push(KeyValue::new(semcov::trace::HTTP_METHOD, "POST")); - - // pathAttribute = "http.path" - attributes.push(KeyValue::new(HTTP_PATH, "/path/12314/?q=ddds#123")); - - // urlAttribute = "http.url" - attributes.push(KeyValue::new( - semcov::trace::HTTP_URL, - "https://example.com:8080/webshop/articles/4?s=1", - )); - - // userAgentAttribute = "http.user_agent" - attributes.push(KeyValue::new( - HTTP_USER_AGENT, - "CERN-LineMode/2.15 libwww/2.17b3", - )); - - // statusCodeAttribute = "http.status_code" - attributes.push(KeyValue::new(semcov::trace::HTTP_STATUS_CODE, 200i64)); - - // statusCodeAttribute = "http.route" - attributes.push(KeyValue::new( - semcov::trace::HTTP_ROUTE, - "/webshop/articles/:article_id", - )); - - // serviceAttribute = "service.name" - let resources = Resource::new([KeyValue::new( - semcov::resource::SERVICE_NAME, - "Test Service Name", - )]); - - let actual: Attributes = (attributes, &resources).into(); - - assert_eq!(actual.attribute_map.len(), 8); - assert_eq!(actual.dropped_attributes_count, 0); - assert_eq!( - actual.attribute_map.get("/http/host"), - Some(&AttributeValue::from(Value::String( - "example.com:8080".into() - ))) - ); - assert_eq!( - actual.attribute_map.get("/http/method"), - Some(&AttributeValue::from(Value::String("POST".into()))), - ); - assert_eq!( - actual.attribute_map.get("/http/path"), - Some(&AttributeValue::from(Value::String( - "/path/12314/?q=ddds#123".into() - ))), - ); - assert_eq!( - actual.attribute_map.get("/http/route"), - Some(&AttributeValue::from(Value::String( - "/webshop/articles/:article_id".into() - ))), - ); - assert_eq!( - 
-            actual.attribute_map.get("/http/url"),
-            Some(&AttributeValue::from(Value::String(
-                "https://example.com:8080/webshop/articles/4?s=1".into(),
-            ))),
-        );
-        assert_eq!(
-            actual.attribute_map.get("/http/user_agent"),
-            Some(&AttributeValue::from(Value::String(
-                "CERN-LineMode/2.15 libwww/2.17b3".into()
-            ))),
-        );
-        assert_eq!(
-            actual.attribute_map.get("/http/status_code"),
-            Some(&AttributeValue::from(Value::I64(200))),
-        );
-        assert_eq!(
-            actual.attribute_map.get("g.co/gae/app/module"),
-            Some(&AttributeValue::from(Value::String(
-                "Test Service Name".into()
-            ))),
-        );
-    }
-
-    #[test]
-    fn test_too_many() {
-        let resources = Resource::new([KeyValue::new(
-            semcov::resource::SERVICE_NAME,
-            "Test Service Name",
-        )]);
-        let mut attributes = Vec::with_capacity(32);
-        for i in 0..32 {
-            attributes.push(KeyValue::new(
-                format!("key{}", i),
-                Value::String(format!("value{}", i).into()),
-            ));
-        }
-
-        let actual: Attributes = (attributes, &resources).into();
-
-        assert_eq!(actual.attribute_map.len(), 32);
-        assert_eq!(actual.dropped_attributes_count, 1);
-        assert_eq!(
-            actual.attribute_map.get("g.co/gae/app/module"),
-            Some(&AttributeValue::from(Value::String(
-                "Test Service Name".into()
-            ))),
-        );
-    }
-
-    #[test]
-    fn test_attributes_mapping_http_target() {
-        // targetAttribute = "http.target"
-        let attributes = vec![KeyValue::new(
-            semcov::trace::HTTP_TARGET,
-            "/path/12314/?q=ddds#123",
-        )];
-
-        let resources = Resource::new([]);
-        let actual: Attributes = (attributes, &resources).into();
-
-        assert_eq!(actual.attribute_map.len(), 1);
-        assert_eq!(actual.dropped_attributes_count, 0);
-        assert_eq!(
-            actual.attribute_map.get("/http/path"),
-            Some(&AttributeValue::from(Value::String(
-                "/path/12314/?q=ddds#123".into()
-            ))),
-        );
-    }
-
-    #[test]
-    fn test_attributes_mapping_dropped_attributes_count() {
-        let attributes = vec![
-            KeyValue::new("answer", Value::I64(42)),
-            KeyValue::new(
-                "long_attribute_key_dvwmacxpeefbuemoxljmqvldjxmvvihoeqnuqdsyovwgljtnemouidabhkmvsnauwfnaihekcfwhugejboiyfthyhmkpsaxtidlsbwsmirebax",
-                Value::String("Some value".into()),
-            ),
-        ];
-
-        let resources = Resource::new([]);
-        let actual: Attributes = (attributes, &resources).into();
-        assert_eq!(
-            actual,
-            Attributes {
-                attribute_map: HashMap::from([(
-                    "answer".into(),
-                    AttributeValue::from(Value::I64(42))
-                ),]),
-                dropped_attributes_count: 1,
-            }
-        );
-        assert_eq!(actual.attribute_map.len(), 1);
-        assert_eq!(actual.dropped_attributes_count, 1);
-    }
-}
diff --git a/opentelemetry-stackdriver/src/proto/api.rs b/opentelemetry-stackdriver/src/proto/api.rs
deleted file mode 100644
index eeeb2f2ca0..0000000000
--- a/opentelemetry-stackdriver/src/proto/api.rs
+++ /dev/null
@@ -1,1237 +0,0 @@
-/// Defines the HTTP configuration for an API service. It contains a list of
-/// \[HttpRule][google.api.HttpRule\], each specifying the mapping of an RPC method
-/// to one or more HTTP REST API methods.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct Http {
-    /// A list of HTTP configuration rules that apply to individual API methods.
-    ///
-    /// **NOTE:** All service configuration rules follow "last one wins" order.
-    #[prost(message, repeated, tag = "1")]
-    pub rules: ::prost::alloc::vec::Vec<HttpRule>,
-    /// When set to true, URL path parameters will be fully URI-decoded except in
-    /// cases of single segment matches in reserved expansion, where "%2F" will be
-    /// left encoded.
-    ///
-    /// The default behavior is to not decode RFC 6570 reserved characters in multi
-    /// segment matches.
-    #[prost(bool, tag = "2")]
-    pub fully_decode_reserved_expansion: bool,
-}
-/// # gRPC Transcoding
-///
-/// gRPC Transcoding is a feature for mapping between a gRPC method and one or
-/// more HTTP REST endpoints. It allows developers to build a single API service
-/// that supports both gRPC APIs and REST APIs. Many systems, including [Google
-/// APIs](),
-/// [Cloud Endpoints](), [gRPC
-/// Gateway](),
-/// and \[Envoy\]() proxy support this feature
-/// and use it for large scale production services.
-///
-/// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies
-/// how different portions of the gRPC request message are mapped to the URL
-/// path, URL query parameters, and HTTP request body. It also controls how the
-/// gRPC response message is mapped to the HTTP response body. `HttpRule` is
-/// typically specified as a `google.api.http` annotation on the gRPC method.
-///
-/// Each mapping specifies a URL path template and an HTTP method. The path
-/// template may refer to one or more fields in the gRPC request message, as long
-/// as each field is a non-repeated field with a primitive (non-message) type.
-/// The path template controls how fields of the request message are mapped to
-/// the URL path.
-///
-/// Example:
-///
-///     service Messaging {
-///       rpc GetMessage(GetMessageRequest) returns (Message) {
-///         option (google.api.http) = {
-///             get: "/v1/{name=messages/*}"
-///         };
-///       }
-///     }
-///     message GetMessageRequest {
-///       string name = 1; // Mapped to URL path.
-///     }
-///     message Message {
-///       string text = 1; // The resource content.
-///     }
-///
-/// This enables an HTTP REST to gRPC mapping as below:
-///
-/// HTTP | gRPC
-/// -----|-----
-/// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")`
-///
-/// Any fields in the request message which are not bound by the path template
-/// automatically become HTTP query parameters if there is no HTTP request body.
-/// For example:
-///
-///     service Messaging {
-///       rpc GetMessage(GetMessageRequest) returns (Message) {
-///         option (google.api.http) = {
-///             get:"/v1/messages/{message_id}"
-///         };
-///       }
-///     }
-///     message GetMessageRequest {
-///       message SubMessage {
-///         string subfield = 1;
-///       }
-///       string message_id = 1; // Mapped to URL path.
-///       int64 revision = 2;    // Mapped to URL query parameter `revision`.
-///       SubMessage sub = 3;    // Mapped to URL query parameter `sub.subfield`.
-///     }
-///
-/// This enables an HTTP JSON to RPC mapping as below:
-///
-/// HTTP | gRPC
-/// -----|-----
-/// `GET /v1/messages/123456?revision=2&sub.subfield=foo` |
-/// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield:
-/// "foo"))`
-///
-/// Note that fields which are mapped to URL query parameters must have a
-/// primitive type or a repeated primitive type or a non-repeated message type.
-/// In the case of a repeated type, the parameter can be repeated in the URL
-/// as `...?param=A&param=B`. In the case of a message type, each field of the
-/// message is mapped to a separate parameter, such as
-/// `...?foo.a=A&foo.b=B&foo.c=C`.
-///
-/// For HTTP methods that allow a request body, the `body` field
-/// specifies the mapping.
Consider a REST update method on the -/// message resource collection: -/// -/// service Messaging { -/// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { -/// option (google.api.http) = { -/// patch: "/v1/messages/{message_id}" -/// body: "message" -/// }; -/// } -/// } -/// message UpdateMessageRequest { -/// string message_id = 1; // mapped to the URL -/// Message message = 2; // mapped to the body -/// } -/// -/// The following HTTP JSON to RPC mapping is enabled, where the -/// representation of the JSON in the request body is determined by -/// protos JSON encoding: -/// -/// HTTP | gRPC -/// -----|----- -/// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: -/// "123456" message { text: "Hi!" })` -/// -/// The special name `*` can be used in the body mapping to define that -/// every field not bound by the path template should be mapped to the -/// request body. This enables the following alternative definition of -/// the update method: -/// -/// service Messaging { -/// rpc UpdateMessage(Message) returns (Message) { -/// option (google.api.http) = { -/// patch: "/v1/messages/{message_id}" -/// body: "*" -/// }; -/// } -/// } -/// message Message { -/// string message_id = 1; -/// string text = 2; -/// } -/// -/// -/// The following HTTP JSON to RPC mapping is enabled: -/// -/// HTTP | gRPC -/// -----|----- -/// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: -/// "123456" text: "Hi!")` -/// -/// Note that when using `*` in the body mapping, it is not possible to -/// have HTTP parameters, as all fields not bound by the path end in -/// the body. This makes this option more rarely used in practice when -/// defining REST APIs. The common usage of `*` is in custom methods -/// which don't use the URL at all for transferring data. -/// -/// It is possible to define multiple HTTP methods for one RPC by using -/// the `additional_bindings` option. Example: -/// -/// service Messaging { -/// rpc GetMessage(GetMessageRequest) returns (Message) { -/// option (google.api.http) = { -/// get: "/v1/messages/{message_id}" -/// additional_bindings { -/// get: "/v1/users/{user_id}/messages/{message_id}" -/// } -/// }; -/// } -/// } -/// message GetMessageRequest { -/// string message_id = 1; -/// string user_id = 2; -/// } -/// -/// This enables the following two alternative HTTP JSON to RPC mappings: -/// -/// HTTP | gRPC -/// -----|----- -/// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` -/// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: -/// "123456")` -/// -/// ## Rules for HTTP mapping -/// -/// 1. Leaf request fields (recursive expansion nested messages in the request -/// message) are classified into three categories: -/// - Fields referred by the path template. They are passed via the URL path. -/// - Fields referred by the \[HttpRule.body][google.api.HttpRule.body\]. They are passed via the HTTP -/// request body. -/// - All other fields are passed via the URL query parameters, and the -/// parameter name is the field path in the request message. A repeated -/// field can be represented as multiple query parameters under the same -/// name. -/// 2. If \[HttpRule.body][google.api.HttpRule.body\] is "*", there is no URL query parameter, all fields -/// are passed via URL path and HTTP request body. -/// 3. If \[HttpRule.body][google.api.HttpRule.body\] is omitted, there is no HTTP request body, all -/// fields are passed via URL path and URL query parameters. 
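-///
-/// As a rough sketch (not part of the original `google.api` documentation) of
-/// how such a rule looks when built with the generated Rust types defined
-/// below, a GET binding might be constructed as:
-///
-///     let rule = HttpRule {
-///         selector: "example.v1.Messaging.GetMessage".to_owned(),
-///         pattern: Some(http_rule::Pattern::Get(
-///             "/v1/messages/{message_id}".to_owned(),
-///         )),
-///         // remaining fields keep their prost defaults
-///         ..Default::default()
-///     };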
-/// -/// ### Path template syntax -/// -/// Template = "/" Segments [ Verb ] ; -/// Segments = Segment { "/" Segment } ; -/// Segment = "*" | "**" | LITERAL | Variable ; -/// Variable = "{" FieldPath [ "=" Segments ] "}" ; -/// FieldPath = IDENT { "." IDENT } ; -/// Verb = ":" LITERAL ; -/// -/// The syntax `*` matches a single URL path segment. The syntax `**` matches -/// zero or more URL path segments, which must be the last part of the URL path -/// except the `Verb`. -/// -/// The syntax `Variable` matches part of the URL path as specified by its -/// template. A variable template must not contain other variables. If a variable -/// matches a single path segment, its template may be omitted, e.g. `{var}` -/// is equivalent to `{var=*}`. -/// -/// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` -/// contains any reserved character, such characters should be percent-encoded -/// before the matching. -/// -/// If a variable contains exactly one path segment, such as `"{var}"` or -/// `"{var=*}"`, when such a variable is expanded into a URL path on the client -/// side, all characters except `\[-_.~0-9a-zA-Z\]` are percent-encoded. The -/// server side does the reverse decoding. Such variables show up in the -/// [Discovery -/// Document]() as -/// `{var}`. -/// -/// If a variable contains multiple path segments, such as `"{var=foo/*}"` -/// or `"{var=**}"`, when such a variable is expanded into a URL path on the -/// client side, all characters except `\[-_.~/0-9a-zA-Z\]` are percent-encoded. -/// The server side does the reverse decoding, except "%2F" and "%2f" are left -/// unchanged. Such variables show up in the -/// [Discovery -/// Document]() as -/// `{+var}`. -/// -/// ## Using gRPC API Service Configuration -/// -/// gRPC API Service Configuration (service config) is a configuration language -/// for configuring a gRPC service to become a user-facing product. The -/// service config is simply the YAML representation of the `google.api.Service` -/// proto message. -/// -/// As an alternative to annotating your proto file, you can configure gRPC -/// transcoding in your service config YAML files. You do this by specifying a -/// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same -/// effect as the proto annotation. This can be particularly useful if you -/// have a proto that is reused in multiple services. Note that any transcoding -/// specified in the service config will override any matching transcoding -/// configuration in the proto. -/// -/// Example: -/// -/// http: -/// rules: -/// # Selects a gRPC method and applies HttpRule to it. -/// - selector: example.v1.Messaging.GetMessage -/// get: /v1/messages/{message_id}/{sub.subfield} -/// -/// ## Special notes -/// -/// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the -/// proto to JSON conversion must follow the [proto3 -/// specification](). -/// -/// While the single segment variable follows the semantics of -/// [RFC 6570]() Section 3.2.2 Simple String -/// Expansion, the multi segment variable **does not** follow RFC 6570 Section -/// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion -/// does not expand special characters like `?` and `#`, which would lead -/// to invalid URLs. As the result, gRPC Transcoding uses a custom encoding -/// for multi segment variables. -/// -/// The path variables **must not** refer to any repeated or mapped field, -/// because client libraries are not capable of handling such variable expansion. 
-///
-/// The path variables **must not** capture the leading "/" character. The reason
-/// is that the most common use case "{var}" does not capture the leading "/"
-/// character. For consistency, all path variables must share the same behavior.
-///
-/// Repeated message fields must not be mapped to URL query parameters, because
-/// no client library can support such complicated mapping.
-///
-/// If an API needs to use a JSON array for request or response body, it can map
-/// the request or response body to a repeated field. However, some gRPC
-/// Transcoding implementations may not support this feature.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct HttpRule {
-    /// Selects a method to which this rule applies.
-    ///
-    /// Refer to \[selector][google.api.DocumentationRule.selector\] for syntax details.
-    #[prost(string, tag = "1")]
-    pub selector: ::prost::alloc::string::String,
-    /// The name of the request field whose value is mapped to the HTTP request
-    /// body, or `*` for mapping all request fields not captured by the path
-    /// pattern to the HTTP body, or omitted for not having any HTTP request body.
-    ///
-    /// NOTE: the referred field must be present at the top-level of the request
-    /// message type.
-    #[prost(string, tag = "7")]
-    pub body: ::prost::alloc::string::String,
-    /// Optional. The name of the response field whose value is mapped to the HTTP
-    /// response body. When omitted, the entire response message will be used
-    /// as the HTTP response body.
-    ///
-    /// NOTE: The referred field must be present at the top-level of the response
-    /// message type.
-    #[prost(string, tag = "12")]
-    pub response_body: ::prost::alloc::string::String,
-    /// Additional HTTP bindings for the selector. Nested bindings must
-    /// not contain an `additional_bindings` field themselves (that is,
-    /// the nesting may only be one level deep).
-    #[prost(message, repeated, tag = "11")]
-    pub additional_bindings: ::prost::alloc::vec::Vec<HttpRule>,
-    /// Determines the URL pattern that is matched by this rule. This pattern can
-    /// be used with any of the {get|put|post|delete|patch} methods. A custom
-    /// method can be defined using the 'custom' field.
-    #[prost(oneof = "http_rule::Pattern", tags = "2, 3, 4, 5, 6, 8")]
-    pub pattern: ::core::option::Option<http_rule::Pattern>,
-}
-/// Nested message and enum types in `HttpRule`.
-pub mod http_rule {
-    /// Determines the URL pattern that is matched by this rule. This pattern can
-    /// be used with any of the {get|put|post|delete|patch} methods. A custom
-    /// method can be defined using the 'custom' field.
-    #[allow(clippy::derive_partial_eq_without_eq)]
-    #[derive(Clone, PartialEq, ::prost::Oneof)]
-    pub enum Pattern {
-        /// Maps to HTTP GET. Used for listing and getting information about
-        /// resources.
-        #[prost(string, tag = "2")]
-        Get(::prost::alloc::string::String),
-        /// Maps to HTTP PUT. Used for replacing a resource.
-        #[prost(string, tag = "3")]
-        Put(::prost::alloc::string::String),
-        /// Maps to HTTP POST. Used for creating a resource or performing an action.
-        #[prost(string, tag = "4")]
-        Post(::prost::alloc::string::String),
-        /// Maps to HTTP DELETE. Used for deleting a resource.
-        #[prost(string, tag = "5")]
-        Delete(::prost::alloc::string::String),
-        /// Maps to HTTP PATCH. Used for updating a resource.
- #[prost(string, tag = "6")] - Patch(::prost::alloc::string::String), - /// The custom pattern is used for specifying an HTTP method that is not - /// included in the `pattern` field, such as HEAD, or "*" to leave the - /// HTTP method unspecified for this rule. The wild-card rule is useful - /// for services that provide content to Web (HTML) clients. - #[prost(message, tag = "8")] - Custom(super::CustomHttpPattern), - } -} -/// A custom pattern is used for defining custom HTTP verb. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CustomHttpPattern { - /// The name of this custom HTTP verb. - #[prost(string, tag = "1")] - pub kind: ::prost::alloc::string::String, - /// The path matched by this custom verb. - #[prost(string, tag = "2")] - pub path: ::prost::alloc::string::String, -} -/// The launch stage as defined by [Google Cloud Platform -/// Launch Stages](). -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum LaunchStage { - /// Do not use this default value. - Unspecified = 0, - /// The feature is not yet implemented. Users can not use it. - Unimplemented = 6, - /// Prelaunch features are hidden from users and are only visible internally. - Prelaunch = 7, - /// Early Access features are limited to a closed group of testers. To use - /// these features, you must sign up in advance and sign a Trusted Tester - /// agreement (which includes confidentiality provisions). These features may - /// be unstable, changed in backward-incompatible ways, and are not - /// guaranteed to be released. - EarlyAccess = 1, - /// Alpha is a limited availability test for releases before they are cleared - /// for widespread use. By Alpha, all significant design issues are resolved - /// and we are in the process of verifying functionality. Alpha customers - /// need to apply for access, agree to applicable terms, and have their - /// projects allowlisted. Alpha releases don't have to be feature complete, - /// no SLAs are provided, and there are no technical support obligations, but - /// they will be far enough along that customers can actually use them in - /// test environments or for limited-use tests -- just like they would in - /// normal production cases. - Alpha = 2, - /// Beta is the point at which we are ready to open a release for any - /// customer to use. There are no SLA or technical support obligations in a - /// Beta release. Products will be complete from a feature perspective, but - /// may have some open outstanding issues. Beta releases are suitable for - /// limited production use cases. - Beta = 3, - /// GA features are open to all developers and are considered stable and - /// fully qualified for production use. - Ga = 4, - /// Deprecated features are scheduled to be shut down and removed. For more - /// information, see the "Deprecation Policy" section of our [Terms of - /// Service]() - /// and the [Google Cloud Platform Subject to the Deprecation - /// Policy]() documentation. - Deprecated = 5, -} -impl LaunchStage { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
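-    ///
-    /// A small illustrative sketch, with values taken from the mappings below:
-    ///
-    ///     assert_eq!(LaunchStage::Ga.as_str_name(), "GA");
-    ///     assert_eq!(LaunchStage::from_str_name("GA"), Some(LaunchStage::Ga));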
-    pub fn as_str_name(&self) -> &'static str {
-        match self {
-            LaunchStage::Unspecified => "LAUNCH_STAGE_UNSPECIFIED",
-            LaunchStage::Unimplemented => "UNIMPLEMENTED",
-            LaunchStage::Prelaunch => "PRELAUNCH",
-            LaunchStage::EarlyAccess => "EARLY_ACCESS",
-            LaunchStage::Alpha => "ALPHA",
-            LaunchStage::Beta => "BETA",
-            LaunchStage::Ga => "GA",
-            LaunchStage::Deprecated => "DEPRECATED",
-        }
-    }
-    /// Creates an enum from field names used in the ProtoBuf definition.
-    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
-        match value {
-            "LAUNCH_STAGE_UNSPECIFIED" => Some(Self::Unspecified),
-            "UNIMPLEMENTED" => Some(Self::Unimplemented),
-            "PRELAUNCH" => Some(Self::Prelaunch),
-            "EARLY_ACCESS" => Some(Self::EarlyAccess),
-            "ALPHA" => Some(Self::Alpha),
-            "BETA" => Some(Self::Beta),
-            "GA" => Some(Self::Ga),
-            "DEPRECATED" => Some(Self::Deprecated),
-            _ => None,
-        }
-    }
-}
-/// Required information for every language.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct CommonLanguageSettings {
-    /// Link to automatically generated reference documentation. Example:
-    ///
-    #[deprecated]
-    #[prost(string, tag = "1")]
-    pub reference_docs_uri: ::prost::alloc::string::String,
-    /// The destination where API teams want this client library to be published.
-    #[prost(enumeration = "ClientLibraryDestination", repeated, tag = "2")]
-    pub destinations: ::prost::alloc::vec::Vec<i32>,
-}
-/// Details about how and where to publish client libraries.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct ClientLibrarySettings {
-    /// Version of the API to apply these settings to.
-    #[prost(string, tag = "1")]
-    pub version: ::prost::alloc::string::String,
-    /// Launch stage of this version of the API.
-    #[prost(enumeration = "LaunchStage", tag = "2")]
-    pub launch_stage: i32,
-    /// When using transport=rest, the client request will encode enums as
-    /// numbers rather than strings.
-    #[prost(bool, tag = "3")]
-    pub rest_numeric_enums: bool,
-    /// Settings for legacy Java features, supported in the Service YAML.
-    #[prost(message, optional, tag = "21")]
-    pub java_settings: ::core::option::Option<JavaSettings>,
-    /// Settings for C++ client libraries.
-    #[prost(message, optional, tag = "22")]
-    pub cpp_settings: ::core::option::Option<CppSettings>,
-    /// Settings for PHP client libraries.
-    #[prost(message, optional, tag = "23")]
-    pub php_settings: ::core::option::Option<PhpSettings>,
-    /// Settings for Python client libraries.
-    #[prost(message, optional, tag = "24")]
-    pub python_settings: ::core::option::Option<PythonSettings>,
-    /// Settings for Node client libraries.
-    #[prost(message, optional, tag = "25")]
-    pub node_settings: ::core::option::Option<NodeSettings>,
-    /// Settings for .NET client libraries.
-    #[prost(message, optional, tag = "26")]
-    pub dotnet_settings: ::core::option::Option<DotnetSettings>,
-    /// Settings for Ruby client libraries.
-    #[prost(message, optional, tag = "27")]
-    pub ruby_settings: ::core::option::Option<RubySettings>,
-    /// Settings for Go client libraries.
-    #[prost(message, optional, tag = "28")]
-    pub go_settings: ::core::option::Option<GoSettings>,
-}
-/// This message configures the settings for publishing [Google Cloud Client
-/// libraries]()
-/// generated from the service config.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct Publishing {
-    /// A list of API method settings, e.g. the behavior for methods that use the
-    /// long-running operation pattern.
-    #[prost(message, repeated, tag = "2")]
-    pub method_settings: ::prost::alloc::vec::Vec<MethodSettings>,
-    /// Link to a place that API users can report issues. Example:
-    ///
-    #[prost(string, tag = "101")]
-    pub new_issue_uri: ::prost::alloc::string::String,
-    /// Link to product home page. Example:
-    ///
-    #[prost(string, tag = "102")]
-    pub documentation_uri: ::prost::alloc::string::String,
-    /// Used as a tracking tag when collecting data about the APIs developer
-    /// relations artifacts like docs, packages delivered to package managers,
-    /// etc. Example: "speech".
-    #[prost(string, tag = "103")]
-    pub api_short_name: ::prost::alloc::string::String,
-    /// GitHub label to apply to issues and pull requests opened for this API.
-    #[prost(string, tag = "104")]
-    pub github_label: ::prost::alloc::string::String,
-    /// GitHub teams to be added to CODEOWNERS in the directory in GitHub
-    /// containing source code for the client libraries for this API.
-    #[prost(string, repeated, tag = "105")]
-    pub codeowner_github_teams: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
-    /// A prefix used in sample code when demarking regions to be included in
-    /// documentation.
-    #[prost(string, tag = "106")]
-    pub doc_tag_prefix: ::prost::alloc::string::String,
-    /// For whom the client library is being published.
-    #[prost(enumeration = "ClientLibraryOrganization", tag = "107")]
-    pub organization: i32,
-    /// Client library settings. If the same version string appears multiple
-    /// times in this list, then the last one wins. Settings from earlier
-    /// settings with the same version string are discarded.
-    #[prost(message, repeated, tag = "109")]
-    pub library_settings: ::prost::alloc::vec::Vec<ClientLibrarySettings>,
-}
-/// Settings for Java client libraries.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct JavaSettings {
-    /// The package name to use in Java. Clobbers the java_package option
-    /// set in the protobuf. This should be used **only** by APIs
-    /// who have already set the language_settings.java.package_name" field
-    /// in gapic.yaml. API teams should use the protobuf java_package option
-    /// where possible.
-    ///
-    /// Example of a YAML configuration::
-    ///
-    ///     publishing:
-    ///       java_settings:
-    ///         library_package: com.google.cloud.pubsub.v1
-    #[prost(string, tag = "1")]
-    pub library_package: ::prost::alloc::string::String,
-    /// Configure the Java class name to use instead of the service's for its
-    /// corresponding generated GAPIC client. Keys are fully-qualified
-    /// service names as they appear in the protobuf (including the full
-    /// the language_settings.java.interface_names" field in gapic.yaml. API
-    /// teams should otherwise use the service name as it appears in the
-    /// protobuf.
-    ///
-    /// Example of a YAML configuration::
-    ///
-    ///     publishing:
-    ///       java_settings:
-    ///         service_class_names:
-    ///           - google.pubsub.v1.Publisher: TopicAdmin
-    ///           - google.pubsub.v1.Subscriber: SubscriptionAdmin
-    #[prost(map = "string, string", tag = "2")]
-    pub service_class_names:
-        ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>,
-    /// Some settings.
-    #[prost(message, optional, tag = "3")]
-    pub common: ::core::option::Option<CommonLanguageSettings>,
-}
-/// Settings for C++ client libraries.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct CppSettings {
-    /// Some settings.
-    #[prost(message, optional, tag = "1")]
-    pub common: ::core::option::Option<CommonLanguageSettings>,
-}
-/// Settings for Php client libraries.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct PhpSettings {
-    /// Some settings.
-    #[prost(message, optional, tag = "1")]
-    pub common: ::core::option::Option<CommonLanguageSettings>,
-}
-/// Settings for Python client libraries.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct PythonSettings {
-    /// Some settings.
-    #[prost(message, optional, tag = "1")]
-    pub common: ::core::option::Option<CommonLanguageSettings>,
-}
-/// Settings for Node client libraries.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct NodeSettings {
-    /// Some settings.
-    #[prost(message, optional, tag = "1")]
-    pub common: ::core::option::Option<CommonLanguageSettings>,
-}
-/// Settings for Dotnet client libraries.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct DotnetSettings {
-    /// Some settings.
-    #[prost(message, optional, tag = "1")]
-    pub common: ::core::option::Option<CommonLanguageSettings>,
-}
-/// Settings for Ruby client libraries.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct RubySettings {
-    /// Some settings.
-    #[prost(message, optional, tag = "1")]
-    pub common: ::core::option::Option<CommonLanguageSettings>,
-}
-/// Settings for Go client libraries.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct GoSettings {
-    /// Some settings.
-    #[prost(message, optional, tag = "1")]
-    pub common: ::core::option::Option<CommonLanguageSettings>,
-}
-/// Describes the generator configuration for a method.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct MethodSettings {
-    /// The fully qualified name of the method, for which the options below apply.
-    /// This is used to find the method to apply the options.
-    #[prost(string, tag = "1")]
-    pub selector: ::prost::alloc::string::String,
-    /// Describes settings to use for long-running operations when generating
-    /// API methods for RPCs. Complements RPCs that use the annotations in
-    /// google/longrunning/operations.proto.
-    ///
-    /// Example of a YAML configuration::
-    ///
-    ///     publishing:
-    ///       method_behavior:
-    ///         - selector: CreateAdDomain
-    ///           long_running:
-    ///             initial_poll_delay:
-    ///               seconds: 60 # 1 minute
-    ///             poll_delay_multiplier: 1.5
-    ///             max_poll_delay:
-    ///               seconds: 360 # 6 minutes
-    ///             total_poll_timeout:
-    ///               seconds: 54000 # 90 minutes
-    #[prost(message, optional, tag = "2")]
-    pub long_running: ::core::option::Option<method_settings::LongRunning>,
-}
-/// Nested message and enum types in `MethodSettings`.
-pub mod method_settings {
-    /// Describes settings to use when generating API methods that use the
-    /// long-running operation pattern.
-    /// All default values below are from those used in the client library
-    /// generators (e.g.
-    /// \[Java\]()).
-    #[allow(clippy::derive_partial_eq_without_eq)]
-    #[derive(Clone, PartialEq, ::prost::Message)]
-    pub struct LongRunning {
-        /// Initial delay after which the first poll request will be made.
-        /// Default value: 5 seconds.
-        #[prost(message, optional, tag = "1")]
-        pub initial_poll_delay: ::core::option::Option<::prost_types::Duration>,
-        /// Multiplier to gradually increase delay between subsequent polls until it
-        /// reaches max_poll_delay.
-        /// Default value: 1.5.
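-        ///
-        /// As a hypothetical override sketch using this generated struct (all
-        /// other fields keep their zero/empty prost defaults):
-        ///
-        ///     let long_running = LongRunning {
-        ///         poll_delay_multiplier: 2.0,
-        ///         ..Default::default()
-        ///     };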
-        #[prost(float, tag = "2")]
-        pub poll_delay_multiplier: f32,
-        /// Maximum time between two subsequent poll requests.
-        /// Default value: 45 seconds.
-        #[prost(message, optional, tag = "3")]
-        pub max_poll_delay: ::core::option::Option<::prost_types::Duration>,
-        /// Total polling timeout.
-        /// Default value: 5 minutes.
-        #[prost(message, optional, tag = "4")]
-        pub total_poll_timeout: ::core::option::Option<::prost_types::Duration>,
-    }
-}
-/// The organization for which the client libraries are being published.
-/// Affects the url where generated docs are published, etc.
-#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
-#[repr(i32)]
-pub enum ClientLibraryOrganization {
-    /// Not useful.
-    Unspecified = 0,
-    /// Google Cloud Platform Org.
-    Cloud = 1,
-    /// Ads (Advertising) Org.
-    Ads = 2,
-    /// Photos Org.
-    Photos = 3,
-    /// Street View Org.
-    StreetView = 4,
-}
-impl ClientLibraryOrganization {
-    /// String value of the enum field names used in the ProtoBuf definition.
-    ///
-    /// The values are not transformed in any way and thus are considered stable
-    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
-    pub fn as_str_name(&self) -> &'static str {
-        match self {
-            ClientLibraryOrganization::Unspecified => "CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED",
-            ClientLibraryOrganization::Cloud => "CLOUD",
-            ClientLibraryOrganization::Ads => "ADS",
-            ClientLibraryOrganization::Photos => "PHOTOS",
-            ClientLibraryOrganization::StreetView => "STREET_VIEW",
-        }
-    }
-    /// Creates an enum from field names used in the ProtoBuf definition.
-    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
-        match value {
-            "CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED" => Some(Self::Unspecified),
-            "CLOUD" => Some(Self::Cloud),
-            "ADS" => Some(Self::Ads),
-            "PHOTOS" => Some(Self::Photos),
-            "STREET_VIEW" => Some(Self::StreetView),
-            _ => None,
-        }
-    }
-}
-/// To where should client libraries be published?
-#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
-#[repr(i32)]
-pub enum ClientLibraryDestination {
-    /// Client libraries will neither be generated nor published to package
-    /// managers.
-    Unspecified = 0,
-    /// Generate the client library in a repo under github.com/googleapis,
-    /// but don't publish it to package managers.
-    Github = 10,
-    /// Publish the library to package managers like nuget.org and npmjs.com.
-    PackageManager = 20,
-}
-impl ClientLibraryDestination {
-    /// String value of the enum field names used in the ProtoBuf definition.
-    ///
-    /// The values are not transformed in any way and thus are considered stable
-    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
-    pub fn as_str_name(&self) -> &'static str {
-        match self {
-            ClientLibraryDestination::Unspecified => "CLIENT_LIBRARY_DESTINATION_UNSPECIFIED",
-            ClientLibraryDestination::Github => "GITHUB",
-            ClientLibraryDestination::PackageManager => "PACKAGE_MANAGER",
-        }
-    }
-    /// Creates an enum from field names used in the ProtoBuf definition.
-    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
-        match value {
-            "CLIENT_LIBRARY_DESTINATION_UNSPECIFIED" => Some(Self::Unspecified),
-            "GITHUB" => Some(Self::Github),
-            "PACKAGE_MANAGER" => Some(Self::PackageManager),
-            _ => None,
-        }
-    }
-}
-/// An indicator of the behavior of a given field (for example, that a field
-/// is required in requests, or given as output but ignored as input).
-/// This **does not** change the behavior in protocol buffers itself; it only
-/// denotes the behavior and may affect how API tooling handles the field.
-///
-/// Note: This enum **may** receive new values in the future.
-#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
-#[repr(i32)]
-pub enum FieldBehavior {
-    /// Conventional default for enums. Do not use this.
-    Unspecified = 0,
-    /// Specifically denotes a field as optional.
-    /// While all fields in protocol buffers are optional, this may be specified
-    /// for emphasis if appropriate.
-    Optional = 1,
-    /// Denotes a field as required.
-    /// This indicates that the field **must** be provided as part of the request,
-    /// and failure to do so will cause an error (usually `INVALID_ARGUMENT`).
-    Required = 2,
-    /// Denotes a field as output only.
-    /// This indicates that the field is provided in responses, but including the
-    /// field in a request does nothing (the server *must* ignore it and
-    /// *must not* throw an error as a result of the field's presence).
-    OutputOnly = 3,
-    /// Denotes a field as input only.
-    /// This indicates that the field is provided in requests, and the
-    /// corresponding field is not included in output.
-    InputOnly = 4,
-    /// Denotes a field as immutable.
-    /// This indicates that the field may be set once in a request to create a
-    /// resource, but may not be changed thereafter.
-    Immutable = 5,
-    /// Denotes that a (repeated) field is an unordered list.
-    /// This indicates that the service may provide the elements of the list
-    /// in any arbitrary order, rather than the order the user originally
-    /// provided. Additionally, the list's order may or may not be stable.
-    UnorderedList = 6,
-    /// Denotes that this field returns a non-empty default value if not set.
-    /// This indicates that if the user provides the empty value in a request,
-    /// a non-empty value will be returned. The user will not be aware of what
-    /// non-empty value to expect.
-    NonEmptyDefault = 7,
-}
-impl FieldBehavior {
-    /// String value of the enum field names used in the ProtoBuf definition.
-    ///
-    /// The values are not transformed in any way and thus are considered stable
-    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
-    pub fn as_str_name(&self) -> &'static str {
-        match self {
-            FieldBehavior::Unspecified => "FIELD_BEHAVIOR_UNSPECIFIED",
-            FieldBehavior::Optional => "OPTIONAL",
-            FieldBehavior::Required => "REQUIRED",
-            FieldBehavior::OutputOnly => "OUTPUT_ONLY",
-            FieldBehavior::InputOnly => "INPUT_ONLY",
-            FieldBehavior::Immutable => "IMMUTABLE",
-            FieldBehavior::UnorderedList => "UNORDERED_LIST",
-            FieldBehavior::NonEmptyDefault => "NON_EMPTY_DEFAULT",
-        }
-    }
-    /// Creates an enum from field names used in the ProtoBuf definition.
-    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
-        match value {
-            "FIELD_BEHAVIOR_UNSPECIFIED" => Some(Self::Unspecified),
-            "OPTIONAL" => Some(Self::Optional),
-            "REQUIRED" => Some(Self::Required),
-            "OUTPUT_ONLY" => Some(Self::OutputOnly),
-            "INPUT_ONLY" => Some(Self::InputOnly),
-            "IMMUTABLE" => Some(Self::Immutable),
-            "UNORDERED_LIST" => Some(Self::UnorderedList),
-            "NON_EMPTY_DEFAULT" => Some(Self::NonEmptyDefault),
-            _ => None,
-        }
-    }
-}
-/// A simple descriptor of a resource type.
-/// -/// ResourceDescriptor annotates a resource message (either by means of a -/// protobuf annotation or use in the service config), and associates the -/// resource's schema, the resource type, and the pattern of the resource name. -/// -/// Example: -/// -/// message Topic { -/// // Indicates this message defines a resource schema. -/// // Declares the resource type in the format of {service}/{kind}. -/// // For Kubernetes resources, the format is {api group}/{kind}. -/// option (google.api.resource) = { -/// type: "pubsub.googleapis.com/Topic" -/// pattern: "projects/{project}/topics/{topic}" -/// }; -/// } -/// -/// The ResourceDescriptor Yaml config will look like: -/// -/// resources: -/// - type: "pubsub.googleapis.com/Topic" -/// pattern: "projects/{project}/topics/{topic}" -/// -/// Sometimes, resources have multiple patterns, typically because they can -/// live under multiple parents. -/// -/// Example: -/// -/// message LogEntry { -/// option (google.api.resource) = { -/// type: "logging.googleapis.com/LogEntry" -/// pattern: "projects/{project}/logs/{log}" -/// pattern: "folders/{folder}/logs/{log}" -/// pattern: "organizations/{organization}/logs/{log}" -/// pattern: "billingAccounts/{billing_account}/logs/{log}" -/// }; -/// } -/// -/// The ResourceDescriptor Yaml config will look like: -/// -/// resources: -/// - type: 'logging.googleapis.com/LogEntry' -/// pattern: "projects/{project}/logs/{log}" -/// pattern: "folders/{folder}/logs/{log}" -/// pattern: "organizations/{organization}/logs/{log}" -/// pattern: "billingAccounts/{billing_account}/logs/{log}" -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ResourceDescriptor { - /// The resource type. It must be in the format of - /// {service_name}/{resource_type_kind}. The `resource_type_kind` must be - /// singular and must not include version numbers. - /// - /// Example: `storage.googleapis.com/Bucket` - /// - /// The value of the resource_type_kind must follow the regular expression - /// /\[A-Za-z][a-zA-Z0-9\]+/. It should start with an upper case character and - /// should use PascalCase (UpperCamelCase). The maximum number of - /// characters allowed for the `resource_type_kind` is 100. - #[prost(string, tag = "1")] - pub r#type: ::prost::alloc::string::String, - /// Optional. The relative resource name pattern associated with this resource - /// type. The DNS prefix of the full resource name shouldn't be specified here. - /// - /// The path pattern must follow the syntax, which aligns with HTTP binding - /// syntax: - /// - /// Template = Segment { "/" Segment } ; - /// Segment = LITERAL | Variable ; - /// Variable = "{" LITERAL "}" ; - /// - /// Examples: - /// - /// - "projects/{project}/topics/{topic}" - /// - "projects/{project}/knowledgeBases/{knowledge_base}" - /// - /// The components in braces correspond to the IDs for each resource in the - /// hierarchy. It is expected that, if multiple patterns are provided, - /// the same component name (e.g. "project") refers to IDs of the same - /// type of resource. - #[prost(string, repeated, tag = "2")] - pub pattern: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// Optional. The field on the resource that designates the resource name - /// field. If omitted, this is assumed to be "name". - #[prost(string, tag = "3")] - pub name_field: ::prost::alloc::string::String, - /// Optional. The historical or future-looking state of the resource pattern. 
-    ///
-    /// Example:
-    ///
-    ///     // The InspectTemplate message originally only supported resource
-    ///     // names with organization, and project was added later.
-    ///     message InspectTemplate {
-    ///       option (google.api.resource) = {
-    ///         type: "dlp.googleapis.com/InspectTemplate"
-    ///         pattern:
-    ///         "organizations/{organization}/inspectTemplates/{inspect_template}"
-    ///         pattern: "projects/{project}/inspectTemplates/{inspect_template}"
-    ///         history: ORIGINALLY_SINGLE_PATTERN
-    ///       };
-    ///     }
-    #[prost(enumeration = "resource_descriptor::History", tag = "4")]
-    pub history: i32,
-    /// The plural name used in the resource name and permission names, such as
-    /// 'projects' for the resource name of 'projects/{project}' and the permission
-    /// name of 'cloudresourcemanager.googleapis.com/projects.get'. It is the same
-    /// concept of the `plural` field in k8s CRD spec
-    ///
-    /// Note: The plural form is required even for singleton resources. See
-    ///
-    #[prost(string, tag = "5")]
-    pub plural: ::prost::alloc::string::String,
-    /// The same concept of the `singular` field in k8s CRD spec
-    ///
-    /// Such as "project" for the `resourcemanager.googleapis.com/Project` type.
-    #[prost(string, tag = "6")]
-    pub singular: ::prost::alloc::string::String,
-    /// Style flag(s) for this resource.
-    /// These indicate that a resource is expected to conform to a given
-    /// style. See the specific style flags for additional information.
-    #[prost(enumeration = "resource_descriptor::Style", repeated, tag = "10")]
-    pub style: ::prost::alloc::vec::Vec<i32>,
-}
-/// Nested message and enum types in `ResourceDescriptor`.
-pub mod resource_descriptor {
-    /// A description of the historical or future-looking state of the
-    /// resource pattern.
-    #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
-    #[repr(i32)]
-    pub enum History {
-        /// The "unset" value.
-        Unspecified = 0,
-        /// The resource originally had one pattern and launched as such, and
-        /// additional patterns were added later.
-        OriginallySinglePattern = 1,
-        /// The resource has one pattern, but the API owner expects to add more
-        /// later. (This is the inverse of ORIGINALLY_SINGLE_PATTERN, and prevents
-        /// that from being necessary once there are multiple patterns.)
-        FutureMultiPattern = 2,
-    }
-    impl History {
-        /// String value of the enum field names used in the ProtoBuf definition.
-        ///
-        /// The values are not transformed in any way and thus are considered stable
-        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
-        pub fn as_str_name(&self) -> &'static str {
-            match self {
-                History::Unspecified => "HISTORY_UNSPECIFIED",
-                History::OriginallySinglePattern => "ORIGINALLY_SINGLE_PATTERN",
-                History::FutureMultiPattern => "FUTURE_MULTI_PATTERN",
-            }
-        }
-        /// Creates an enum from field names used in the ProtoBuf definition.
-        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
-            match value {
-                "HISTORY_UNSPECIFIED" => Some(Self::Unspecified),
-                "ORIGINALLY_SINGLE_PATTERN" => Some(Self::OriginallySinglePattern),
-                "FUTURE_MULTI_PATTERN" => Some(Self::FutureMultiPattern),
-                _ => None,
-            }
-        }
-    }
-    /// A flag representing a specific style that a resource claims to conform to.
-    #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
-    #[repr(i32)]
-    pub enum Style {
-        /// The unspecified value. Do not use.
-        Unspecified = 0,
-        /// This resource is intended to be "declarative-friendly".
-        ///
-        /// Declarative-friendly resources must be more strictly consistent, and
-        /// setting this to true communicates to tools that this resource should
-        /// adhere to declarative-friendly expectations.
-        ///
-        /// Note: This is used by the API linter (linter.aip.dev) to enable
-        /// additional checks.
-        DeclarativeFriendly = 1,
-    }
-    impl Style {
-        /// String value of the enum field names used in the ProtoBuf definition.
-        ///
-        /// The values are not transformed in any way and thus are considered stable
-        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
-        pub fn as_str_name(&self) -> &'static str {
-            match self {
-                Style::Unspecified => "STYLE_UNSPECIFIED",
-                Style::DeclarativeFriendly => "DECLARATIVE_FRIENDLY",
-            }
-        }
-        /// Creates an enum from field names used in the ProtoBuf definition.
-        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
-            match value {
-                "STYLE_UNSPECIFIED" => Some(Self::Unspecified),
-                "DECLARATIVE_FRIENDLY" => Some(Self::DeclarativeFriendly),
-                _ => None,
-            }
-        }
-    }
-}
-/// Defines a proto annotation that describes a string field that refers to
-/// an API resource.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct ResourceReference {
-    /// The resource type that the annotated field references.
-    ///
-    /// Example:
-    ///
-    ///     message Subscription {
-    ///       string topic = 2 [(google.api.resource_reference) = {
-    ///         type: "pubsub.googleapis.com/Topic"
-    ///       }];
-    ///     }
-    ///
-    /// Occasionally, a field may reference an arbitrary resource. In this case,
-    /// APIs use the special value * in their resource reference.
-    ///
-    /// Example:
-    ///
-    ///     message GetIamPolicyRequest {
-    ///       string resource = 2 [(google.api.resource_reference) = {
-    ///         type: "*"
-    ///       }];
-    ///     }
-    #[prost(string, tag = "1")]
-    pub r#type: ::prost::alloc::string::String,
-    /// The resource type of a child collection that the annotated field
-    /// references. This is useful for annotating the `parent` field that
-    /// doesn't have a fixed resource type.
-    ///
-    /// Example:
-    ///
-    ///     message ListLogEntriesRequest {
-    ///       string parent = 1 [(google.api.resource_reference) = {
-    ///         child_type: "logging.googleapis.com/LogEntry"
-    ///       };
-    ///     }
-    #[prost(string, tag = "2")]
-    pub child_type: ::prost::alloc::string::String,
-}
-/// A description of a label.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct LabelDescriptor {
-    /// The label key.
-    #[prost(string, tag = "1")]
-    pub key: ::prost::alloc::string::String,
-    /// The type of data that can be assigned to the label.
-    #[prost(enumeration = "label_descriptor::ValueType", tag = "2")]
-    pub value_type: i32,
-    /// A human-readable description for the label.
-    #[prost(string, tag = "3")]
-    pub description: ::prost::alloc::string::String,
-}
-/// Nested message and enum types in `LabelDescriptor`.
-pub mod label_descriptor {
-    /// Value types that can be used as label values.
-    #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
-    #[repr(i32)]
-    pub enum ValueType {
-        /// A variable-length string. This is the default.
-        String = 0,
-        /// Boolean; true or false.
-        Bool = 1,
-        /// A 64-bit signed integer.
-        Int64 = 2,
-    }
-    impl ValueType {
-        /// String value of the enum field names used in the ProtoBuf definition.
-        ///
-        /// The values are not transformed in any way and thus are considered stable
-        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
-        pub fn as_str_name(&self) -> &'static str {
-            match self {
-                ValueType::String => "STRING",
-                ValueType::Bool => "BOOL",
-                ValueType::Int64 => "INT64",
-            }
-        }
-        /// Creates an enum from field names used in the ProtoBuf definition.
-        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
-            match value {
-                "STRING" => Some(Self::String),
-                "BOOL" => Some(Self::Bool),
-                "INT64" => Some(Self::Int64),
-                _ => None,
-            }
-        }
-    }
-}
-/// An object that describes the schema of a \[MonitoredResource][google.api.MonitoredResource\] object using a
-/// type name and a set of labels. For example, the monitored resource
-/// descriptor for Google Compute Engine VM instances has a type of
-/// `"gce_instance"` and specifies the use of the labels `"instance_id"` and
-/// `"zone"` to identify particular VM instances.
-///
-/// Different APIs can support different monitored resource types. APIs generally
-/// provide a `list` method that returns the monitored resource descriptors used
-/// by the API.
-///
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct MonitoredResourceDescriptor {
-    /// Optional. The resource name of the monitored resource descriptor:
-    /// `"projects/{project_id}/monitoredResourceDescriptors/{type}"` where
-    /// {type} is the value of the `type` field in this object and
-    /// {project_id} is a project ID that provides API-specific context for
-    /// accessing the type. APIs that do not use project information can use the
-    /// resource name format `"monitoredResourceDescriptors/{type}"`.
-    #[prost(string, tag = "5")]
-    pub name: ::prost::alloc::string::String,
-    /// Required. The monitored resource type. For example, the type
-    /// `"cloudsql_database"` represents databases in Google Cloud SQL.
-    /// For a list of types, see [Monitoring resource
-    /// types]()
-    /// and [Logging resource
-    /// types]().
-    #[prost(string, tag = "1")]
-    pub r#type: ::prost::alloc::string::String,
-    /// Optional. A concise name for the monitored resource type that might be
-    /// displayed in user interfaces. It should be a Title Cased Noun Phrase,
-    /// without any article or other determiners. For example,
-    /// `"Google Cloud SQL Database"`.
-    #[prost(string, tag = "2")]
-    pub display_name: ::prost::alloc::string::String,
-    /// Optional. A detailed description of the monitored resource type that might
-    /// be used in documentation.
-    #[prost(string, tag = "3")]
-    pub description: ::prost::alloc::string::String,
-    /// Required. A set of labels used to describe instances of this monitored
-    /// resource type. For example, an individual Google Cloud SQL database is
-    /// identified by values for the labels `"database_id"` and `"zone"`.
-    #[prost(message, repeated, tag = "4")]
-    pub labels: ::prost::alloc::vec::Vec<LabelDescriptor>,
-    /// Optional. The launch stage of the monitored resource definition.
-    #[prost(enumeration = "LaunchStage", tag = "7")]
-    pub launch_stage: i32,
-}
-/// An object representing a resource that can be used for monitoring, logging,
-/// billing, or other purposes. Examples include virtual machine instances,
-/// databases, and storage devices such as disks. The `type` field identifies a
-/// \[MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor\] object that describes the resource's
-/// schema.
Information in the `labels` field identifies the actual resource and -/// its attributes according to the schema. For example, a particular Compute -/// Engine VM instance could be represented by the following object, because the -/// \[MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor\] for `"gce_instance"` has labels -/// `"project_id"`, `"instance_id"` and `"zone"`: -/// -/// { "type": "gce_instance", -/// "labels": { "project_id": "my-project", -/// "instance_id": "12345678901234", -/// "zone": "us-central1-a" }} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct MonitoredResource { - /// Required. The monitored resource type. This field must match - /// the `type` field of a \[MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor\] object. For - /// example, the type of a Compute Engine VM instance is `gce_instance`. - /// Some descriptors include the service name in the type; for example, - /// the type of a Datastream stream is `datastream.googleapis.com/Stream`. - #[prost(string, tag = "1")] - pub r#type: ::prost::alloc::string::String, - /// Required. Values for all of the labels listed in the associated monitored - /// resource descriptor. For example, Compute Engine VM instances use the - /// labels `"project_id"`, `"instance_id"`, and `"zone"`. - #[prost(map = "string, string", tag = "2")] - pub labels: - ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, -} -/// Auxiliary metadata for a \[MonitoredResource][google.api.MonitoredResource\] object. -/// \[MonitoredResource][google.api.MonitoredResource\] objects contain the minimum set of information to -/// uniquely identify a monitored resource instance. There is some other useful -/// auxiliary metadata. Monitoring and Logging use an ingestion -/// pipeline to extract metadata for cloud resources of all types, and store -/// the metadata in this message. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct MonitoredResourceMetadata { - /// Output only. Values for predefined system metadata labels. - /// System labels are a kind of metadata extracted by Google, including - /// "machine_image", "vpc", "subnet_id", - /// "security_group", "name", etc. - /// System label values can be only strings, Boolean values, or a list of - /// strings. For example: - /// - /// { "name": "my-test-instance", - /// "security_group": ["a", "b", "c"], - /// "spot_instance": false } - #[prost(message, optional, tag = "1")] - pub system_labels: ::core::option::Option<::prost_types::Struct>, - /// Output only. A map of user-defined metadata labels. - #[prost(map = "string, string", tag = "2")] - pub user_labels: - ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, -} diff --git a/opentelemetry-stackdriver/src/proto/devtools/cloudtrace/v2.rs b/opentelemetry-stackdriver/src/proto/devtools/cloudtrace/v2.rs deleted file mode 100644 index aef8a0245c..0000000000 --- a/opentelemetry-stackdriver/src/proto/devtools/cloudtrace/v2.rs +++ /dev/null @@ -1,626 +0,0 @@ -/// A span represents a single operation within a trace. Spans can be -/// nested to form a trace tree. Often, a trace contains a root span -/// that describes the end-to-end latency, and one or more subspans for -/// its sub-operations. -/// -/// A trace can also contain multiple root spans, or none at all. -/// Spans do not need to be contiguous. 
There might be
-/// gaps or overlaps between spans in a trace.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct Span {
-    /// Required. The resource name of the span in the following format:
-    ///
-    ///   * `projects/\[PROJECT_ID]/traces/[TRACE_ID]/spans/[SPAN_ID\]`
-    ///
-    /// `\[TRACE_ID\]` is a unique identifier for a trace within a project;
-    /// it is a 32-character hexadecimal encoding of a 16-byte array. It should
-    /// not be zero.
-    ///
-    /// `\[SPAN_ID\]` is a unique identifier for a span within a trace; it
-    /// is a 16-character hexadecimal encoding of an 8-byte array. It should not
-    /// be zero.
-    /// .
-    #[prost(string, tag = "1")]
-    pub name: ::prost::alloc::string::String,
-    /// Required. The `\[SPAN_ID\]` portion of the span's resource name.
-    #[prost(string, tag = "2")]
-    pub span_id: ::prost::alloc::string::String,
-    /// The `\[SPAN_ID\]` of this span's parent span. If this is a root span,
-    /// then this field must be empty.
-    #[prost(string, tag = "3")]
-    pub parent_span_id: ::prost::alloc::string::String,
-    /// Required. A description of the span's operation (up to 128 bytes).
-    /// Cloud Trace displays the description in the
-    /// Cloud console.
-    /// For example, the display name can be a qualified method name or a file name
-    /// and a line number where the operation is called. A best practice is to use
-    /// the same display name within an application and at the same call point.
-    /// This makes it easier to correlate spans in different traces.
-    #[prost(message, optional, tag = "4")]
-    pub display_name: ::core::option::Option<TruncatableString>,
-    /// Required. The start time of the span. On the client side, this is the time
-    /// kept by the local machine where the span execution starts. On the server
-    /// side, this is the time when the server's application handler starts
-    /// running.
-    #[prost(message, optional, tag = "5")]
-    pub start_time: ::core::option::Option<::prost_types::Timestamp>,
-    /// Required. The end time of the span. On the client side, this is the time
-    /// kept by the local machine where the span execution ends. On the server
-    /// side, this is the time when the server application handler stops running.
-    #[prost(message, optional, tag = "6")]
-    pub end_time: ::core::option::Option<::prost_types::Timestamp>,
-    /// A set of attributes on the span. You can have up to 32 attributes per
-    /// span.
-    #[prost(message, optional, tag = "7")]
-    pub attributes: ::core::option::Option<span::Attributes>,
-    /// Stack trace captured at the start of the span.
-    #[prost(message, optional, tag = "8")]
-    pub stack_trace: ::core::option::Option<StackTrace>,
-    /// A set of time events. You can have up to 32 annotations and 128 message
-    /// events per span.
-    #[prost(message, optional, tag = "9")]
-    pub time_events: ::core::option::Option<span::TimeEvents>,
-    /// Links associated with the span. You can have up to 128 links per Span.
-    #[prost(message, optional, tag = "10")]
-    pub links: ::core::option::Option<span::Links>,
-    /// Optional. The final status for this span.
-    #[prost(message, optional, tag = "11")]
-    pub status: ::core::option::Option<super::super::super::rpc::Status>,
-    /// Optional. Set this parameter to indicate whether this span is in
-    /// the same process as its parent. If you do not set this parameter,
-    /// Trace is unable to take advantage of this helpful information.
-    #[prost(message, optional, tag = "12")]
-    pub same_process_as_parent_span: ::core::option::Option<bool>,
-    /// Optional. The number of child spans that were generated while this span
-    /// was active. If set, allows implementation to detect missing child spans.
-    #[prost(message, optional, tag = "13")]
-    pub child_span_count: ::core::option::Option<i32>,
-    /// Optional. Distinguishes between spans generated in a particular context.
-    /// For example, two spans with the same name may be distinguished using
-    /// `CLIENT` (caller) and `SERVER` (callee) to identify an RPC call.
-    #[prost(enumeration = "span::SpanKind", tag = "14")]
-    pub span_kind: i32,
-}
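-
-// A minimal construction sketch (illustrative IDs; `span::SpanKind` and the
-// `Default` impl come from the generated definitions in this file):
-//
-//     let span = Span {
-//         name: "projects/my-project/traces/4bf92f3577b34da6a3ce929d0e0e4736/spans/00f067aa0ba902b7".to_owned(),
-//         span_id: "00f067aa0ba902b7".to_owned(),
-//         span_kind: span::SpanKind::Server as i32,
-//         ..Default::default()
-//     };
-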
If set, allows implementation to detect missing child spans. - #[prost(message, optional, tag = "13")] - pub child_span_count: ::core::option::Option, - /// Optional. Distinguishes between spans generated in a particular context. - /// For example, two spans with the same name may be distinguished using - /// `CLIENT` (caller) and `SERVER` (callee) to identify an RPC call. - #[prost(enumeration = "span::SpanKind", tag = "14")] - pub span_kind: i32, -} -/// Nested message and enum types in `Span`. -pub mod span { - /// A set of attributes as key-value pairs. - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct Attributes { - /// A set of attributes. Each attribute's key can be up to 128 bytes - /// long. The value can be a string up to 256 bytes, a signed 64-bit integer, - /// or the boolean values `true` or `false`. For example: - /// - /// "/instance_id": { "string_value": { "value": "my-instance" } } - /// "/http/request_bytes": { "int_value": 300 } - /// "abc.com/myattribute": { "bool_value": false } - #[prost(map = "string, message", tag = "1")] - pub attribute_map: - ::std::collections::HashMap<::prost::alloc::string::String, super::AttributeValue>, - /// The number of attributes that were discarded. Attributes can be discarded - /// because their keys are too long or because there are too many attributes. - /// If this value is 0 then all attributes are valid. - #[prost(int32, tag = "2")] - pub dropped_attributes_count: i32, - } - /// A time-stamped annotation or message event in the Span. - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct TimeEvent { - /// The timestamp indicating the time the event occurred. - #[prost(message, optional, tag = "1")] - pub time: ::core::option::Option<::prost_types::Timestamp>, - /// A `TimeEvent` can contain either an `Annotation` object or a - /// `MessageEvent` object, but not both. - #[prost(oneof = "time_event::Value", tags = "2, 3")] - pub value: ::core::option::Option, - } - /// Nested message and enum types in `TimeEvent`. - pub mod time_event { - /// Text annotation with a set of attributes. - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct Annotation { - /// A user-supplied message describing the event. The maximum length for - /// the description is 256 bytes. - #[prost(message, optional, tag = "1")] - pub description: ::core::option::Option, - /// A set of attributes on the annotation. You can have up to 4 attributes - /// per Annotation. - #[prost(message, optional, tag = "2")] - pub attributes: ::core::option::Option, - } - /// An event describing a message sent/received between Spans. - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct MessageEvent { - /// Type of MessageEvent. Indicates whether the message was sent or - /// received. - #[prost(enumeration = "message_event::Type", tag = "1")] - pub r#type: i32, - /// An identifier for the MessageEvent's message that can be used to match - /// `SENT` and `RECEIVED` MessageEvents. - #[prost(int64, tag = "2")] - pub id: i64, - /// The number of uncompressed bytes sent or received. - #[prost(int64, tag = "3")] - pub uncompressed_size_bytes: i64, - /// The number of compressed bytes sent or received. If missing, the - /// compressed size is assumed to be the same size as the uncompressed - /// size. 
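-            ///
-            /// # Example (editor's sketch)
-            ///
-            /// A hypothetical sent-side event; the `id` is what a matching
-            /// `RECEIVED` event on the peer span would carry, and the import
-            /// path is assumed from this file's module layout.
-            ///
-            /// ```ignore
-            /// use opentelemetry_stackdriver::proto::devtools::cloudtrace::v2::span::time_event::{
-            ///     message_event, MessageEvent,
-            /// };
-            ///
-            /// let sent = MessageEvent {
-            ///     r#type: message_event::Type::Sent as i32,
-            ///     id: 1, // match against the receiver's RECEIVED event
-            ///     uncompressed_size_bytes: 300,
-            ///     compressed_size_bytes: 0, // 0 => assumed equal to uncompressed
-            /// };
-            /// ```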
- #[prost(int64, tag = "4")] - pub compressed_size_bytes: i64, - } - /// Nested message and enum types in `MessageEvent`. - pub mod message_event { - /// Indicates whether the message was sent or received. - #[derive( - Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration, - )] - #[repr(i32)] - pub enum Type { - /// Unknown event type. - Unspecified = 0, - /// Indicates a sent message. - Sent = 1, - /// Indicates a received message. - Received = 2, - } - impl Type { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Type::Unspecified => "TYPE_UNSPECIFIED", - Type::Sent => "SENT", - Type::Received => "RECEIVED", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "TYPE_UNSPECIFIED" => Some(Self::Unspecified), - "SENT" => Some(Self::Sent), - "RECEIVED" => Some(Self::Received), - _ => None, - } - } - } - } - /// A `TimeEvent` can contain either an `Annotation` object or a - /// `MessageEvent` object, but not both. - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Value { - /// Text annotation with a set of attributes. - #[prost(message, tag = "2")] - Annotation(Annotation), - /// An event describing a message sent/received between Spans. - #[prost(message, tag = "3")] - MessageEvent(MessageEvent), - } - } - /// A collection of `TimeEvent`s. A `TimeEvent` is a time-stamped annotation - /// on the span, consisting of either user-supplied key:value pairs, or - /// details of a message sent/received between Spans. - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct TimeEvents { - /// A collection of `TimeEvent`s. - #[prost(message, repeated, tag = "1")] - pub time_event: ::prost::alloc::vec::Vec, - /// The number of dropped annotations in all the included time events. - /// If the value is 0, then no annotations were dropped. - #[prost(int32, tag = "2")] - pub dropped_annotations_count: i32, - /// The number of dropped message events in all the included time events. - /// If the value is 0, then no message events were dropped. - #[prost(int32, tag = "3")] - pub dropped_message_events_count: i32, - } - /// A pointer from the current span to another span in the same trace or in a - /// different trace. For example, this can be used in batching operations, - /// where a single batch handler processes multiple requests from different - /// traces or when the handler receives a request from a different project. - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct Link { - /// The `\[TRACE_ID\]` for a trace within a project. - #[prost(string, tag = "1")] - pub trace_id: ::prost::alloc::string::String, - /// The `\[SPAN_ID\]` for a span within a trace. - #[prost(string, tag = "2")] - pub span_id: ::prost::alloc::string::String, - /// The relationship of the current span relative to the linked span. - #[prost(enumeration = "link::Type", tag = "3")] - pub r#type: i32, - /// A set of attributes on the link. Up to 32 attributes can be - /// specified per link. 
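-        ///
-        /// # Example (editor's sketch)
-        ///
-        /// A minimal, hypothetical link to a span in another trace; the
-        /// import path is assumed and the IDs are placeholders.
-        ///
-        /// ```ignore
-        /// use opentelemetry_stackdriver::proto::devtools::cloudtrace::v2::span::{link, Link};
-        ///
-        /// let link = Link {
-        ///     trace_id: "4bf92f3577b34da6a3ce929d0e0e4736".to_string(),
-        ///     span_id: "00f067aa0ba902b7".to_string(),
-        ///     r#type: link::Type::ChildLinkedSpan as i32,
-        ///     attributes: None, // up to 32 attributes could be attached here
-        /// };
-        /// ```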
- #[prost(message, optional, tag = "4")] - pub attributes: ::core::option::Option, - } - /// Nested message and enum types in `Link`. - pub mod link { - /// The relationship of the current span relative to the linked span: child, - /// parent, or unspecified. - #[derive( - Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration, - )] - #[repr(i32)] - pub enum Type { - /// The relationship of the two spans is unknown. - Unspecified = 0, - /// The linked span is a child of the current span. - ChildLinkedSpan = 1, - /// The linked span is a parent of the current span. - ParentLinkedSpan = 2, - } - impl Type { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Type::Unspecified => "TYPE_UNSPECIFIED", - Type::ChildLinkedSpan => "CHILD_LINKED_SPAN", - Type::ParentLinkedSpan => "PARENT_LINKED_SPAN", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "TYPE_UNSPECIFIED" => Some(Self::Unspecified), - "CHILD_LINKED_SPAN" => Some(Self::ChildLinkedSpan), - "PARENT_LINKED_SPAN" => Some(Self::ParentLinkedSpan), - _ => None, - } - } - } - } - /// A collection of links, which are references from this span to a span - /// in the same or different trace. - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct Links { - /// A collection of links. - #[prost(message, repeated, tag = "1")] - pub link: ::prost::alloc::vec::Vec, - /// The number of dropped links after the maximum size was enforced. If - /// this value is 0, then no links were dropped. - #[prost(int32, tag = "2")] - pub dropped_links_count: i32, - } - /// Type of span. Can be used to specify additional relationships between spans - /// in addition to a parent/child relationship. - #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] - #[repr(i32)] - pub enum SpanKind { - /// Unspecified. Do NOT use as default. - /// Implementations MAY assume SpanKind.INTERNAL to be default. - Unspecified = 0, - /// Indicates that the span is used internally. Default value. - Internal = 1, - /// Indicates that the span covers server-side handling of an RPC or other - /// remote network request. - Server = 2, - /// Indicates that the span covers the client-side wrapper around an RPC or - /// other remote request. - Client = 3, - /// Indicates that the span describes producer sending a message to a broker. - /// Unlike client and server, there is no direct critical path latency - /// relationship between producer and consumer spans (e.g. publishing a - /// message to a pubsub service). - Producer = 4, - /// Indicates that the span describes consumer receiving a message from a - /// broker. Unlike client and server, there is no direct critical path - /// latency relationship between producer and consumer spans (e.g. receiving - /// a message from a pubsub service subscription). - Consumer = 5, - } - impl SpanKind { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
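-        ///
-        /// # Example (editor's sketch)
-        ///
-        /// A round trip through the string names; the `use` path is assumed
-        /// from this file's module layout.
-        ///
-        /// ```ignore
-        /// use opentelemetry_stackdriver::proto::devtools::cloudtrace::v2::span::SpanKind;
-        ///
-        /// assert_eq!(SpanKind::Server.as_str_name(), "SERVER");
-        /// assert_eq!(SpanKind::from_str_name("SERVER"), Some(SpanKind::Server));
-        /// ```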
- pub fn as_str_name(&self) -> &'static str { - match self { - SpanKind::Unspecified => "SPAN_KIND_UNSPECIFIED", - SpanKind::Internal => "INTERNAL", - SpanKind::Server => "SERVER", - SpanKind::Client => "CLIENT", - SpanKind::Producer => "PRODUCER", - SpanKind::Consumer => "CONSUMER", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "SPAN_KIND_UNSPECIFIED" => Some(Self::Unspecified), - "INTERNAL" => Some(Self::Internal), - "SERVER" => Some(Self::Server), - "CLIENT" => Some(Self::Client), - "PRODUCER" => Some(Self::Producer), - "CONSUMER" => Some(Self::Consumer), - _ => None, - } - } - } -} -/// The allowed types for `\[VALUE\]` in a `\[KEY]:[VALUE\]` attribute. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct AttributeValue { - /// The type of the value. - #[prost(oneof = "attribute_value::Value", tags = "1, 2, 3")] - pub value: ::core::option::Option, -} -/// Nested message and enum types in `AttributeValue`. -pub mod attribute_value { - /// The type of the value. - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Value { - /// A string up to 256 bytes long. - #[prost(message, tag = "1")] - StringValue(super::TruncatableString), - /// A 64-bit signed integer. - #[prost(int64, tag = "2")] - IntValue(i64), - /// A Boolean value represented by `true` or `false`. - #[prost(bool, tag = "3")] - BoolValue(bool), - } -} -/// A call stack appearing in a trace. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct StackTrace { - /// Stack frames in this stack trace. A maximum of 128 frames are allowed. - #[prost(message, optional, tag = "1")] - pub stack_frames: ::core::option::Option, - /// The hash ID is used to conserve network bandwidth for duplicate - /// stack traces within a single trace. - /// - /// Often multiple spans will have identical stack traces. - /// The first occurrence of a stack trace should contain both the - /// `stackFrame` content and a value in `stackTraceHashId`. - /// - /// Subsequent spans within the same request can refer - /// to that stack trace by only setting `stackTraceHashId`. - #[prost(int64, tag = "2")] - pub stack_trace_hash_id: i64, -} -/// Nested message and enum types in `StackTrace`. -pub mod stack_trace { - /// Represents a single stack frame in a stack trace. - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct StackFrame { - /// The fully-qualified name that uniquely identifies the function or - /// method that is active in this frame (up to 1024 bytes). - #[prost(message, optional, tag = "1")] - pub function_name: ::core::option::Option, - /// An un-mangled function name, if `function_name` is mangled. - /// To get information about name mangling, run - /// [this search](). - /// The name can be fully-qualified (up to 1024 bytes). - #[prost(message, optional, tag = "2")] - pub original_function_name: ::core::option::Option, - /// The name of the source file where the function call appears (up to 256 - /// bytes). - #[prost(message, optional, tag = "3")] - pub file_name: ::core::option::Option, - /// The line number in `file_name` where the function call appears. - #[prost(int64, tag = "4")] - pub line_number: i64, - /// The column number where the function call appears, if available. 
- /// This is important in JavaScript because of its anonymous functions. - #[prost(int64, tag = "5")] - pub column_number: i64, - /// The binary module from where the code was loaded. - #[prost(message, optional, tag = "6")] - pub load_module: ::core::option::Option, - /// The version of the deployed source code (up to 128 bytes). - #[prost(message, optional, tag = "7")] - pub source_version: ::core::option::Option, - } - /// A collection of stack frames, which can be truncated. - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct StackFrames { - /// Stack frames in this call stack. - #[prost(message, repeated, tag = "1")] - pub frame: ::prost::alloc::vec::Vec, - /// The number of stack frames that were dropped because there - /// were too many stack frames. - /// If this value is 0, then no stack frames were dropped. - #[prost(int32, tag = "2")] - pub dropped_frames_count: i32, - } -} -/// Binary module. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Module { - /// For example: main binary, kernel modules, and dynamic libraries - /// such as libc.so, sharedlib.so (up to 256 bytes). - #[prost(message, optional, tag = "1")] - pub module: ::core::option::Option, - /// A unique identifier for the module, usually a hash of its - /// contents (up to 128 bytes). - #[prost(message, optional, tag = "2")] - pub build_id: ::core::option::Option, -} -/// Represents a string that might be shortened to a specified length. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TruncatableString { - /// The shortened string. For example, if the original string is 500 - /// bytes long and the limit of the string is 128 bytes, then - /// `value` contains the first 128 bytes of the 500-byte string. - /// - /// Truncation always happens on a UTF8 character boundary. If there - /// are multi-byte characters in the string, then the length of the - /// shortened string might be less than the size limit. - #[prost(string, tag = "1")] - pub value: ::prost::alloc::string::String, - /// The number of bytes removed from the original string. If this - /// value is 0, then the string was not shortened. - #[prost(int32, tag = "2")] - pub truncated_byte_count: i32, -} -/// The request message for the `BatchWriteSpans` method. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct BatchWriteSpansRequest { - /// Required. The name of the project where the spans belong. The format is - /// `projects/\[PROJECT_ID\]`. - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - /// Required. A list of new spans. The span names must not match existing - /// spans, otherwise the results are undefined. - #[prost(message, repeated, tag = "2")] - pub spans: ::prost::alloc::vec::Vec, -} -/// Generated client implementations. -pub mod trace_service_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] - use tonic::codegen::http::Uri; - use tonic::codegen::*; - /// Service for collecting and viewing traces and spans within a trace. - /// - /// A trace is a collection of spans corresponding to a single - /// operation or a set of operations in an application. - /// - /// A span is an individual timed event which forms a node of the trace tree. - /// A single trace can contain spans from multiple services. 
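-    ///
-    /// # Example (editor's sketch)
-    ///
-    /// A hypothetical connect-and-write flow. The endpoint is a placeholder,
-    /// and a real Cloud Trace call would also need authentication, which this
-    /// sketch omits.
-    ///
-    /// ```ignore
-    /// let mut client =
-    ///     TraceServiceClient::connect("https://cloudtrace.googleapis.com").await?;
-    /// client
-    ///     .batch_write_spans(BatchWriteSpansRequest {
-    ///         name: "projects/my-project".to_string(),
-    ///         spans: vec![/* spans to export */],
-    ///     })
-    ///     .await?;
-    /// ```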
- #[derive(Debug, Clone)] - pub struct TraceServiceClient { - inner: tonic::client::Grpc, - } - impl TraceServiceClient { - /// Attempt to create a new client by connecting to a given endpoint. - pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl TraceServiceClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> TraceServiceClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - >>::Error: - Into + Send + Sync, - { - TraceServiceClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Batch writes new spans to new or existing traces. You cannot update - /// existing spans. - pub async fn batch_write_spans( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/google.devtools.cloudtrace.v2.TraceService/BatchWriteSpans", - ); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "google.devtools.cloudtrace.v2.TraceService", - "BatchWriteSpans", - )); - self.inner.unary(req, path, codec).await - } - /// Creates a new span. 
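-        ///
-        /// # Example (editor's sketch; the resource name is a placeholder)
-        ///
-        /// ```ignore
-        /// let span = client
-        ///     .create_span(Span {
-        ///         name: "projects/my-project/traces/TRACE_ID/spans/SPAN_ID".to_string(),
-        ///         ..Default::default()
-        ///     })
-        ///     .await?
-        ///     .into_inner();
-        /// ```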
- pub async fn create_span( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/google.devtools.cloudtrace.v2.TraceService/CreateSpan", - ); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "google.devtools.cloudtrace.v2.TraceService", - "CreateSpan", - )); - self.inner.unary(req, path, codec).await - } - } -} diff --git a/opentelemetry-stackdriver/src/proto/logging/type.rs b/opentelemetry-stackdriver/src/proto/logging/type.rs deleted file mode 100644 index 042bc23c6a..0000000000 --- a/opentelemetry-stackdriver/src/proto/logging/type.rs +++ /dev/null @@ -1,142 +0,0 @@ -/// A common proto for logging HTTP requests. Only contains semantics -/// defined by the HTTP specification. Product-specific logging -/// information MUST be defined in a separate message. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct HttpRequest { - /// The request method. Examples: `"GET"`, `"HEAD"`, `"PUT"`, `"POST"`. - #[prost(string, tag = "1")] - pub request_method: ::prost::alloc::string::String, - /// The scheme (http, https), the host name, the path and the query - /// portion of the URL that was requested. - /// Example: `" - #[prost(string, tag = "2")] - pub request_url: ::prost::alloc::string::String, - /// The size of the HTTP request message in bytes, including the request - /// headers and the request body. - #[prost(int64, tag = "3")] - pub request_size: i64, - /// The response code indicating the status of response. - /// Examples: 200, 404. - #[prost(int32, tag = "4")] - pub status: i32, - /// The size of the HTTP response message sent back to the client, in bytes, - /// including the response headers and the response body. - #[prost(int64, tag = "5")] - pub response_size: i64, - /// The user agent sent by the client. Example: - /// `"Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; Q312461; .NET - /// CLR 1.0.3705)"`. - #[prost(string, tag = "6")] - pub user_agent: ::prost::alloc::string::String, - /// The IP address (IPv4 or IPv6) of the client that issued the HTTP - /// request. This field can include port information. Examples: - /// `"192.168.1.1"`, `"10.0.0.1:80"`, `"FE80::0202:B3FF:FE1E:8329"`. - #[prost(string, tag = "7")] - pub remote_ip: ::prost::alloc::string::String, - /// The IP address (IPv4 or IPv6) of the origin server that the request was - /// sent to. This field can include port information. Examples: - /// `"192.168.1.1"`, `"10.0.0.1:80"`, `"FE80::0202:B3FF:FE1E:8329"`. - #[prost(string, tag = "13")] - pub server_ip: ::prost::alloc::string::String, - /// The referer URL of the request, as defined in - /// [HTTP/1.1 Header Field - /// Definitions](). - #[prost(string, tag = "8")] - pub referer: ::prost::alloc::string::String, - /// The request processing latency on the server, from the time the request was - /// received until the response was sent. - #[prost(message, optional, tag = "14")] - pub latency: ::core::option::Option<::prost_types::Duration>, - /// Whether or not a cache lookup was attempted. - #[prost(bool, tag = "11")] - pub cache_lookup: bool, - /// Whether or not an entity was served from cache - /// (with or without validation). 
- #[prost(bool, tag = "9")] - pub cache_hit: bool, - /// Whether or not the response was validated with the origin server before - /// being served from cache. This field is only meaningful if `cache_hit` is - /// True. - #[prost(bool, tag = "10")] - pub cache_validated_with_origin_server: bool, - /// The number of HTTP response bytes inserted into cache. Set only when a - /// cache fill was attempted. - #[prost(int64, tag = "12")] - pub cache_fill_bytes: i64, - /// Protocol used for the request. Examples: "HTTP/1.1", "HTTP/2", "websocket" - #[prost(string, tag = "15")] - pub protocol: ::prost::alloc::string::String, -} -/// The severity of the event described in a log entry, expressed as one of the -/// standard severity levels listed below. For your reference, the levels are -/// assigned the listed numeric values. The effect of using numeric values other -/// than those listed is undefined. -/// -/// You can filter for log entries by severity. For example, the following -/// filter expression will match log entries with severities `INFO`, `NOTICE`, -/// and `WARNING`: -/// -/// severity > DEBUG AND severity <= WARNING -/// -/// If you are writing log entries, you should map other severity encodings to -/// one of these standard levels. For example, you might map all of Java's FINE, -/// FINER, and FINEST levels to `LogSeverity.DEBUG`. You can preserve the -/// original severity level in the log entry payload if you wish. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum LogSeverity { - /// (0) The log entry has no assigned severity level. - Default = 0, - /// (100) Debug or trace information. - Debug = 100, - /// (200) Routine information, such as ongoing status or performance. - Info = 200, - /// (300) Normal but significant events, such as start up, shut down, or - /// a configuration change. - Notice = 300, - /// (400) Warning events might cause problems. - Warning = 400, - /// (500) Error events are likely to cause problems. - Error = 500, - /// (600) Critical events cause more severe problems or outages. - Critical = 600, - /// (700) A person must take an action immediately. - Alert = 700, - /// (800) One or more systems are unusable. - Emergency = 800, -} -impl LogSeverity { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - LogSeverity::Default => "DEFAULT", - LogSeverity::Debug => "DEBUG", - LogSeverity::Info => "INFO", - LogSeverity::Notice => "NOTICE", - LogSeverity::Warning => "WARNING", - LogSeverity::Error => "ERROR", - LogSeverity::Critical => "CRITICAL", - LogSeverity::Alert => "ALERT", - LogSeverity::Emergency => "EMERGENCY", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. 
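-    ///
-    /// # Example (editor's sketch)
-    ///
-    /// ```ignore
-    /// assert_eq!(LogSeverity::from_str_name("WARNING"), Some(LogSeverity::Warning));
-    /// assert_eq!(LogSeverity::Warning.as_str_name(), "WARNING");
-    /// ```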
- pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "DEFAULT" => Some(Self::Default), - "DEBUG" => Some(Self::Debug), - "INFO" => Some(Self::Info), - "NOTICE" => Some(Self::Notice), - "WARNING" => Some(Self::Warning), - "ERROR" => Some(Self::Error), - "CRITICAL" => Some(Self::Critical), - "ALERT" => Some(Self::Alert), - "EMERGENCY" => Some(Self::Emergency), - _ => None, - } - } -} diff --git a/opentelemetry-stackdriver/src/proto/logging/v2.rs b/opentelemetry-stackdriver/src/proto/logging/v2.rs deleted file mode 100644 index 390c4c75d7..0000000000 --- a/opentelemetry-stackdriver/src/proto/logging/v2.rs +++ /dev/null @@ -1,837 +0,0 @@ -/// An individual entry in a log. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct LogEntry { - /// Required. The resource name of the log to which this log entry belongs: - /// - /// "projects/\[PROJECT_ID]/logs/[LOG_ID\]" - /// "organizations/\[ORGANIZATION_ID]/logs/[LOG_ID\]" - /// "billingAccounts/\[BILLING_ACCOUNT_ID]/logs/[LOG_ID\]" - /// "folders/\[FOLDER_ID]/logs/[LOG_ID\]" - /// - /// A project number may be used in place of PROJECT_ID. The project number is - /// translated to its corresponding PROJECT_ID internally and the `log_name` - /// field will contain PROJECT_ID in queries and exports. - /// - /// `\[LOG_ID\]` must be URL-encoded within `log_name`. Example: - /// `"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity"`. - /// - /// `\[LOG_ID\]` must be less than 512 characters long and can only include the - /// following characters: upper and lower case alphanumeric characters, - /// forward-slash, underscore, hyphen, and period. - /// - /// For backward compatibility, if `log_name` begins with a forward-slash, such - /// as `/projects/...`, then the log entry is ingested as usual, but the - /// forward-slash is removed. Listing the log entry will not show the leading - /// slash and filtering for a log name with a leading slash will never return - /// any results. - #[prost(string, tag = "12")] - pub log_name: ::prost::alloc::string::String, - /// Required. The monitored resource that produced this log entry. - /// - /// Example: a log entry that reports a database error would be associated with - /// the monitored resource designating the particular database that reported - /// the error. - #[prost(message, optional, tag = "8")] - pub resource: ::core::option::Option, - /// Optional. The time the event described by the log entry occurred. This time is used - /// to compute the log entry's age and to enforce the logs retention period. - /// If this field is omitted in a new log entry, then Logging assigns it the - /// current time. Timestamps have nanosecond accuracy, but trailing zeros in - /// the fractional seconds might be omitted when the timestamp is displayed. - /// - /// Incoming log entries must have timestamps that don't exceed the - /// [logs retention - /// period]() in - /// the past, and that don't exceed 24 hours in the future. Log entries outside - /// those time boundaries aren't ingested by Logging. - #[prost(message, optional, tag = "9")] - pub timestamp: ::core::option::Option<::prost_types::Timestamp>, - /// Output only. The time the log entry was received by Logging. - #[prost(message, optional, tag = "24")] - pub receive_timestamp: ::core::option::Option<::prost_types::Timestamp>, - /// Optional. The severity of the log entry. The default value is `LogSeverity.DEFAULT`. 
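-    ///
-    /// # Example (editor's sketch)
-    ///
-    /// The field is carried as an `i32`; a hypothetical assignment from the
-    /// generated enum (the `use` path is assumed from this crate's layout):
-    ///
-    /// ```ignore
-    /// use opentelemetry_stackdriver::proto::logging::r#type::LogSeverity;
-    ///
-    /// entry.severity = LogSeverity::Error as i32; // 500
-    /// ```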
- #[prost(enumeration = "super::r#type::LogSeverity", tag = "10")] - pub severity: i32, - /// Optional. A unique identifier for the log entry. If you provide a value, then - /// Logging considers other log entries in the same project, with the same - /// `timestamp`, and with the same `insert_id` to be duplicates which are - /// removed in a single query result. However, there are no guarantees of - /// de-duplication in the export of logs. - /// - /// If the `insert_id` is omitted when writing a log entry, the Logging API - /// assigns its own unique identifier in this field. - /// - /// In queries, the `insert_id` is also used to order log entries that have - /// the same `log_name` and `timestamp` values. - #[prost(string, tag = "4")] - pub insert_id: ::prost::alloc::string::String, - /// Optional. Information about the HTTP request associated with this log entry, if - /// applicable. - #[prost(message, optional, tag = "7")] - pub http_request: ::core::option::Option, - /// Optional. A map of key, value pairs that provides additional information about the - /// log entry. The labels can be user-defined or system-defined. - /// - /// User-defined labels are arbitrary key, value pairs that you can use to - /// classify logs. - /// - /// System-defined labels are defined by GCP services for platform logs. - /// They have two components - a service namespace component and the - /// attribute name. For example: `compute.googleapis.com/resource_name`. - /// - /// Cloud Logging truncates label keys that exceed 512 B and label - /// values that exceed 64 KB upon their associated log entry being - /// written. The truncation is indicated by an ellipsis at the - /// end of the character string. - #[prost(map = "string, string", tag = "11")] - pub labels: - ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, - /// Optional. Information about an operation associated with the log entry, if - /// applicable. - #[prost(message, optional, tag = "15")] - pub operation: ::core::option::Option, - /// Optional. Resource name of the trace associated with the log entry, if any. If it - /// contains a relative resource name, the name is assumed to be relative to - /// `//tracing.googleapis.com`. Example: - /// `projects/my-projectid/traces/06796866738c859f2f19b7cfb3214824` - #[prost(string, tag = "22")] - pub trace: ::prost::alloc::string::String, - /// Optional. The span ID within the trace associated with the log entry. - /// - /// For Trace spans, this is the same format that the Trace API v2 uses: a - /// 16-character hexadecimal encoding of an 8-byte array, such as - /// `000000000000004a`. - #[prost(string, tag = "27")] - pub span_id: ::prost::alloc::string::String, - /// Optional. The sampling decision of the trace associated with the log entry. - /// - /// True means that the trace resource name in the `trace` field was sampled - /// for storage in a trace backend. False means that the trace was not sampled - /// for storage when this log entry was written, or the sampling decision was - /// unknown at the time. A non-sampled `trace` value is still useful as a - /// request correlation identifier. The default is False. - #[prost(bool, tag = "30")] - pub trace_sampled: bool, - /// Optional. Source code location information associated with the log entry, if any. - #[prost(message, optional, tag = "23")] - pub source_location: ::core::option::Option, - /// Optional. 
Information indicating this LogEntry is part of a sequence of multiple log - /// entries split from a single LogEntry. - #[prost(message, optional, tag = "35")] - pub split: ::core::option::Option, - /// The log entry payload, which can be one of multiple types. - #[prost(oneof = "log_entry::Payload", tags = "2, 3, 6")] - pub payload: ::core::option::Option, -} -/// Nested message and enum types in `LogEntry`. -pub mod log_entry { - /// The log entry payload, which can be one of multiple types. - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Payload { - /// The log entry payload, represented as a protocol buffer. Some Google - /// Cloud Platform services use this field for their log entry payloads. - /// - /// The following protocol buffer types are supported; user-defined types - /// are not supported: - /// - /// "type.googleapis.com/google.cloud.audit.AuditLog" - /// "type.googleapis.com/google.appengine.logging.v1.RequestLog" - #[prost(message, tag = "2")] - ProtoPayload(::prost_types::Any), - /// The log entry payload, represented as a Unicode string (UTF-8). - #[prost(string, tag = "3")] - TextPayload(::prost::alloc::string::String), - /// The log entry payload, represented as a structure that is - /// expressed as a JSON object. - #[prost(message, tag = "6")] - JsonPayload(::prost_types::Struct), - } -} -/// Additional information about a potentially long-running operation with which -/// a log entry is associated. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct LogEntryOperation { - /// Optional. An arbitrary operation identifier. Log entries with the same - /// identifier are assumed to be part of the same operation. - #[prost(string, tag = "1")] - pub id: ::prost::alloc::string::String, - /// Optional. An arbitrary producer identifier. The combination of `id` and - /// `producer` must be globally unique. Examples for `producer`: - /// `"MyDivision.MyBigCompany.com"`, `"github.com/MyProject/MyApplication"`. - #[prost(string, tag = "2")] - pub producer: ::prost::alloc::string::String, - /// Optional. Set this to True if this is the first log entry in the operation. - #[prost(bool, tag = "3")] - pub first: bool, - /// Optional. Set this to True if this is the last log entry in the operation. - #[prost(bool, tag = "4")] - pub last: bool, -} -/// Additional information about the source code location that produced the log -/// entry. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct LogEntrySourceLocation { - /// Optional. Source file name. Depending on the runtime environment, this - /// might be a simple name or a fully-qualified name. - #[prost(string, tag = "1")] - pub file: ::prost::alloc::string::String, - /// Optional. Line within the source file. 1-based; 0 indicates no line number - /// available. - #[prost(int64, tag = "2")] - pub line: i64, - /// Optional. Human-readable name of the function or method being invoked, with - /// optional context such as the class or package name. This information may be - /// used in contexts such as the logs viewer, where a file and line number are - /// less meaningful. The format can vary by language. For example: - /// `qual.if.ied.Class.method` (Java), `dir/package.func` (Go), `function` - /// (Python). - #[prost(string, tag = "3")] - pub function: ::prost::alloc::string::String, -} -/// Additional information used to correlate multiple log entries. 
Used when a -/// single LogEntry would exceed the Google Cloud Logging size limit and is -/// split across multiple log entries. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct LogSplit { - /// A globally unique identifier for all log entries in a sequence of split log - /// entries. All log entries with the same |LogSplit.uid| are assumed to be - /// part of the same sequence of split log entries. - #[prost(string, tag = "1")] - pub uid: ::prost::alloc::string::String, - /// The index of this LogEntry in the sequence of split log entries. Log - /// entries are given |index| values 0, 1, ..., n-1 for a sequence of n log - /// entries. - #[prost(int32, tag = "2")] - pub index: i32, - /// The total number of log entries that the original LogEntry was split into. - #[prost(int32, tag = "3")] - pub total_splits: i32, -} -/// The parameters to DeleteLog. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct DeleteLogRequest { - /// Required. The resource name of the log to delete: - /// - /// * `projects/\[PROJECT_ID]/logs/[LOG_ID\]` - /// * `organizations/\[ORGANIZATION_ID]/logs/[LOG_ID\]` - /// * `billingAccounts/\[BILLING_ACCOUNT_ID]/logs/[LOG_ID\]` - /// * `folders/\[FOLDER_ID]/logs/[LOG_ID\]` - /// - /// `\[LOG_ID\]` must be URL-encoded. For example, - /// `"projects/my-project-id/logs/syslog"`, - /// `"organizations/123/logs/cloudaudit.googleapis.com%2Factivity"`. - /// - /// For more information about log names, see - /// \[LogEntry][google.logging.v2.LogEntry\]. - #[prost(string, tag = "1")] - pub log_name: ::prost::alloc::string::String, -} -/// The parameters to WriteLogEntries. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct WriteLogEntriesRequest { - /// Optional. A default log resource name that is assigned to all log entries - /// in `entries` that do not specify a value for `log_name`: - /// - /// * `projects/\[PROJECT_ID]/logs/[LOG_ID\]` - /// * `organizations/\[ORGANIZATION_ID]/logs/[LOG_ID\]` - /// * `billingAccounts/\[BILLING_ACCOUNT_ID]/logs/[LOG_ID\]` - /// * `folders/\[FOLDER_ID]/logs/[LOG_ID\]` - /// - /// `\[LOG_ID\]` must be URL-encoded. For example: - /// - /// "projects/my-project-id/logs/syslog" - /// "organizations/123/logs/cloudaudit.googleapis.com%2Factivity" - /// - /// The permission `logging.logEntries.create` is needed on each project, - /// organization, billing account, or folder that is receiving new log - /// entries, whether the resource is specified in `logName` or in an - /// individual log entry. - #[prost(string, tag = "1")] - pub log_name: ::prost::alloc::string::String, - /// Optional. A default monitored resource object that is assigned to all log - /// entries in `entries` that do not specify a value for `resource`. Example: - /// - /// { "type": "gce_instance", - /// "labels": { - /// "zone": "us-central1-a", "instance_id": "00000000000000000000" }} - /// - /// See \[LogEntry][google.logging.v2.LogEntry\]. - #[prost(message, optional, tag = "2")] - pub resource: ::core::option::Option, - /// Optional. Default labels that are added to the `labels` field of all log - /// entries in `entries`. If a log entry already has a label with the same key - /// as a label in this parameter, then the log entry's label is not changed. - /// See \[LogEntry][google.logging.v2.LogEntry\]. 
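-    ///
-    /// # Example (editor's sketch; the label key and value are placeholders)
-    ///
-    /// ```ignore
-    /// use std::collections::HashMap;
-    ///
-    /// request.labels = HashMap::from([
-    ///     ("environment".to_string(), "production".to_string()),
-    /// ]);
-    /// ```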
- #[prost(map = "string, string", tag = "3")] - pub labels: - ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, - /// Required. The log entries to send to Logging. The order of log - /// entries in this list does not matter. Values supplied in this method's - /// `log_name`, `resource`, and `labels` fields are copied into those log - /// entries in this list that do not include values for their corresponding - /// fields. For more information, see the - /// \[LogEntry][google.logging.v2.LogEntry\] type. - /// - /// If the `timestamp` or `insert_id` fields are missing in log entries, then - /// this method supplies the current time or a unique identifier, respectively. - /// The supplied values are chosen so that, among the log entries that did not - /// supply their own values, the entries earlier in the list will sort before - /// the entries later in the list. See the `entries.list` method. - /// - /// Log entries with timestamps that are more than the - /// [logs retention period]() in - /// the past or more than 24 hours in the future will not be available when - /// calling `entries.list`. However, those log entries can still be [exported - /// with - /// LogSinks](). - /// - /// To improve throughput and to avoid exceeding the - /// [quota limit]() for calls to - /// `entries.write`, you should try to include several log entries in this - /// list, rather than calling this method for each individual log entry. - #[prost(message, repeated, tag = "4")] - pub entries: ::prost::alloc::vec::Vec, - /// Optional. Whether valid entries should be written even if some other - /// entries fail due to INVALID_ARGUMENT or PERMISSION_DENIED errors. If any - /// entry is not written, then the response status is the error associated - /// with one of the failed entries and the response includes error details - /// keyed by the entries' zero-based index in the `entries.write` method. - #[prost(bool, tag = "5")] - pub partial_success: bool, - /// Optional. If true, the request should expect normal response, but the - /// entries won't be persisted nor exported. Useful for checking whether the - /// logging API endpoints are working properly before sending valuable data. - #[prost(bool, tag = "6")] - pub dry_run: bool, -} -/// Result returned from WriteLogEntries. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct WriteLogEntriesResponse {} -/// Error details for WriteLogEntries with partial success. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct WriteLogEntriesPartialErrors { - /// When `WriteLogEntriesRequest.partial_success` is true, records the error - /// status for entries that were not written due to a permanent error, keyed - /// by the entry's zero-based index in `WriteLogEntriesRequest.entries`. - /// - /// Failed requests for which no entries are written will not include - /// per-entry errors. - #[prost(map = "int32, message", tag = "1")] - pub log_entry_errors: ::std::collections::HashMap, -} -/// The parameters to `ListLogEntries`. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ListLogEntriesRequest { - /// Required. 
Names of one or more parent resources from which to - /// retrieve log entries: - /// - /// * `projects/\[PROJECT_ID\]` - /// * `organizations/\[ORGANIZATION_ID\]` - /// * `billingAccounts/\[BILLING_ACCOUNT_ID\]` - /// * `folders/\[FOLDER_ID\]` - /// - /// May alternatively be one or more views: - /// - /// * `projects/\[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]` - /// * `organizations/\[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]` - /// * `billingAccounts/\[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]` - /// * `folders/\[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]` - /// - /// Projects listed in the `project_ids` field are added to this list. - #[prost(string, repeated, tag = "8")] - pub resource_names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// Optional. A filter that chooses which log entries to return. See [Advanced - /// Logs Queries](). - /// Only log entries that match the filter are returned. An empty filter - /// matches all log entries in the resources listed in `resource_names`. - /// Referencing a parent resource that is not listed in `resource_names` will - /// cause the filter to return no results. The maximum length of the filter is - /// 20000 characters. - #[prost(string, tag = "2")] - pub filter: ::prost::alloc::string::String, - /// Optional. How the results should be sorted. Presently, the only permitted - /// values are `"timestamp asc"` (default) and `"timestamp desc"`. The first - /// option returns entries in order of increasing values of - /// `LogEntry.timestamp` (oldest first), and the second option returns entries - /// in order of decreasing timestamps (newest first). Entries with equal - /// timestamps are returned in order of their `insert_id` values. - #[prost(string, tag = "3")] - pub order_by: ::prost::alloc::string::String, - /// Optional. The maximum number of results to return from this request. Default is 50. - /// If the value is negative or exceeds 1000, the request is rejected. The - /// presence of `next_page_token` in the response indicates that more results - /// might be available. - #[prost(int32, tag = "4")] - pub page_size: i32, - /// Optional. If present, then retrieve the next batch of results from the - /// preceding call to this method. `page_token` must be the value of - /// `next_page_token` from the previous response. The values of other method - /// parameters should be identical to those in the previous call. - #[prost(string, tag = "5")] - pub page_token: ::prost::alloc::string::String, -} -/// Result returned from `ListLogEntries`. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ListLogEntriesResponse { - /// A list of log entries. If `entries` is empty, `nextPageToken` may still be - /// returned, indicating that more entries may exist. See `nextPageToken` for - /// more information. - #[prost(message, repeated, tag = "1")] - pub entries: ::prost::alloc::vec::Vec, - /// If there might be more results than those appearing in this response, then - /// `nextPageToken` is included. To get the next set of results, call this - /// method again using the value of `nextPageToken` as `pageToken`. - /// - /// If a value for `next_page_token` appears and the `entries` field is empty, - /// it means that the search found no log entries so far but it did not have - /// time to search all the possible log entries. 
Retry the method with this - /// value for `page_token` to continue the search. Alternatively, consider - /// speeding up the search by changing your filter to specify a single log name - /// or resource type, or to narrow the time range of the search. - #[prost(string, tag = "2")] - pub next_page_token: ::prost::alloc::string::String, -} -/// The parameters to ListMonitoredResourceDescriptors -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ListMonitoredResourceDescriptorsRequest { - /// Optional. The maximum number of results to return from this request. - /// Non-positive values are ignored. The presence of `nextPageToken` in the - /// response indicates that more results might be available. - #[prost(int32, tag = "1")] - pub page_size: i32, - /// Optional. If present, then retrieve the next batch of results from the - /// preceding call to this method. `pageToken` must be the value of - /// `nextPageToken` from the previous response. The values of other method - /// parameters should be identical to those in the previous call. - #[prost(string, tag = "2")] - pub page_token: ::prost::alloc::string::String, -} -/// Result returned from ListMonitoredResourceDescriptors. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ListMonitoredResourceDescriptorsResponse { - /// A list of resource descriptors. - #[prost(message, repeated, tag = "1")] - pub resource_descriptors: - ::prost::alloc::vec::Vec, - /// If there might be more results than those appearing in this response, then - /// `nextPageToken` is included. To get the next set of results, call this - /// method again using the value of `nextPageToken` as `pageToken`. - #[prost(string, tag = "2")] - pub next_page_token: ::prost::alloc::string::String, -} -/// The parameters to ListLogs. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ListLogsRequest { - /// Required. The resource name that owns the logs: - /// - /// * `projects/\[PROJECT_ID\]` - /// * `organizations/\[ORGANIZATION_ID\]` - /// * `billingAccounts/\[BILLING_ACCOUNT_ID\]` - /// * `folders/\[FOLDER_ID\]` - #[prost(string, tag = "1")] - pub parent: ::prost::alloc::string::String, - /// Optional. The maximum number of results to return from this request. - /// Non-positive values are ignored. The presence of `nextPageToken` in the - /// response indicates that more results might be available. - #[prost(int32, tag = "2")] - pub page_size: i32, - /// Optional. If present, then retrieve the next batch of results from the - /// preceding call to this method. `pageToken` must be the value of - /// `nextPageToken` from the previous response. The values of other method - /// parameters should be identical to those in the previous call. - #[prost(string, tag = "3")] - pub page_token: ::prost::alloc::string::String, - /// Optional. 
The resource name that owns the logs: - /// - /// * `projects/\[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]` - /// * `organizations/\[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]` - /// * `billingAccounts/\[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]` - /// * `folders/\[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]` - /// - /// To support legacy queries, it could also be: - /// - /// * `projects/\[PROJECT_ID\]` - /// * `organizations/\[ORGANIZATION_ID\]` - /// * `billingAccounts/\[BILLING_ACCOUNT_ID\]` - /// * `folders/\[FOLDER_ID\]` - #[prost(string, repeated, tag = "8")] - pub resource_names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, -} -/// Result returned from ListLogs. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ListLogsResponse { - /// A list of log names. For example, - /// `"projects/my-project/logs/syslog"` or - /// `"organizations/123/logs/cloudresourcemanager.googleapis.com%2Factivity"`. - #[prost(string, repeated, tag = "3")] - pub log_names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// If there might be more results than those appearing in this response, then - /// `nextPageToken` is included. To get the next set of results, call this - /// method again using the value of `nextPageToken` as `pageToken`. - #[prost(string, tag = "2")] - pub next_page_token: ::prost::alloc::string::String, -} -/// The parameters to `TailLogEntries`. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TailLogEntriesRequest { - /// Required. Name of a parent resource from which to retrieve log entries: - /// - /// * `projects/\[PROJECT_ID\]` - /// * `organizations/\[ORGANIZATION_ID\]` - /// * `billingAccounts/\[BILLING_ACCOUNT_ID\]` - /// * `folders/\[FOLDER_ID\]` - /// - /// May alternatively be one or more views: - /// - /// * `projects/\[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]` - /// * `organizations/\[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]` - /// * `billingAccounts/\[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]` - /// * `folders/\[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]` - #[prost(string, repeated, tag = "1")] - pub resource_names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// Optional. A filter that chooses which log entries to return. See [Advanced - /// Logs Filters](). - /// Only log entries that match the filter are returned. An empty filter - /// matches all log entries in the resources listed in `resource_names`. - /// Referencing a parent resource that is not in `resource_names` will cause - /// the filter to return no results. The maximum length of the filter is 20000 - /// characters. - #[prost(string, tag = "2")] - pub filter: ::prost::alloc::string::String, - /// Optional. The amount of time to buffer log entries at the server before - /// being returned to prevent out of order results due to late arriving log - /// entries. Valid values are between 0-60000 milliseconds. Defaults to 2000 - /// milliseconds. - #[prost(message, optional, tag = "3")] - pub buffer_window: ::core::option::Option<::prost_types::Duration>, -} -/// Result returned from `TailLogEntries`. 
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TailLogEntriesResponse { - /// A list of log entries. Each response in the stream will order entries with - /// increasing values of `LogEntry.timestamp`. Ordering is not guaranteed - /// between separate responses. - #[prost(message, repeated, tag = "1")] - pub entries: ::prost::alloc::vec::Vec, - /// If entries that otherwise would have been included in the session were not - /// sent back to the client, counts of relevant entries omitted from the - /// session with the reason that they were not included. There will be at most - /// one of each reason per response. The counts represent the number of - /// suppressed entries since the last streamed response. - #[prost(message, repeated, tag = "2")] - pub suppression_info: ::prost::alloc::vec::Vec, -} -/// Nested message and enum types in `TailLogEntriesResponse`. -pub mod tail_log_entries_response { - /// Information about entries that were omitted from the session. - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct SuppressionInfo { - /// The reason that entries were omitted from the session. - #[prost(enumeration = "suppression_info::Reason", tag = "1")] - pub reason: i32, - /// A lower bound on the count of entries omitted due to `reason`. - #[prost(int32, tag = "2")] - pub suppressed_count: i32, - } - /// Nested message and enum types in `SuppressionInfo`. - pub mod suppression_info { - /// An indicator of why entries were omitted. - #[derive( - Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration, - )] - #[repr(i32)] - pub enum Reason { - /// Unexpected default. - Unspecified = 0, - /// Indicates suppression occurred due to relevant entries being - /// received in excess of rate limits. For quotas and limits, see - /// [Logging API quotas and - /// limits](). - RateLimit = 1, - /// Indicates suppression occurred due to the client not consuming - /// responses quickly enough. - NotConsumed = 2, - } - impl Reason { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Reason::Unspecified => "REASON_UNSPECIFIED", - Reason::RateLimit => "RATE_LIMIT", - Reason::NotConsumed => "NOT_CONSUMED", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "REASON_UNSPECIFIED" => Some(Self::Unspecified), - "RATE_LIMIT" => Some(Self::RateLimit), - "NOT_CONSUMED" => Some(Self::NotConsumed), - _ => None, - } - } - } - } -} -/// Generated client implementations. -pub mod logging_service_v2_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] - use tonic::codegen::http::Uri; - use tonic::codegen::*; - /// Service for ingesting and querying logs. - #[derive(Debug, Clone)] - pub struct LoggingServiceV2Client { - inner: tonic::client::Grpc, - } - impl LoggingServiceV2Client { - /// Attempt to create a new client by connecting to a given endpoint. 
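-        ///
-        /// # Example (editor's sketch; endpoint and auth setup are placeholders)
-        ///
-        /// ```ignore
-        /// let mut client =
-        ///     LoggingServiceV2Client::connect("https://logging.googleapis.com").await?;
-        /// ```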
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl LoggingServiceV2Client - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> LoggingServiceV2Client> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - >>::Error: - Into + Send + Sync, - { - LoggingServiceV2Client::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Deletes all the log entries in a log for the _Default Log Bucket. The log - /// reappears if it receives new entries. Log entries written shortly before - /// the delete operation might not be deleted. Entries received after the - /// delete operation with a timestamp before the operation will be deleted. - pub async fn delete_log( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/google.logging.v2.LoggingServiceV2/DeleteLog", - ); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "google.logging.v2.LoggingServiceV2", - "DeleteLog", - )); - self.inner.unary(req, path, codec).await - } - /// Writes log entries to Logging. This API method is the - /// only way to send log entries to Logging. This method - /// is used, directly or indirectly, by the Logging agent - /// (fluentd) and all logging libraries configured to use Logging. 
- /// A single request may contain log entries for a maximum of 1000 - /// different resources (projects, organizations, billing accounts or - /// folders) - pub async fn write_log_entries( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> - { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/google.logging.v2.LoggingServiceV2/WriteLogEntries", - ); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "google.logging.v2.LoggingServiceV2", - "WriteLogEntries", - )); - self.inner.unary(req, path, codec).await - } - /// Lists log entries. Use this method to retrieve log entries that originated - /// from a project/folder/organization/billing account. For ways to export log - /// entries, see [Exporting - /// Logs](https://cloud.google.com/logging/docs/export). - pub async fn list_log_entries( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> - { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/google.logging.v2.LoggingServiceV2/ListLogEntries", - ); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "google.logging.v2.LoggingServiceV2", - "ListLogEntries", - )); - self.inner.unary(req, path, codec).await - } - /// Lists the descriptors for monitored resource types used by Logging. - pub async fn list_monitored_resource_descriptors( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/google.logging.v2.LoggingServiceV2/ListMonitoredResourceDescriptors", - ); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "google.logging.v2.LoggingServiceV2", - "ListMonitoredResourceDescriptors", - )); - self.inner.unary(req, path, codec).await - } - /// Lists the logs in projects, organizations, folders, or billing accounts. - /// Only logs that have entries are listed. - pub async fn list_logs( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/google.logging.v2.LoggingServiceV2/ListLogs", - ); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "google.logging.v2.LoggingServiceV2", - "ListLogs", - )); - self.inner.unary(req, path, codec).await - } - /// Streaming read of log entries as they are ingested. Until the stream is - /// terminated, it will continue reading logs. 
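// `tail_log_entries` below is a bidirectional-streaming call, so it is driven
// with a stream of requests. A hedged usage sketch (the endpoint, resource
// name, and surrounding error handling are illustrative assumptions, not part
// of this patch):
//
//     let mut client =
//         LoggingServiceV2Client::connect("https://logging.googleapis.com").await?;
//     let requests = futures_util::stream::iter(vec![TailLogEntriesRequest {
//         resource_names: vec!["projects/my-project".to_owned()],
//         ..Default::default()
//     }]);
//     let mut responses = client.tail_log_entries(requests).await?.into_inner();
//     while let Some(rsp) = responses.message().await? {
//         // rsp.entries holds the tailed log entries; rsp.suppression_info
//         // reports entries omitted due to rate limits or slow consumption.
//     }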
- pub async fn tail_log_entries( - &mut self, - request: impl tonic::IntoStreamingRequest, - ) -> std::result::Result< - tonic::Response>, - tonic::Status, - > { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/google.logging.v2.LoggingServiceV2/TailLogEntries", - ); - let mut req = request.into_streaming_request(); - req.extensions_mut().insert(GrpcMethod::new( - "google.logging.v2.LoggingServiceV2", - "TailLogEntries", - )); - self.inner.streaming(req, path, codec).await - } - } -} diff --git a/opentelemetry-stackdriver/src/proto/mod.rs b/opentelemetry-stackdriver/src/proto/mod.rs deleted file mode 100644 index 631d830132..0000000000 --- a/opentelemetry-stackdriver/src/proto/mod.rs +++ /dev/null @@ -1,14 +0,0 @@ -pub mod api; - -pub mod devtools { - pub mod cloudtrace { - pub mod v2; - } -} - -pub mod logging { - pub mod r#type; - pub mod v2; -} - -pub mod rpc; diff --git a/opentelemetry-stackdriver/src/proto/rpc.rs b/opentelemetry-stackdriver/src/proto/rpc.rs deleted file mode 100644 index e20cb14834..0000000000 --- a/opentelemetry-stackdriver/src/proto/rpc.rs +++ /dev/null @@ -1,25 +0,0 @@ -/// The `Status` type defines a logical error model that is suitable for -/// different programming environments, including REST APIs and RPC APIs. It is -/// used by \[gRPC\](). Each `Status` message contains -/// three pieces of data: error code, error message, and error details. -/// -/// You can find out more about this error model and how to work with it in the -/// [API Design Guide](). -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Status { - /// The status code, which should be an enum value of - /// \[google.rpc.Code][google.rpc.Code\]. - #[prost(int32, tag = "1")] - pub code: i32, - /// A developer-facing error message, which should be in English. Any - /// user-facing error message should be localized and sent in the - /// \[google.rpc.Status.details][google.rpc.Status.details\] field, or localized - /// by the client. - #[prost(string, tag = "2")] - pub message: ::prost::alloc::string::String, - /// A list of messages that carry the error details. There is a common set of - /// message types for APIs to use. - #[prost(message, repeated, tag = "3")] - pub details: ::prost::alloc::vec::Vec<::prost_types::Any>, -} diff --git a/opentelemetry-stackdriver/tests/generate.rs b/opentelemetry-stackdriver/tests/generate.rs deleted file mode 100644 index d7284fca42..0000000000 --- a/opentelemetry-stackdriver/tests/generate.rs +++ /dev/null @@ -1,261 +0,0 @@ -use std::collections::HashMap; -use std::ffi::OsStr; -use std::fs; -use std::path::PathBuf; -use std::process::Command; - -use futures_util::stream::FuturesUnordered; -use futures_util::stream::StreamExt; -use walkdir::WalkDir; - -/// Download the latest protobuf schemas from the Google APIs GitHub repository. -/// -/// This test is ignored by default, but can be run with `cargo test sync_schemas -- --ignored`. 
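// Taken together, the two tests below define a two-step regeneration
// workflow, run from the crate root:
//
//     cargo test sync_schemas -- --ignored    # refresh proto/google from googleapis
//     cargo test generated_code_is_fresh      # regenerate and compare src/proto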
-#[tokio::test]
-#[ignore]
-async fn sync_schemas() {
-    let client = reqwest::Client::new();
-    let cache = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("proto/google");
-    let schemas = PREREQUISITE_SCHEMAS
-        .iter()
-        .chain(GENERATE_FROM_SCHEMAS.iter());
-
-    let mut futures = FuturesUnordered::new();
-    for path in schemas.copied() {
-        let filename = cache.join(path);
-        let client = client.clone();
-        futures.push(async move {
-            let url = format!("{BASE_URI}/{path}");
-            let rsp = client.get(url).send().await.unwrap();
-            let body = rsp.text().await.unwrap();
-            fs::create_dir_all(filename.parent().unwrap()).unwrap();
-            fs::write(filename, body).unwrap();
-        });
-    }
-
-    while futures.next().await.is_some() {}
-}
-
-/// Use the protobuf schemas downloaded by the `sync_schemas` test to generate code.
-///
-/// This test will fail if the code currently in the repository is different from the
-/// newly generated code, and will update it in place in that case.
-#[test]
-fn generated_code_is_fresh() {
-    // Generate code into a temporary directory.
-
-    let schemas = GENERATE_FROM_SCHEMAS
-        .iter()
-        .map(|s| format!("google/{s}"))
-        .collect::<Vec<_>>();
-
-    let tmp_dir = tempfile::tempdir().unwrap();
-    fs::create_dir_all(&tmp_dir).unwrap();
-    tonic_build::configure()
-        .build_client(true)
-        .build_server(false)
-        .out_dir(&tmp_dir)
-        .compile(&schemas, &["proto"])
-        .unwrap();
-
-    // Next, wrangle the generated file names into a directory hierarchy.
-
-    let (mut modules, mut renames) = (Vec::new(), Vec::new());
-    for entry in fs::read_dir(&tmp_dir).unwrap() {
-        let path = entry.unwrap().path();
-
-        // Tonic now uses prettyplease instead of rustfmt, which causes a
-        // number of differences in the generated code, so run rustfmt over it.
-        Command::new("rustfmt")
-            .arg("--edition=2021")
-            .arg(&path)
-            .output()
-            .unwrap();
-
-        let file_name_str = path.file_name().and_then(|s| s.to_str()).unwrap();
-        let (base, _) = file_name_str
-            .strip_prefix("google.")
-            .unwrap()
-            .rsplit_once('.')
-            .unwrap();
-
-        let new = match base.rsplit_once('.') {
-            Some((dir, fname)) => {
-                let mut module = dir.split('.').map(|s| s.to_owned()).collect::<Vec<_>>();
-                module.push(fname.to_owned());
-                modules.push(module);
-                tmp_dir
-                    .path()
-                    .join(dir.replace('.', "/").replace("r#", ""))
-                    .join(format!("{}.rs", fname.replace("r#", "")))
-            }
-            None => {
-                let new = tmp_dir
-                    .path()
-                    .join(format!("{}.rs", base.replace("r#", "")));
-                modules.push(vec![base.to_owned()]);
-                new
-            }
-        };
-
-        renames.push((path, new));
-    }
-
-    // Rename the files into place after iterating over the old version.
-
-    for (old, new) in renames {
-        fs::create_dir_all(new.parent().unwrap()).unwrap();
-        fs::rename(old, new).unwrap();
-    }
-
-    // Build the module root and write it to `mod.rs`.
-
-    modules.sort_unstable();
-    let mut previous: &[String] = &[];
-    let (mut root, mut level) = (String::new(), 0);
-    for module in &modules {
-        // Find out how many modules to close and what modules to open.
-
-        let parent = &module[..module.len() - 1];
-        let (mut close, mut open) = (0, vec![]);
-        let components = Ord::max(previous.len(), parent.len());
-        for i in 0..components {
-            let (prev, cur) = (previous.get(i), parent.get(i));
-            if prev == cur && close == 0 && open.is_empty() {
-                continue;
-            }
-
-            match (prev, cur) {
-                (Some(_), Some(new)) => {
-                    close += 1;
-                    open.push(new);
-                }
-                (Some(_), None) => close += 1,
-                (None, Some(new)) => open.push(new),
-                (None, None) => unreachable!(),
-            }
-        }
-
-        // Close modules.
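// For intuition: for the schema set used by this crate, the `root` string
// built by this loop takes the same nested shape as the deleted
// src/proto/mod.rs shown earlier in this patch, e.g.
//
//     pub mod logging {
//         pub mod r#type;
//         pub mod v2;
//     }
//
//     pub mod rpc;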
-
-        let closed = close > 0;
-        while close > 0 {
-            for _ in 0..((level - 1) * 4) {
-                root.push(' ');
-            }
-            root.push_str("}\n");
-            close -= 1;
-            level -= 1;
-        }
-
-        if closed {
-            root.push('\n');
-        }
-
-        // Open modules.
-
-        let mut opened = false;
-        for component in &open {
-            if !opened && !closed {
-                root.push('\n');
-                opened = true;
-            }
-
-            for _ in 0..(level * 4) {
-                root.push(' ');
-            }
-
-            root.push_str("pub mod ");
-            root.push_str(component);
-            root.push_str(" {\n");
-            level += 1;
-        }
-
-        // Write a module declaration for this actual module.
-
-        for _ in 0..(level * 4) {
-            root.push(' ');
-        }
-        root.push_str("pub mod ");
-        root.push_str(module.last().unwrap());
-        root.push_str(";\n");
-        previous = parent;
-    }
-
-    while level > 0 {
-        level -= 1;
-        for _ in 0..(level * 4) {
-            root.push(' ');
-        }
-        root.push_str("}\n");
-    }
-
-    fs::write(tmp_dir.path().join("mod.rs"), root).unwrap();
-
-    // Move on to actually comparing the old and new versions.
-
-    let versions = [SOURCE_DIR, tmp_dir.path().to_str().unwrap()]
-        .iter()
-        .map(|path| {
-            let mut files = HashMap::new();
-            for entry in WalkDir::new(path) {
-                let entry = match entry {
-                    Ok(e) => e,
-                    Err(_) => continue,
-                };
-
-                let is_file = entry.file_type().is_file();
-                let rs = entry.path().extension() == Some(OsStr::new("rs"));
-                if !is_file || !rs {
-                    continue;
-                }
-
-                let file = entry.path();
-                let name = file.strip_prefix(path).unwrap();
-                files.insert(name.to_owned(), fs::read_to_string(file).unwrap());
-            }
-
-            files
-        })
-        .collect::<Vec<_>>();
-
-    // Compare the old and new versions and fail the test if they differ.
-
-    let mut keys = versions[0].keys().collect::<Vec<_>>();
-    keys.extend(versions[1].keys());
-    keys.sort_unstable();
-    keys.dedup();
-
-    if versions[0] != versions[1] {
-        let _ = fs::remove_dir_all(SOURCE_DIR);
-        fs::rename(tmp_dir, SOURCE_DIR).unwrap();
-        panic!("generated code in the repository is outdated, updating...");
-    }
-}
-
-/// Schema files used as input for the generated code.
-const GENERATE_FROM_SCHEMAS: &[&str] = &[
-    "devtools/cloudtrace/v2/tracing.proto",
-    "devtools/cloudtrace/v2/trace.proto",
-    "logging/type/http_request.proto",
-    "logging/v2/log_entry.proto",
-    "logging/v2/logging.proto",
-    "rpc/status.proto",
-];
-
-/// Schema files that are dependencies of the `GENERATE_FROM_SCHEMAS` entries.
-const PREREQUISITE_SCHEMAS: &[&str] = &[
-    "api/annotations.proto",
-    "api/resource.proto",
-    "api/monitored_resource.proto",
-    "api/field_behavior.proto",
-    "api/http.proto",
-    "api/client.proto",
-    "logging/type/log_severity.proto",
-    "api/label.proto",
-    "api/launch_stage.proto",
-    "logging/v2/logging_config.proto",
-];
-
-const BASE_URI: &str = "https://raw.githubusercontent.com/googleapis/googleapis/master/google";
-const SOURCE_DIR: &str = "src/proto";
diff --git a/opentelemetry-user-events-logs/CHANGELOG.md b/opentelemetry-user-events-logs/CHANGELOG.md
deleted file mode 100644
index c61e4476ea..0000000000
--- a/opentelemetry-user-events-logs/CHANGELOG.md
+++ /dev/null
@@ -1,15 +0,0 @@
-# Changelog
-
-## Unreleased
-
-## v0.2.0
-
-### Changed
-
-- Bump MSRV to 1.65 [#1318](https://github.com/open-telemetry/opentelemetry-rust/pull/1318)
-
-## v0.1.0
-
-### Added
-
-- Initial Alpha implementation
diff --git a/opentelemetry-user-events-logs/CODEOWNERS b/opentelemetry-user-events-logs/CODEOWNERS
deleted file mode 100644
index d6962a905a..0000000000
--- a/opentelemetry-user-events-logs/CODEOWNERS
+++ /dev/null
@@ -1,5 +0,0 @@
-# Code owners file.
-# This file controls who is tagged for review for any given pull request. - -# For anything not explicitly taken by someone else: -* @open-telemetry/rust-approvers diff --git a/opentelemetry-user-events-logs/Cargo.toml b/opentelemetry-user-events-logs/Cargo.toml deleted file mode 100644 index e0d545f230..0000000000 --- a/opentelemetry-user-events-logs/Cargo.toml +++ /dev/null @@ -1,35 +0,0 @@ -[package] -name = "opentelemetry-user-events-logs" -description = "OpenTelemetry-Rust exporter to userevents" -version = "0.2.0" -edition = "2021" -homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-user-events-logs" -repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-user-events-logs" -readme = "README.md" -rust-version = "1.65.0" -keywords = ["opentelemetry", "log", "trace", "user_events"] -license = "Apache-2.0" - -[dependencies] -eventheader = "0.3.2" -eventheader_dynamic = "0.3.3" -opentelemetry = { version = "0.21", path = "../opentelemetry", features = ["logs"] } -opentelemetry_sdk = { version = "0.21", path = "../opentelemetry-sdk", features = ["logs"] } -async-std = { version="1.6" } -async-trait = { version="0.1" } -chrono = { version="0.4", default-features = false, features=["std"] } - -[dev-dependencies] -opentelemetry-appender-tracing = { path = "../opentelemetry-appender-tracing" } -tracing = { version = "0.1", default-features = false, features = ["std"] } -tracing-core = "0.1.31" -tracing-subscriber = { version = "0.3.0", default-features = false, features = ["registry", "std"] } -microbench = "0.5" - -[features] -logs_level_enabled = ["opentelemetry/logs_level_enabled", "opentelemetry_sdk/logs_level_enabled"] -default=["logs_level_enabled"] - -[[example]] -name = "basic" -path = "examples/basic.rs" diff --git a/opentelemetry-user-events-logs/LICENSE b/opentelemetry-user-events-logs/LICENSE deleted file mode 100644 index 261eeb9e9f..0000000000 --- a/opentelemetry-user-events-logs/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!) The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/opentelemetry-user-events-logs/README.md b/opentelemetry-user-events-logs/README.md
deleted file mode 100644
index 20d186b800..0000000000
--- a/opentelemetry-user-events-logs/README.md
+++ /dev/null
@@ -1,17 +0,0 @@
-![OpenTelemetry — An observability framework for cloud-native software.][splash]
-
-[splash]: https://raw.githubusercontent.com/open-telemetry/opentelemetry-rust/main/assets/logo-text.png
-
-# OpenTelemetry user_events Exporter
-
-## Overview
-
-[user_events](https://docs.kernel.org/trace/user_events.html) is a Linux solution for user process tracing, similar to ETW (Event Tracing for Windows) on Windows. It builds on top of Linux Tracepoints, and so allows user processes to create events and trace data that can be viewed via existing tools like ftrace and perf.
-
-This kernel feature is supported in Linux kernel 5.18 onwards. The feature enables
- - A faster path for tracing from user mode applications, utilizing kernel mode memory address space.
- - User processes can now export telemetry events only when it is useful, i.e., when the registered set of tracepoint events is enabled.
- - This user_events exporter enables applications to use the OpenTelemetry API to capture telemetry events and write them to the user_events subsystem. From user_events, the events can be
- - Captured by agents running locally and listening for specific events within the user_events subsystem.
- - Or monitored in real time using local Linux tools like [perf](https://perf.wiki.kernel.org/index.php/Main_Page) or ftrace.
diff --git a/opentelemetry-user-events-logs/examples/basic.rs b/opentelemetry-user-events-logs/examples/basic.rs
deleted file mode 100644
index 67977cd912..0000000000
--- a/opentelemetry-user-events-logs/examples/basic.rs
+++ /dev/null
@@ -1,38 +0,0 @@
-//! run with `$ cargo run --example basic --all-features`
-
-use opentelemetry_appender_tracing::layer;
-use opentelemetry_sdk::logs::LoggerProvider;
-use opentelemetry_user_events_logs::{ExporterConfig, ReentrantLogProcessor};
-use std::collections::HashMap;
-use tracing::error;
-use tracing_subscriber::prelude::*;
-
-fn init_logger() -> LoggerProvider {
-    let exporter_config = ExporterConfig {
-        default_keyword: 1,
-        keywords_map: HashMap::new(),
-    };
-    let reentrant_processor = ReentrantLogProcessor::new("test", None, exporter_config);
-    LoggerProvider::builder()
-        .with_log_processor(reentrant_processor)
-        .build()
-}
-
-fn main() {
-    // Example with tracing appender.
-    let logger_provider = init_logger();
-    let layer = layer::OpenTelemetryTracingBridge::new(&logger_provider);
-    tracing_subscriber::registry().with(layer).init();
-
-    // event_name is now passed as an attribute, but once https://github.com/tokio-rs/tracing/issues/1426
-    // is done, it can be passed with name: "my-event-name", so it'll be available as metadata for
-    // fast filtering.
-    // event_id is also passed as an attribute now; there is nothing in metadata where a
-    // numeric id can be stored.
-    error!(
-        name: "my-event-name",
-        event_id = 20,
-        user_name = "otel user",
-        user_email = "otel@opentelemetry.io"
-    );
-}
diff --git a/opentelemetry-user-events-logs/src/lib.rs b/opentelemetry-user-events-logs/src/lib.rs
deleted file mode 100644
index 93df8c77e5..0000000000
--- a/opentelemetry-user-events-logs/src/lib.rs
+++ /dev/null
@@ -1,8 +0,0 @@
-//! The user_events exporter enables applications to use the OpenTelemetry API
-//! to capture telemetry events and write them to the user_events subsystem.
-
-#![warn(missing_debug_implementations, missing_docs)]
-
-mod logs;
-
-pub use logs::*;
diff --git a/opentelemetry-user-events-logs/src/logs/exporter.rs b/opentelemetry-user-events-logs/src/logs/exporter.rs
deleted file mode 100644
index 3825af496d..0000000000
--- a/opentelemetry-user-events-logs/src/logs/exporter.rs
+++ /dev/null
@@ -1,347 +0,0 @@
-use async_trait::async_trait;
-use eventheader::{FieldFormat, Level, Opcode};
-use eventheader_dynamic::EventBuilder;
-use std::borrow::Cow;
-use std::collections::HashMap;
-use std::fmt::Debug;
-use std::sync::Arc;
-
-use opentelemetry::{logs::AnyValue, logs::Severity, Key};
-use std::{cell::RefCell, str, time::SystemTime};
-
-/// Provider group associated with the user_events exporter
-pub type ProviderGroup = Option<Cow<'static, str>>;
-
-thread_local! { static EBW: RefCell<EventBuilder> = RefCell::new(EventBuilder::new());}
-
-/// Exporter config
-#[derive(Debug)]
-pub struct ExporterConfig {
-    /// Keywords associated with user_events names;
-    /// these should be mapped to logger_name as of now.
-    pub keywords_map: HashMap<String, u64>,
-    /// default keyword if map is not defined.
-    pub default_keyword: u64,
-}
-
-impl Default for ExporterConfig {
-    fn default() -> Self {
-        ExporterConfig {
-            keywords_map: HashMap::new(),
-            default_keyword: 1,
-        }
-    }
-}
-
-impl ExporterConfig {
-    pub(crate) fn get_log_keyword(&self, name: &str) -> Option<u64> {
-        self.keywords_map.get(name).copied()
-    }
-
-    pub(crate) fn get_log_keyword_or_default(&self, name: &str) -> Option<u64> {
-        if self.keywords_map.is_empty() {
-            Some(self.default_keyword)
-        } else {
-            self.get_log_keyword(name)
-        }
-    }
-}
-pub(crate) struct UserEventsExporter {
-    provider: Arc<eventheader_dynamic::Provider>,
-    exporter_config: ExporterConfig,
-}
-
-const EVENT_ID: &str = "event_id";
-const EVENT_NAME_PRIMARY: &str = "event_name";
-const EVENT_NAME_SECONDARY: &str = "name";
-
-// TBD - How to configure provider name and provider group
-impl UserEventsExporter {
-    pub(crate) fn new(
-        provider_name: &str,
-        _provider_group: ProviderGroup,
-        exporter_config: ExporterConfig,
-    ) -> Self {
-        let mut options = eventheader_dynamic::Provider::new_options();
-        options = *options.group_name(provider_name);
-        let mut eventheader_provider: eventheader_dynamic::Provider =
-            eventheader_dynamic::Provider::new(provider_name, &options);
-        Self::register_keywords(&mut eventheader_provider, &exporter_config);
-        UserEventsExporter {
-            provider: Arc::new(eventheader_provider),
-            exporter_config,
-        }
-    }
-
-    fn register_events(eventheader_provider: &mut eventheader_dynamic::Provider, keyword: u64) {
-        let levels = [
-            eventheader::Level::Informational,
-            eventheader::Level::Verbose,
-            eventheader::Level::Warning,
-            eventheader::Level::Error,
-            eventheader::Level::CriticalError,
-        ];
-
-        for &level in levels.iter() {
-            eventheader_provider.register_set(level, keyword);
-        }
-    }
-
-    fn register_keywords(
-        eventheader_provider: &mut eventheader_dynamic::Provider,
-        exporter_config: &ExporterConfig,
-    ) {
-        if exporter_config.keywords_map.is_empty() {
-            println!(
-                "Register default keyword {}",
-                exporter_config.default_keyword
-            );
-            Self::register_events(eventheader_provider, exporter_config.default_keyword);
-        }
-
-        for keyword in exporter_config.keywords_map.values() {
-            Self::register_events(eventheader_provider, *keyword);
-        }
-    }
-
-    fn add_attribute_to_event(&self, eb: &mut EventBuilder, attrib: &(Key, AnyValue)) {
-        let field_name = &attrib.0.to_string();
-        match attrib.1.to_owned() {
-            AnyValue::Boolean(b) => {
-                eb.add_value(field_name, b, FieldFormat::Boolean, 0);
-            }
-            AnyValue::Int(i) => {
-                eb.add_value(field_name, i, FieldFormat::SignedInt, 0);
-            }
-            AnyValue::Double(f) => {
-                eb.add_value(field_name, f, FieldFormat::Float, 0);
-            }
-            AnyValue::String(s) => {
-                eb.add_str(field_name, &s.to_string(), FieldFormat::Default, 0);
-            }
-            _ => (),
-        }
-    }
-
-    fn get_severity_level(&self, severity: Severity) -> Level {
-        match severity {
-            Severity::Debug
-            | Severity::Debug2
-            | Severity::Debug3
-            | Severity::Debug4
-            | Severity::Trace
-            | Severity::Trace2
-            | Severity::Trace3
-            | Severity::Trace4 => eventheader::Level::Verbose,
-
-            Severity::Info | Severity::Info2 | Severity::Info3 | Severity::Info4 => {
-                eventheader::Level::Informational
-            }
-
-            Severity::Error | Severity::Error2 | Severity::Error3 | Severity::Error4 => {
-                eventheader::Level::Error
-            }
-
-            Severity::Fatal | Severity::Fatal2 | Severity::Fatal3 | Severity::Fatal4 => {
-                eventheader::Level::CriticalError
-            }
-
-            Severity::Warn | Severity::Warn2 | Severity::Warn3 | Severity::Warn4 => {
-                eventheader::Level::Warning
-            }
-        }
-    }
-
-    #[allow(dead_code)]
-    fn enabled(&self, level: u8, keyword: u64) -> bool {
-        // Report whether an event set for this level/keyword pair is enabled.
-        let es = self.provider.find_set(level.into(), keyword);
-        match es {
-            Some(x) => x.enabled(),
-            _ => false,
-        }
-    }
-
-    pub(crate) fn export_log_data(
-        &self,
-        log_data: &opentelemetry_sdk::export::logs::LogData,
-    ) -> opentelemetry_sdk::export::logs::ExportResult {
-        let mut level: Level = Level::Invalid;
-        if log_data.record.severity_number.is_some() {
-            level = self.get_severity_level(log_data.record.severity_number.unwrap());
-        }
-
-        let keyword = self
-            .exporter_config
-            .get_log_keyword_or_default(log_data.instrumentation.name.as_ref());
-
-        if keyword.is_none() {
-            return Ok(());
-        }
-
-        let log_es = if let Some(es) = self
-            .provider
-            .find_set(level.as_int().into(), keyword.unwrap())
-        {
-            es
-        } else {
-            return Ok(());
-        };
-        if log_es.enabled() {
-            EBW.with(|eb| {
-                let mut eb = eb.borrow_mut();
-                let event_tags: u32 = 0; // TBD name and event_tag values
-                eb.reset(log_data.instrumentation.name.as_ref(), event_tags as u16);
-                eb.opcode(Opcode::Info);
-
-                eb.add_value("__csver__", 0x0401u16, FieldFormat::HexInt, 0);
-
-                // Populate Common Schema PartA (envelope data such as the event time).
-                let mut cs_a_count = 0;
-                let event_time: SystemTime = log_data
-                    .record
-                    .timestamp
-                    .unwrap_or(log_data.record.observed_timestamp);
-                cs_a_count += 1; // for event_time
-                eb.add_struct("PartA", cs_a_count, 0);
-                {
-                    let time: String = chrono::DateTime::to_rfc3339(
-                        &chrono::DateTime::<chrono::Utc>::from(event_time),
-                    );
-                    eb.add_str("time", time, FieldFormat::Default, 0);
-                }
-                // Populate Common Schema PartC (custom attributes).
-                let (mut is_event_id, mut event_id) = (false, 0);
-                let (mut is_event_name, mut event_name) = (false, "");
-
-                if let Some(attr_list) = &log_data.record.attributes {
-                    let (mut is_part_c_present, mut cs_c_bookmark, mut cs_c_count) = (false, 0, 0);
-                    for attrib in attr_list.iter() {
-                        match (attrib.0.as_str(), &attrib.1) {
-                            (EVENT_ID, AnyValue::Int(value)) => {
-                                is_event_id = true;
-                                event_id = *value;
-                                continue;
-                            }
-                            (EVENT_NAME_PRIMARY, AnyValue::String(value)) => {
-                                is_event_name = true;
-                                event_name = value.as_str();
-                                continue;
-                            }
-                            (EVENT_NAME_SECONDARY, AnyValue::String(value)) => {
-                                if !is_event_name {
-                                    event_name = value.as_str();
-                                }
-                                continue;
-                            }
-                            _ => {
-                                if !is_part_c_present {
-                                    eb.add_struct_with_bookmark("PartC", 1, 0, &mut cs_c_bookmark);
-                                    is_part_c_present = true;
-                                }
-                                self.add_attribute_to_event(&mut eb, attrib);
-                                cs_c_count += 1;
-                            }
-                        }
-                    }
-
-                    if is_part_c_present {
-                        eb.set_struct_field_count(cs_c_bookmark, cs_c_count);
-                    }
-                }
-                // Populate Common Schema PartB (log record data).
-                let mut cs_b_bookmark: usize = 0;
-                let mut cs_b_count = 0;
-                eb.add_struct_with_bookmark("PartB", 1, 0, &mut cs_b_bookmark);
-                eb.add_str("_typeName", "Logs", FieldFormat::Default, 0);
-                cs_b_count += 1;
-
-                if log_data.record.body.is_some() {
-                    eb.add_str(
-                        "body",
-                        match log_data.record.body.as_ref().unwrap() {
-                            AnyValue::Int(value) => value.to_string(),
-                            AnyValue::String(value) => value.to_string(),
-                            AnyValue::Boolean(value) => value.to_string(),
-                            AnyValue::Double(value) => value.to_string(),
-                            AnyValue::Bytes(value) => String::from_utf8_lossy(value).to_string(),
-                            AnyValue::ListAny(_value) => "".to_string(),
-                            AnyValue::Map(_value) => "".to_string(),
-                        },
-                        FieldFormat::Default,
-                        0,
-                    );
-                    cs_b_count += 1;
-                }
-                if level != Level::Invalid {
-                    eb.add_value("severityNumber", level.as_int(), FieldFormat::SignedInt, 0);
-                    cs_b_count += 1;
-                }
-                if log_data.record.severity_text.is_some() {
-                    eb.add_str(
-                        "severityText",
-                        log_data.record.severity_text.as_ref().unwrap().as_ref(),
-                        FieldFormat::Default, // severityText is a string, not a signed integer
-                        0,
-                    );
-                    cs_b_count += 1;
-                }
-                if is_event_id {
-                    eb.add_value("eventId", event_id, FieldFormat::SignedInt, 0);
-                    cs_b_count += 1;
-                }
-                if !event_name.is_empty() {
-                    eb.add_str("name", event_name, FieldFormat::Default, 0);
-                    cs_b_count += 1;
-                }
-                eb.set_struct_field_count(cs_b_bookmark, cs_b_count);
-
-                eb.write(&log_es, None, None);
-            });
-            return Ok(());
-        }
-        Ok(())
-    }
-}
-
-impl Debug for UserEventsExporter {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        f.write_str("user_events log exporter")
-    }
-}
-
-#[async_trait]
-impl opentelemetry_sdk::export::logs::LogExporter for UserEventsExporter {
-    async fn export(
-        &mut self,
-        batch: Vec<LogData>,
-    ) -> opentelemetry::logs::LogResult<()> {
-        for log_data in batch {
-            let _ = self.export_log_data(&log_data);
-        }
-        Ok(())
-    }
-
-    #[cfg(feature = "logs_level_enabled")]
-    fn event_enabled(&self, level: Severity, _target: &str, name: &str) -> bool {
-        let (found, keyword) = if self.exporter_config.keywords_map.is_empty() {
-            (true, self.exporter_config.default_keyword)
-        } else {
-            // TBD - target is not used as of now for comparison.
-            match self.exporter_config.get_log_keyword(name) {
-                Some(x) => (true, x),
-                _ => (false, 0),
-            }
-        };
-        if !found {
-            return false;
-        }
-        let es = self
-            .provider
-            .find_set(self.get_severity_level(level), keyword);
-        match es {
-            Some(x) => x.enabled(),
-            _ => false,
-        }
-    }
-}
diff --git a/opentelemetry-user-events-logs/src/logs/mod.rs b/opentelemetry-user-events-logs/src/logs/mod.rs
deleted file mode 100644
index 7f0e26819a..0000000000
--- a/opentelemetry-user-events-logs/src/logs/mod.rs
+++ /dev/null
@@ -1,5 +0,0 @@
-mod exporter;
-pub use exporter::*;
-
-mod reentrant_logprocessor;
-pub use reentrant_logprocessor::*;
diff --git a/opentelemetry-user-events-logs/src/logs/reentrant_logprocessor.rs b/opentelemetry-user-events-logs/src/logs/reentrant_logprocessor.rs
deleted file mode 100644
index d66447b83a..0000000000
--- a/opentelemetry-user-events-logs/src/logs/reentrant_logprocessor.rs
+++ /dev/null
@@ -1,61 +0,0 @@
-use std::fmt::Debug;
-
-use opentelemetry::logs::LogResult;
-use opentelemetry_sdk::export::logs::LogData;
-
-#[cfg(feature = "logs_level_enabled")]
-use opentelemetry_sdk::export::logs::LogExporter;
-
-use crate::logs::exporter::ExporterConfig;
-use crate::logs::exporter::*;
-
-/// This log processor exports without synchronization.
-/// It is currently only used in the user_events exporter, where we know
-/// that the underlying exporter is safe under concurrent calls.
-
-#[derive(Debug)]
-pub struct ReentrantLogProcessor {
-    event_exporter: UserEventsExporter,
-}
-
-impl ReentrantLogProcessor {
-    /// constructor
-    pub fn new(
-        provider_name: &str,
-        provider_group: ProviderGroup,
-        exporter_config: ExporterConfig,
-    ) -> Self {
-        let exporter = UserEventsExporter::new(provider_name, provider_group, exporter_config);
-        ReentrantLogProcessor {
-            event_exporter: exporter,
-        }
-    }
-}
-
-impl opentelemetry_sdk::logs::LogProcessor for ReentrantLogProcessor {
-    fn emit(&self, data: LogData) {
-        _ = self.event_exporter.export_log_data(&data);
-    }
-
-    // This is a no-op, as this processor doesn't keep anything
-    // in memory to be flushed out.
-    fn force_flush(&self) -> LogResult<()> {
-        Ok(())
-    }
-
-    // This is a no-op; no special cleanup is required before
-    // shutdown.
-    fn shutdown(&mut self) -> LogResult<()> {
-        Ok(())
-    }
-
-    #[cfg(feature = "logs_level_enabled")]
-    fn event_enabled(
-        &self,
-        level: opentelemetry::logs::Severity,
-        target: &str,
-        name: &str,
-    ) -> bool {
-        self.event_exporter.event_enabled(level, target, name)
-    }
-}
diff --git a/opentelemetry-user-events-metrics/CHANGELOG.md b/opentelemetry-user-events-metrics/CHANGELOG.md
deleted file mode 100644
index 6103307eff..0000000000
--- a/opentelemetry-user-events-metrics/CHANGELOG.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# Changelog
-
-## Unreleased
-
-## v0.2.0
-
-- Fix aggregation selector and temporality so all instruments are aggregated
-  correctly with the expected delta temporality.
-  [#1287](https://github.com/open-telemetry/opentelemetry-rust/pull/1287).
-
-### Changed
-
-- Bump MSRV to 1.65 [#1318](https://github.com/open-telemetry/opentelemetry-rust/pull/1318)
-- Include error diagnosing messages for registering tracepoint
-  [#1273](https://github.com/open-telemetry/opentelemetry-rust/pull/1273).
-- Add version, protocol to schema
-  [#1224](https://github.com/open-telemetry/opentelemetry-rust/pull/1224).
-
-## v0.1.0
-
-### Added
-
-- Initial Alpha implementation
diff --git a/opentelemetry-user-events-metrics/CODEOWNERS b/opentelemetry-user-events-metrics/CODEOWNERS
deleted file mode 100644
index d6962a905a..0000000000
--- a/opentelemetry-user-events-metrics/CODEOWNERS
+++ /dev/null
@@ -1,5 +0,0 @@
-# Code owners file.
-# This file controls who is tagged for review for any given pull request.
-
-# For anything not explicitly taken by someone else:
-* @open-telemetry/rust-approvers
diff --git a/opentelemetry-user-events-metrics/Cargo.toml b/opentelemetry-user-events-metrics/Cargo.toml
deleted file mode 100644
index 7b8bbd1607..0000000000
--- a/opentelemetry-user-events-metrics/Cargo.toml
+++ /dev/null
@@ -1,26 +0,0 @@
-[package]
-name = "opentelemetry-user-events-metrics"
-version = "0.2.0"
-description = "OpenTelemetry metrics exporter to user events"
-homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-user-events-metrics"
-repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-user-events-metrics"
-readme = "README.md"
-keywords = ["opentelemetry", "metrics", "user-events"]
-license = "Apache-2.0"
-edition = "2021"
-rust-version = "1.65"
-
-[dependencies]
-opentelemetry = { version = "0.21", path = "../opentelemetry", features = ["metrics"] }
-opentelemetry_sdk = { version = "0.21", path = "../opentelemetry-sdk", features = ["metrics", "rt-tokio"] }
-opentelemetry-proto = { version = "0.4", path = "../opentelemetry-proto", features = ["gen-tonic", "metrics"] }
-eventheader = { version = "= 0.3.2" }
-async-trait = "0.1"
-prost = "0.11"
-
-[dev-dependencies]
-tokio = { version = "1.0", features = ["full"] }
-
-[[example]]
-name = "basic"
-path = "examples/basic.rs"
diff --git a/opentelemetry-user-events-metrics/LICENSE b/opentelemetry-user-events-metrics/LICENSE
deleted file mode 100644
index 261eeb9e9f..0000000000
--- a/opentelemetry-user-events-metrics/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/opentelemetry-user-events-metrics/README.md b/opentelemetry-user-events-metrics/README.md
deleted file mode 100644
index 20d186b800..0000000000
--- a/opentelemetry-user-events-metrics/README.md
+++ /dev/null
@@ -1,17 +0,0 @@
-![OpenTelemetry — An observability framework for cloud-native software.][splash]
-
-[splash]: https://raw.githubusercontent.com/open-telemetry/opentelemetry-rust/main/assets/logo-text.png
-
-# OpenTelemetry user_events Exporter
-
-## Overview
-
-[user_events](https://docs.kernel.org/trace/user_events.html) is a Linux solution for user-process tracing, similar to ETW (Event Tracing for Windows) on Windows. It builds on top of Linux Tracepoints, and so allows user processes to create events and trace data that can be viewed via existing tools like ftrace and perf.
-
-This kernel feature is supported in Linux kernel 5.18 and onwards. The feature enables
- - A faster path for tracing from user-mode applications, utilizing kernel-mode memory address space.
- - User processes to export telemetry events only when it is useful, i.e., when the registered set of tracepoint events is enabled.
-
-This user_events exporter enables applications to use the OpenTelemetry API to capture telemetry events and write them to the user_events subsystem. From user_events, the events can be
- - Captured by agents running locally, listening for specific events within the user_events subsystem.
- - Or monitored in real time using local Linux tools like [perf](https://perf.wiki.kernel.org/index.php/Main_Page) or ftrace.
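The overview above describes an enabled-gated write model: serialization work is only done when a local listener has actually enabled the tracepoint. Here is a minimal sketch of that pattern, reusing the eventheader types that the deleted sources below use; `encode_metrics` is a hypothetical stand-in for the crate's real protobuf serialization, not an API of any library:

```rust
use eventheader::_internal as ehi;
use std::pin::Pin;

/// Hypothetical stand-in for the crate's protobuf serialization step.
fn encode_metrics() -> Vec<u8> {
    Vec::new() // placeholder payload
}

/// Only pay the serialization cost when a local listener has enabled the
/// tracepoint; otherwise exporting is a no-op.
fn export(trace_point: Pin<&ehi::TracepointState>) {
    if trace_point.enabled() {
        let payload = encode_metrics();
        // The deleted tracepoint module below performs the actual write,
        // adding the protocol, version, and rel_loc fields that the event
        // definition expects.
        let _ = payload;
    }
}
```

The full flow, including registration and the rel_loc payload layout, is in the deleted exporter and tracepoint modules that follow.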
diff --git a/opentelemetry-user-events-metrics/examples/basic.rs b/opentelemetry-user-events-metrics/examples/basic.rs
deleted file mode 100644
index 82fde30cca..0000000000
--- a/opentelemetry-user-events-metrics/examples/basic.rs
+++ /dev/null
@@ -1,53 +0,0 @@
-//! run with `$ cargo run --example basic --all-features`
-use opentelemetry::{
-    metrics::{MeterProvider as _, Unit},
-    KeyValue,
-};
-use opentelemetry_sdk::{
-    metrics::{PeriodicReader, SdkMeterProvider},
-    runtime, Resource,
-};
-use opentelemetry_user_events_metrics::MetricsExporter;
-
-fn init_metrics(exporter: MetricsExporter) -> SdkMeterProvider {
-    let reader = PeriodicReader::builder(exporter, runtime::Tokio).build();
-    SdkMeterProvider::builder()
-        .with_resource(Resource::new(vec![KeyValue::new(
-            "service.name",
-            "metric-demo",
-        )]))
-        .with_reader(reader)
-        .build()
-}
-
-#[tokio::main]
-#[allow(unused_must_use)]
-async fn main() -> Result<(), Box<dyn std::error::Error>> {
-    let exporter = opentelemetry_user_events_metrics::MetricsExporter::new();
-    let meter_provider = init_metrics(exporter);
-
-    let meter = meter_provider.versioned_meter(
-        "user-event-test",
-        Some("test-version"),
-        Some("test_url"),
-        Some(vec![KeyValue::new("key", "value")]),
-    );
-    let c = meter
-        .f64_counter("counter_test")
-        .with_description("test_description")
-        .with_unit(Unit::new("test_unit"))
-        .init();
-
-    c.add(
-        1.0,
-        [
-            KeyValue::new("mykey1", "myvalue1"),
-            KeyValue::new("mykey2", "myvalue2"),
-        ]
-        .as_ref(),
-    );
-
-    meter_provider.shutdown()?;
-
-    Ok(())
-}
diff --git a/opentelemetry-user-events-metrics/src/exporter/mod.rs b/opentelemetry-user-events-metrics/src/exporter/mod.rs
deleted file mode 100644
index c6e3caca2a..0000000000
--- a/opentelemetry-user-events-metrics/src/exporter/mod.rs
+++ /dev/null
@@ -1,92 +0,0 @@
-use crate::transform::transform_resource_metrics;
-use async_trait::async_trait;
-use opentelemetry::metrics::{MetricsError, Result};
-use opentelemetry_sdk::metrics::{
-    data::{ResourceMetrics, Temporality},
-    exporter::PushMetricsExporter,
-    reader::{AggregationSelector, DefaultAggregationSelector, TemporalitySelector},
-    Aggregation, InstrumentKind,
-};
-
-use crate::tracepoint;
-use eventheader::_internal as ehi;
-use prost::Message;
-use std::fmt::{Debug, Formatter};
-use std::pin::Pin;
-
-pub struct MetricsExporter {
-    trace_point: Pin<Box<ehi::TracepointState>>,
-}
-
-impl MetricsExporter {
-    pub fn new() -> MetricsExporter {
-        let trace_point = Box::pin(ehi::TracepointState::new(0));
-        // This is unsafe because if the code is used in a shared object,
-        // the event MUST be unregistered before the shared object unloads.
-        unsafe {
-            let _result = tracepoint::register(trace_point.as_ref());
-        }
-        MetricsExporter { trace_point }
-    }
-}
-
-impl Default for MetricsExporter {
-    fn default() -> Self {
-        Self::new()
-    }
-}
-
-impl TemporalitySelector for MetricsExporter {
-    // This matches the OTLP exporter's delta temporality.
-    fn temporality(&self, kind: InstrumentKind) -> Temporality {
-        match kind {
-            InstrumentKind::Counter
-            | InstrumentKind::ObservableCounter
-            | InstrumentKind::ObservableGauge
-            | InstrumentKind::Histogram => Temporality::Delta,
-            InstrumentKind::UpDownCounter | InstrumentKind::ObservableUpDownCounter => {
-                Temporality::Cumulative
-            }
-        }
-    }
-}
-
-impl AggregationSelector for MetricsExporter {
-    // TODO: this should ideally be done at SDK level by default
-    // without exporters having to do it.
- fn aggregation(&self, kind: InstrumentKind) -> Aggregation { - DefaultAggregationSelector::new().aggregation(kind) - } -} - -impl Debug for MetricsExporter { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - f.write_str("user_events metrics exporter") - } -} - -#[async_trait] -impl PushMetricsExporter for MetricsExporter { - async fn export(&self, metrics: &mut ResourceMetrics) -> Result<()> { - if self.trace_point.enabled() { - let proto_message = transform_resource_metrics(metrics); - - let mut byte_array = Vec::new(); - let _encode_result = proto_message - .encode(&mut byte_array) - .map_err(|err| MetricsError::Other(err.to_string()))?; - let _result = tracepoint::write(&self.trace_point, byte_array.as_slice()); - } - Ok(()) - } - - async fn force_flush(&self) -> Result<()> { - Ok(()) // In this implementation, flush does nothing - } - - fn shutdown(&self) -> Result<()> { - // TracepointState automatically unregisters when dropped - // https://github.com/microsoft/LinuxTracepoints-Rust/blob/main/eventheader/src/native.rs#L618 - Ok(()) - } -} diff --git a/opentelemetry-user-events-metrics/src/lib.rs b/opentelemetry-user-events-metrics/src/lib.rs deleted file mode 100644 index 5517451e0c..0000000000 --- a/opentelemetry-user-events-metrics/src/lib.rs +++ /dev/null @@ -1,5 +0,0 @@ -mod exporter; -mod tracepoint; -mod transform; - -pub use exporter::MetricsExporter; diff --git a/opentelemetry-user-events-metrics/src/tracepoint/mod.rs b/opentelemetry-user-events-metrics/src/tracepoint/mod.rs deleted file mode 100644 index acddfe38df..0000000000 --- a/opentelemetry-user-events-metrics/src/tracepoint/mod.rs +++ /dev/null @@ -1,117 +0,0 @@ -use core::ffi; -use eventheader::_internal as ehi; -use opentelemetry::{global, metrics::MetricsError}; -use std::panic; -use std::pin::Pin; - -/// Protocol constant -const PROTOCOL_FIELD_VALUE: u32 = 0; -/// Protobuf definition version -const PROTOBUF_VERSION: &[u8; 8] = b"v0.19.00"; - -/// This is the command string for the event. It needs to follow the -/// [Command Format](https://docs.kernel.org/trace/user_events.html#command-format) -/// syntax, it needs to end with a "\0", and it needs to stay in sync with the -/// write function. -/// -/// Syntax is: "EventName Field1Type Field1Name;Field2Type Field2Name". -/// -/// For this event: -/// -/// - Event is named "otlp_metrics". -/// - Field 1 is named "protocol". Value 0 corresponds to protobuf. -/// - Field 2 is named "version". Corresponds to protocol version (protobuf version). -/// - Field 3 is named "buffer" and has type "variable-length array of u8". -/// -/// "__rel_loc" is a special type for variable-length fields. It requires -/// special handling in the write() method. -const METRICS_EVENT_DEF: &[u8] = - b"otlp_metrics u32 protocol;char[8] version;__rel_loc u8[] buffer;\0"; - -/// If the tracepoint is registered and enabled, writes an event. If the tracepoint -/// is unregistered or disabled, this does nothing and returns 0. You should usually -/// check [`enabled()`] and only build the buffer and call `write()` if `enabled()` -/// returns true. -/// -/// Requires: PROTOBUF_VERSION.len() == 8, buffer.len() < 65536. -/// -/// Return value is 0 for success or an errno code for error. The return value is -/// provided to help with debugging and should usually be ignored in release builds. -pub fn write(trace_point: &ehi::TracepointState, buffer: &[u8]) -> i32 { - // This must stay in sync with the METRICS_EVENT_DEF string. 
-    // Return error -1 if the buffer exceeds the max size
-    if buffer.len() > u16::MAX as usize {
-        eprintln!("Buffer exceeds max length.");
-        return -1;
-    }
-
-    if PROTOBUF_VERSION.len() != 8 {
-        eprintln!("Version must be char[8].");
-        return -1;
-    }
-
-    // The rel_loc for the buffer field stores the size and offset of the buffer.
-    // - High 16 bits store the size = buffer.len()
-    // - Low 16 bits store the offset of the buffer from the end of the rel_loc field = 0.
-    let buffer_rel_loc: u32 = (buffer.len() as u32) << 16;
-
-    trace_point.write(&mut [
-        // mut because the write method does some fix-ups.
-        ehi::EventDataDescriptor::zero(), // First item before buffer MUST be zero().
-        ehi::EventDataDescriptor::from_value(&PROTOCOL_FIELD_VALUE), // protocol value 0 for protobuf
-        ehi::EventDataDescriptor::from_slice(PROTOBUF_VERSION), // protobuf definition version
-        ehi::EventDataDescriptor::from_value(&buffer_rel_loc), // rel_loc for the buffer field.
-        ehi::EventDataDescriptor::from_slice(buffer), // buffer field.
-    ])
-}
-
-/// Registers the passed-in tracepoint.
-///
-/// Requires: this tracepoint is not currently registered.
-/// The tracepoint must be in a Pin<&TracepointState> because we must ensure it will never be moved.
-///
-/// Return value is 0 for success or -1 for a failed registration.
-///
-/// # Safety
-///
-/// If this code is used in a shared object, the tracepoint MUST be
-/// unregistered before the shared object unloads from memory.
pub unsafe fn register(trace_point: Pin<&ehi::TracepointState>) -> i32 {
-    debug_assert!(METRICS_EVENT_DEF[METRICS_EVENT_DEF.len() - 1] == b'\0');
-
-    // Returns errno code 95 if the trace/debug file systems are not mounted.
-    // Returns errno code 13 if there are insufficient permissions.
-    // If the tracepoint doesn't exist, it will be created automatically.
-    let result = panic::catch_unwind(|| {
-        // CStr::from_bytes_with_nul_unchecked is ok because METRICS_EVENT_DEF ends with "\0".
-        trace_point.register(ffi::CStr::from_bytes_with_nul_unchecked(METRICS_EVENT_DEF))
-    });
-
-    match result {
-        Ok(value) => {
-            if value == 0 {
-                // Temporary print as a measure for quick testing;
-                // will be replaced with a proper logging mechanism.
-                println!("Tracepoint registered successfully.")
-            } else if value == 95 {
-                global::handle_error(MetricsError::Other(
-                    "Trace/debug file systems are not mounted.".into(),
-                ));
-            } else if value == 13 {
-                global::handle_error(MetricsError::Other(
-                    "Insufficient permissions. You need read/write/execute permissions to the user_events tracing directory.".into(),
-                ));
-            }
-            value
-        }
-        // We don't want to ever panic, so we catch the error and return a unique code for retry.
-        Err(err) => {
-            global::handle_error(MetricsError::Other(format!(
-                "Tracepoint failed to register: {:?}.",
-                err,
-            )));
-            -1
-        }
-    }
-}
diff --git a/opentelemetry-user-events-metrics/src/transform/mod.rs b/opentelemetry-user-events-metrics/src/transform/mod.rs
deleted file mode 100644
index ec736485b0..0000000000
--- a/opentelemetry-user-events-metrics/src/transform/mod.rs
+++ /dev/null
@@ -1,117 +0,0 @@
-use opentelemetry::{global, metrics::MetricsError};
-use opentelemetry_proto::tonic::common::v1::InstrumentationScope as TonicInstrumentationScope;
-use opentelemetry_proto::tonic::resource::v1::Resource as TonicResource;
-use opentelemetry_proto::tonic::{
-    collector::metrics::v1::ExportMetricsServiceRequest,
-    metrics::v1::{
-        exemplar::Value as TonicExemplarValue, metric::Data as TonicMetricData,
-        number_data_point::Value as TonicDataPointValue,
-        AggregationTemporality as TonicTemporality, DataPointFlags as TonicDataPointFlags,
-        Metric as TonicMetric, NumberDataPoint as TonicNumberDataPoint,
-        ResourceMetrics as TonicResourceMetrics, ScopeMetrics as TonicScopeMetrics,
-        Sum as TonicSum,
-    },
-};
-use opentelemetry_sdk::metrics::data::{
-    Metric as SdkMetric, ResourceMetrics as SDKResourceMetrics, ScopeMetrics as SdkScopeMetrics,
-    Sum as SdkSum,
-};
-use opentelemetry_sdk::Resource as SdkResource;
-use std::any::Any;
-use std::fmt;
-use std::time::{Duration, SystemTime, UNIX_EPOCH};
-
-pub(crate) fn transform_resource_metrics(
-    metrics: &SDKResourceMetrics,
-) -> ExportMetricsServiceRequest {
-    ExportMetricsServiceRequest {
-        resource_metrics: vec![TonicResourceMetrics {
-            resource: transform_resource(&metrics.resource),
-            scope_metrics: transform_scope_metrics(&metrics.scope_metrics),
-            schema_url: metrics
-                .resource
-                .schema_url()
-                .map(Into::into)
-                .unwrap_or_default(),
-        }],
-    }
-}
-
-fn transform_resource(r: &SdkResource) -> Option<TonicResource> {
-    if r.is_empty() {
-        return None;
-    }
-
-    Some(TonicResource {
-        attributes: r.iter().map(Into::into).collect(),
-        dropped_attributes_count: 0,
-    })
-}
-
-fn transform_scope_metrics(sms: &[SdkScopeMetrics]) -> Vec<TonicScopeMetrics> {
-    sms.iter()
-        .map(|sm| TonicScopeMetrics {
-            scope: Some(TonicInstrumentationScope::from(&sm.scope)),
-            metrics: transform_metrics(&sm.metrics),
-            schema_url: sm
-                .scope
-                .schema_url
-                .as_ref()
-                .map(ToString::to_string)
-                .unwrap_or_default(),
-        })
-        .collect()
-}
-
-fn transform_metrics(metrics: &[SdkMetric]) -> Vec<TonicMetric> {
-    metrics
-        .iter()
-        .map(|metric| TonicMetric {
-            name: metric.name.to_string(),
-            description: metric.description.to_string(),
-            unit: metric.unit.as_str().to_string(),
-            data: transform_data(metric.data.as_any()),
-        })
-        .collect()
-}
-
-fn transform_data(data: &dyn Any) -> Option<TonicMetricData> {
-    if let Some(sum) = data.downcast_ref::<SdkSum<u64>>() {
-        Some(TonicMetricData::Sum(transform_sum(sum)))
-    } else if let Some(sum) = data.downcast_ref::<SdkSum<i64>>() {
-        Some(TonicMetricData::Sum(transform_sum(sum)))
-    } else if let Some(sum) = data.downcast_ref::<SdkSum<f64>>() {
-        Some(TonicMetricData::Sum(transform_sum(sum)))
-    } else {
-        global::handle_error(MetricsError::Other("unknown aggregator".into()));
-        None
-    }
-}
-
-fn transform_sum<T: Into<TonicExemplarValue> + Into<TonicDataPointValue> + Copy>(
-    sum: &SdkSum<T>,
-) -> TonicSum {
-    TonicSum {
-        data_points: sum
-            .data_points
-            .iter()
-            .map(|dp| TonicNumberDataPoint {
-                attributes: dp.attributes.iter().map(Into::into).collect(),
- 
start_time_unix_nano: dp.start_time.map(to_nanos).unwrap_or_default(), - time_unix_nano: dp.time.map(to_nanos).unwrap_or_default(), - // No support for exemplars - exemplars: Vec::new(), - flags: TonicDataPointFlags::default() as u32, - value: Some(dp.value.into()), - }) - .collect(), - aggregation_temporality: TonicTemporality::from(sum.temporality).into(), - is_monotonic: sum.is_monotonic, - } -} - -fn to_nanos(time: SystemTime) -> u64 { - time.duration_since(UNIX_EPOCH) - .unwrap_or_else(|_| Duration::from_secs(0)) - .as_nanos() as u64 -} diff --git a/opentelemetry-zpages/CHANGELOG.md b/opentelemetry-zpages/CHANGELOG.md deleted file mode 100644 index d6d233d59f..0000000000 --- a/opentelemetry-zpages/CHANGELOG.md +++ /dev/null @@ -1,42 +0,0 @@ -# Changelog - -## vNext - -## v0.6.0 - -### Changed - -- Bump MSRV to 1.65 [#1318](https://github.com/open-telemetry/opentelemetry-rust/pull/1318) -- Bump MSRV to 1.64 [#1203](https://github.com/open-telemetry/opentelemetry-rust/pull/1203) -- Use tonic based generated files [#1214](https://github.com/open-telemetry/opentelemetry-rust/pull/1214) - -## v0.5.0 - -### Updates - -- Update to opentelemetry-api v0.20.0 - -## v0.4.0 - -- Update to opentelemetry v0.19.0 -- Update to opentelemetry-proto v0.2.0 -- Bump MSRV to 1.57 [#953](https://github.com/open-telemetry/opentelemetry-rust/pull/953). -- Update dependencies and bump MSRV to 1.60 [#969](https://github.com/open-telemetry/opentelemetry-rust/pull/969). - -## v0.3.0 - -### Changed - -- Update to opentelemetry v0.18.0 - -## v0.2.0 - -### Changed - -- Update to opentelemetry v0.17.0 - -## v0.1.0 - -### Added - -- Add Tracez http endpoint. diff --git a/opentelemetry-zpages/CODEOWNERS b/opentelemetry-zpages/CODEOWNERS deleted file mode 100644 index d6962a905a..0000000000 --- a/opentelemetry-zpages/CODEOWNERS +++ /dev/null @@ -1,5 +0,0 @@ -# Code owners file. -# This file controls who is tagged for review for any given pull request. 
-
-# For anything not explicitly taken by someone else:
-* @open-telemetry/rust-approvers
diff --git a/opentelemetry-zpages/Cargo.toml b/opentelemetry-zpages/Cargo.toml
deleted file mode 100644
index 93014e3832..0000000000
--- a/opentelemetry-zpages/Cargo.toml
+++ /dev/null
@@ -1,40 +0,0 @@
-[package]
-name = "opentelemetry-zpages"
-version = "0.6.0"
-description = "ZPages implementation for OpenTelemetry"
-homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/master/opentelemetry-zpages"
-repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/master/opentelemetry-zpages"
-readme = "README.md"
-categories = [
-    "development-tools::debugging",
-    "development-tools::profiling",
-    "asynchronous",
-]
-keywords = ["opentelemetry", "zipkin", "tracing", "async"]
-license = "Apache-2.0"
-edition = "2021"
-rust-version = "1.65"
-
-[package.metadata.docs.rs]
-all-features = true
-rustdoc-args = ["--cfg", "docsrs"]
-
-[dependencies]
-opentelemetry = { version = "0.21", path = "../opentelemetry" }
-opentelemetry_sdk = { version = "0.21", path = "../opentelemetry-sdk", default-features = false, features = ["trace"] }
-opentelemetry-proto = { version = "0.4", path = "../opentelemetry-proto", features = ["zpages", "gen-tonic", "with-serde"], default-features = false }
-async-channel = "1.6"
-futures-channel = "0.3"
-futures-util = { version = "0.3", default-features = false, features = ["std"] }
-serde = "1.0"
-serde_json = "1.0"
-
-[dev-dependencies]
-tokio = { version = "1.0", features = ["macros", "rt"] }
-opentelemetry_sdk = { path = "../opentelemetry-sdk", features = ["trace", "testing"] }
-rand = "0.8"
-hyper = { version = "0.14", features = ["full"] }
-
-[[example]]
-name = "zpages"
-path = "examples/zpages.rs"
diff --git a/opentelemetry-zpages/DESIGN.md b/opentelemetry-zpages/DESIGN.md
deleted file mode 100644
index 1619959951..0000000000
--- a/opentelemetry-zpages/DESIGN.md
+++ /dev/null
@@ -1,60 +0,0 @@
-# Design proposal
-
-## Problem statement
-> zPages are an in-process alternative to external exporters. When included, they collect and aggregate tracing and metrics information in the background; this data is served on web pages when requested.
-
-As noted in the [OpenTelemetry zPages spec](https://github.com/open-telemetry/opentelemetry-specification/blob/main/experimental/trace/zpages.md), zPages is a tool to help diagnose application issues as well as instrumentation issues without an external service.
-
-There are several types of zPages defined in the spec. Currently, we will only implement tracez.
-
-## Prior art
-Many language clients in OpenTelemetry already implement at least part of the zPages service, such as [Cpp](https://github.com/open-telemetry/opentelemetry-cpp/blob/main/ext/src/zpages/README.md).
-
-## Overall design
-
-Diagram
-
-```
-                           ┌─────────────────────────┐              ┌────────────────────────┐
-                           │                         │ZPage Message │                        │
- ┌────────┐Register        │  ZPage Span Processor   ├──────────────►    Span Aggregator     │
- │  Span  ├────────►       │                         │              │                        │
- └────────┘                └─────────────────────────┘              └───────────▲────────────┘
-                                                                                │
-                           ┌─────────────────────────┐                          │
-                           │                         │                          │
-                           │        Web Server       │                          │
-                           │                         │                          │
-                           │   ┌─────────────────┐   │       ZPage Query        │
-                           │   │    Serializer   │   ├──────────────────────────┘
-                           │   │                 │   │
-                           │   └─────────────────┘   │
-                           │                         │
-                           │                         │
-                           └─────────────────────────┘
-```
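To make the flow in the diagram concrete, here is a minimal sketch of the channel-based split. It is a simplified stand-in for the real `ZPagesSpanProcessor`/`SpanAggregator` pair: the `Message` enum and the span-name string payloads are placeholders for the actual `TracezMessage` and `SpanData` types, and the real aggregator also answers web-server queries over a oneshot channel.

```rust
use async_channel::{unbounded, Receiver, Sender};

// Placeholder for the real TracezMessage enum.
#[derive(Debug)]
enum Message {
    SpanStart(String),
    SpanEnd(String),
    ShutDown,
}

// Processor side: try_send never blocks, so span start/end hooks never stall.
fn on_end(tx: &Sender<Message>, span_name: &str) {
    let _ = tx.try_send(Message::SpanEnd(span_name.to_string()));
}

// Aggregator side: a single worker loop owns all mutable state, so no locks
// are needed and serving a web query cannot block the span processor.
async fn aggregate(rx: Receiver<Message>) {
    while let Ok(msg) = rx.recv().await {
        match msg {
            Message::SpanStart(name) => println!("sample running span: {name}"),
            Message::SpanEnd(name) => println!("move {name} into a latency bucket"),
            Message::ShutDown => break,
        }
    }
}

#[tokio::main(flavor = "current_thread")]
async fn main() {
    let (tx, rx) = unbounded();
    let worker = tokio::spawn(aggregate(rx));
    on_end(&tx, "demo-span");
    let _ = tx.send(Message::ShutDown).await;
    let _ = worker.await;
}
```

Because the processor only ever does a non-blocking `try_send`, span lifecycle hooks never wait on the aggregator; the aggregator serializes all state changes through its single worker loop.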
-
-### ZPages Span Processor
-This struct is needed mainly to integrate with the existing tracing API. Most of its work will be delegated to the `Span Aggregator`. This struct will implement the `Span Processor` and `Tracez` traits.
-
-### Span Aggregator
-The span aggregator will maintain an internal data store to allow users to track:
-1. The number of currently running spans.
-2. The number of errored spans.
-3. The number of spans in different latency buckets.
-4. Examples of currently running spans.
-5. Examples of error spans.
-6. Span examples with different run times, distributed across 9 buckets.
-
-The span aggregator should maintain a worker loop to handle the messages from the zPages span processor and the web server. This worker loop should be non-blocking, so the zPages span processor will not block the span export at any point.
-
-
-## Design ideas
-1. Span aggregator embedded into the zPages span processor
-
-One alternative choice other than using channels is to embed the span aggregator. Then when a span starts, a span ends, or there is an incoming http request, we can lock the span aggregator to change its state.
-
-However, this approach would block the `on_start` or `on_end` methods of the zPages span processor whenever the span aggregator is busy serving an http request, which would in turn block the span processor chain from moving forward when a span ends.
-
-This approach could have avoided the cloning when a span starts. But unfortunately the current span API doesn't allow us to get the span name without cloning the `Span` into a `SpanData` object. Thus, the cloning cannot be avoided even if we embed the span aggregator into the zPages span processor.
diff --git a/opentelemetry-zpages/LICENSE b/opentelemetry-zpages/LICENSE
deleted file mode 100644
index 23a2acabc4..0000000000
--- a/opentelemetry-zpages/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2023 The OpenTelemetry Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/opentelemetry-zpages/README.md b/opentelemetry-zpages/README.md deleted file mode 100644 index b05ec7c98a..0000000000 --- a/opentelemetry-zpages/README.md +++ /dev/null @@ -1,23 +0,0 @@ -![OpenTelemetry — An observability framework for cloud-native software.][splash] - -[splash]: https://raw.githubusercontent.com/open-telemetry/opentelemetry-rust/master/assets/logo-text.png - -# OpenTelemetry ZPages - -ZPages server written in Rust - -[![GitHub Actions CI](https://github.com/open-telemetry/opentelemetry-rust/workflows/CI/badge.svg)](https://github.com/open-telemetry/opentelemetry-rust/actions?query=workflow%3ACI+branch%3Amaster) -[![Slack](https://img.shields.io/badge/slack-@cncf/otel/rust-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C03GDP0H023) - -## Overview - -zPages are an in-process alternative to external exporters. 
When included, they collect and aggregate tracing and metrics information in the background; this data is served on web pages or APIs when requested.
-
-This crate is still a work in progress. Please find its current limitations below.
-
-Note that this crate is still in an **experimental** state. Breaking changes can still happen. Some features may still be in development.
-
-## Tracez
-
-Tracez shows information on tracing, including aggregation counts for latency, running, and errors for spans grouped by span name.
-
diff --git a/opentelemetry-zpages/examples/README.md b/opentelemetry-zpages/examples/README.md
deleted file mode 100644
index 9148b83186..0000000000
--- a/opentelemetry-zpages/examples/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
-# ZPages Example
-
-In this example, we demonstrate how to use zPages to analyze spans.
-
-Run the following command to start the server on `localhost:3000`
-```bash
-cargo run --example zpages
-```
-
-1. Then try to access the `localhost:3000/running` endpoint. Each request sent to this endpoint will generate a trace whose latency is between 1 ms and 6 s. The latency for each trace will be printed to the console.
-
-2. Check `localhost:3000/tracez/api/aggregations` to see the count of running spans, error spans, and spans within different latency buckets.
\ No newline at end of file
diff --git a/opentelemetry-zpages/examples/zpages.rs b/opentelemetry-zpages/examples/zpages.rs
deleted file mode 100644
index 457a8c8d4c..0000000000
--- a/opentelemetry-zpages/examples/zpages.rs
+++ /dev/null
@@ -1,110 +0,0 @@
-use hyper::http::{Request, Response};
-use hyper::service::{make_service_fn, service_fn};
-use hyper::{Body, Server};
-use opentelemetry::trace::Tracer;
-use opentelemetry::{
-    global,
-    trace::{Span, Status},
-};
-use opentelemetry_sdk::runtime::Tokio;
-use opentelemetry_sdk::trace::TracerProvider;
-use opentelemetry_zpages::{tracez, TracezError, TracezQuerier, TracezResponse};
-use rand::Rng;
-use std::str::FromStr;
-use std::sync::Arc;
-use std::{convert::Infallible, net::SocketAddr};
-use tokio::time::Duration;
-
-async fn handler(
-    req: Request<Body>,
-    querier: Arc<TracezQuerier>,
-) -> Result<Response<Body>, Infallible> {
-    Ok::<_, Infallible>(match req.uri().path() {
-        uri if uri.starts_with("/tracez/api") => {
-            // if it is an api call
-            let parts = uri
-                .split('/')
-                .filter(|x| !x.is_empty())
-                .collect::<Vec<&str>>();
-            if parts.len() < 3 {
-                Response::builder().status(404).body(Body::empty()).unwrap()
-            } else {
-                let operation_name = *(parts.get(2).unwrap_or(&""));
-                match operation_name {
-                    "aggregations" => tracez_response_or_server_error(querier.aggregation().await),
-                    "running" => {
-                        if let Some(&span_name) = parts.get(3) {
-                            tracez_response_or_server_error(querier.running(span_name.into()).await)
-                        } else {
-                            Response::builder().status(404).body(Body::empty()).unwrap()
-                        }
-                    }
-                    "error" => {
-                        if let Some(&span_name) = parts.get(3) {
-                            tracez_response_or_server_error(querier.error(span_name.into()).await)
-                        } else {
-                            Response::builder().status(404).body(Body::empty()).unwrap()
-                        }
-                    }
-                    "latency" => {
-                        let bucket_index = parts.get(3);
-                        let span_name = parts.get(4);
-                        match (bucket_index, span_name) {
-                            (Some(&bucket_index), Some(&span_name)) => {
-                                if let Ok(bucket_index) = u32::from_str(bucket_index) {
-                                    tracez_response_or_server_error(
-                                        querier
-                                            .latency(bucket_index as usize, span_name.into())
-                                            .await,
-                                    )
-                                } else {
-                                    Response::builder().status(404).body(Body::empty()).unwrap()
-                                }
-                            }
-                            (_, _) => Response::builder().status(404).body(Body::empty()).unwrap(),
-                        }
-                    }
-                    _ => Response::builder().status(404).body(Body::empty()).unwrap(),
-                }
-            }
-        }
-        "/running" => {
-            let span_duration = Duration::from_millis(rand::thread_rng().gen_range(1..6000));
-            let mut spans = global::tracer("zpages-test").start("running-spans");
-            spans.set_status(Status::Ok);
-            tokio::time::sleep(span_duration).await;
-            println!("The span slept for {} ms", span_duration.as_millis());
-            Response::new(Body::empty())
-        }
-        _ => Response::builder().status(404).body(Body::empty()).unwrap(),
-    })
-}
-
-fn tracez_response_or_server_error(resp: Result<TracezResponse, TracezError>) -> Response<Body> {
-    match resp {
-        Ok(resp) => Response::new(Body::from(serde_json::to_string(&resp).unwrap())),
-        Err(_) => Response::builder().status(500).body(Body::empty()).unwrap(),
-    }
-}
-
-#[tokio::main]
-async fn main() {
-    let (processor, querier) = tracez(5, Tokio);
-    let provider = TracerProvider::builder()
-        .with_span_processor(processor)
-        .build();
-    global::set_tracer_provider(provider);
-    let querier = Arc::new(querier);
-
-    let addr = SocketAddr::from(([127, 0, 0, 1], 3000));
-
-    let server = Server::bind(&addr).serve(make_service_fn(move |_conn| {
-        let inner = Arc::clone(&querier);
-        async move { Ok::<_, Infallible>(service_fn(move |req| handler(req, Arc::clone(&inner)))) }
-    }));
-
-    println!("Listening on {addr}");
-    if let Err(e) = server.await {
-        eprintln!("server error: {e}");
-    }
-}
diff --git a/opentelemetry-zpages/src/lib.rs b/opentelemetry-zpages/src/lib.rs
deleted file mode 100644
index 7842c10d98..0000000000
--- a/opentelemetry-zpages/src/lib.rs
+++ /dev/null
@@ -1,69 +0,0 @@
-//! ZPages implementation for OpenTelemetry
-//!
-//! # Overview
-//! zPages are an in-process alternative to external exporters. When included,
-//! they collect and aggregate tracing and metrics information in the
-//! background; this data is served on web pages or APIs when requested.
-//!
-//! Currently only tracez components are available, and some of those are still
-//! work in progress. Known limitations include:
-//! - The sampled running span doesn't reflect the changes made to the span.
-//! - The API only returns the json response.
-//! - Users have to build their own http server from the components provided.
-//!
-//! # Getting started
-//! The first step is to initiate the [`ZPagesSpanProcessor`] and install it in a [`TracerProvider`].
-//!
-//! ```no_run
-//! # use opentelemetry_zpages::tracez;
-//! # use opentelemetry::{global, trace::Tracer};
-//! # use opentelemetry_sdk::{runtime::Tokio, trace::TracerProvider};
-//! # use std::sync::Arc;
-//!
-//! # fn main() {
-//!     let (processor, querier) = tracez(5, Tokio);
-//!     let provider = TracerProvider::builder()
-//!         .with_span_processor(processor)
-//!         .build();
-//!     global::set_tracer_provider(provider);
-//! # }
-//! ```
-//!
-//! Once the [`ZPagesSpanProcessor`] is installed, it will record spans when
-//! they start or end.
-//!
-//! Users can then use the [`TracezQuerier`] to query the aggregated span information.
-//!
-//! A detailed example can also be found [here].
-//!
-//!
-//! [`ZPagesSpanProcessor`]: trace::span_processor::ZPagesSpanProcessor
-//! [`TracerProvider`]: opentelemetry_sdk::trace::TracerProvider
-//! [here]: https://github.com/open-telemetry/opentelemetry-rust/tree/main/examples/zpages
-#![warn(
-    future_incompatible,
-    missing_debug_implementations,
-    missing_docs,
-    nonstandard_style,
-    rust_2018_idioms,
-    unreachable_pub,
-    unused
-)]
-#![allow(elided_lifetimes_in_paths)]
-#![cfg_attr(
-    docsrs,
-    feature(doc_cfg, doc_auto_cfg),
-    deny(rustdoc::broken_intra_doc_links)
-)]
-#![doc(
-    html_logo_url = "https://raw.githubusercontent.com/open-telemetry/opentelemetry-rust/master/assets/logo.svg"
-)]
-#![cfg_attr(test, deny(warnings))]
-
-use trace::span_queue::SpanQueue;
-
-mod trace;
-
-pub use trace::{
-    span_processor::ZPagesSpanProcessor, tracez, TracezError, TracezQuerier, TracezResponse,
-};
diff --git a/opentelemetry-zpages/src/trace/aggregator.rs b/opentelemetry-zpages/src/trace/aggregator.rs
deleted file mode 100644
index 2615c17ba6..0000000000
--- a/opentelemetry-zpages/src/trace/aggregator.rs
+++ /dev/null
@@ -1,447 +0,0 @@
-//! ## Span Aggregator
-//!
-//! Process the span information, aggregate counts for latency, running, and errors for spans grouped
-//! by name.
-use crate::trace::{TracezError, TracezMessage, TracezQuery, TracezResponse};
-use crate::SpanQueue;
-use async_channel::Receiver;
-use futures_util::StreamExt as _;
-use opentelemetry::trace::Status;
-use opentelemetry_proto::tonic::tracez::v1::TracezCounts;
-use opentelemetry_sdk::export::trace::SpanData;
-use std::collections::HashMap;
-use std::time::{Duration, SystemTime, UNIX_EPOCH};
-
-const LATENCY_BUCKET: [Duration; 9] = [
-    Duration::from_micros(0),
-    Duration::from_micros(10),
-    Duration::from_micros(100),
-    Duration::from_millis(1),
-    Duration::from_millis(10),
-    Duration::from_millis(100),
-    Duration::from_secs(1),
-    Duration::from_secs(10),
-    Duration::from_secs(100),
-];
-const LATENCY_BUCKET_COUNT: usize = 9;
-
-/// Aggregate span information from `ZPagesSpanProcessor` and feed that information to the server
-/// when requested.
-#[derive(Debug)]
-pub(crate) struct SpanAggregator {
-    receiver: Receiver<TracezMessage>,
-    summaries: HashMap<String, SpanSummary>,
-    sample_size: usize,
-}
-
-impl SpanAggregator {
-    /// Create a span aggregator
-    pub(crate) fn new(receiver: Receiver<TracezMessage>, sample_size: usize) -> SpanAggregator {
-        SpanAggregator {
-            receiver,
-            summaries: HashMap::new(),
-            sample_size,
-        }
-    }
-
-    /// Process requests from the http server or the span processor.
-    pub(crate) async fn process(&mut self) {
-        let sample_size = self.sample_size;
-        loop {
-            match self.receiver.next().await {
-                None => {
-                    // All senders have been dropped. Thus, close it.
-                    self.receiver.close();
-                    return;
-                }
-                Some(msg) => {
-                    match msg {
-                        TracezMessage::ShutDown => {
-                            self.receiver.close();
-                            return;
-                        }
-                        TracezMessage::SpanEnd(span) => {
-                            let summary = self
-                                .summaries
-                                .entry(span.name.clone().into())
-                                .or_insert_with(|| SpanSummary::new(sample_size));
-
-                            summary.running.remove(span.span_context.clone());
-
-                            if matches!(span.status, Status::Error { .. }) {
-                                summary.error.push_back(span);
-                            } else {
-                                let latency_idx = latency_bucket(span.start_time, span.end_time);
-                                if let Some(queue) = summary.latencies.get_mut(latency_idx) {
-                                    queue.push_back(span)
-                                }
-                            }
-                        }
-                        TracezMessage::SampleSpan(span) => {
-                            // Resample spans whenever a new span starts.
-                            //
-                            // This helps us clean up stale spans that failed to be evicted because
-                            // of the failure to deliver the span end messages.
-                            let summary = self
-                                .summaries
-                                .entry(span.name.clone().into())
-                                .or_insert_with(|| SpanSummary::new(sample_size));
-                            summary.running.push_back(span)
-                        }
-                        TracezMessage::Query { query, response_tx } => {
-                            let result = self.handle_query(query);
-                            let _ = response_tx.send(result);
-                        }
-                    }
-                }
-            }
-        }
-    }
-
-    fn handle_query(&mut self, query: TracezQuery) -> Result<TracezResponse, TracezError> {
-        match query {
-            TracezQuery::Aggregation => Ok(TracezResponse::Aggregation(
-                self.summaries
-                    .iter()
-                    .map(|(span_name, summary)| TracezCounts {
-                        spanname: span_name.clone(),
-                        latency: summary
-                            .latencies
-                            .iter()
-                            .map(|queue| queue.count() as u32)
-                            .collect(),
-                        running: summary.running.count() as u32,
-                        error: summary.error.count() as u32,
-                    })
-                    .collect(),
-            )),
-            TracezQuery::Latency {
-                bucket_index,
-                span_name,
-            } => self
-                .summaries
-                .get(&span_name)
-                .ok_or(TracezError::NotFound {
-                    api: "tracez/api/latency/{bucket_index}/{span_name}",
-                })
-                .and_then(|summary| {
-                    summary
-                        .latencies
-                        .get(bucket_index)
-                        .ok_or(TracezError::InvalidArgument {
-                            api: "tracez/api/latency/{bucket_index}/{span_name}",
-                            message: "invalid bucket index",
-                        })
-                        .map(|queue| TracezResponse::Latency(queue.clone().into()))
-                }),
-            TracezQuery::Error { span_name } => self
-                .summaries
-                .get(&span_name)
-                .ok_or(TracezError::NotFound {
-                    api: "tracez/api/error/{span_name}",
-                })
-                .map(|summary| TracezResponse::Error(summary.error.clone().into())),
-            TracezQuery::Running { span_name } => self
-                .summaries
-                .get(&span_name)
-                .ok_or(TracezError::NotFound {
-                    api: "tracez/api/running/{span_name}",
-                })
-                .map(|summary| TracezResponse::Running(summary.running.clone().into())),
-        }
-    }
-}
-
-fn latency_bucket(start_time: SystemTime, end_time: SystemTime) -> usize {
-    let latency = end_time
-        .duration_since(UNIX_EPOCH)
-        .unwrap_or_else(|_| Duration::from_millis(0))
-        - start_time
-            .duration_since(UNIX_EPOCH)
-            .unwrap_or_else(|_| Duration::from_millis(0));
-    for (idx, lower) in LATENCY_BUCKET.iter().copied().enumerate().skip(1) {
-        if lower > latency {
-            return idx - 1;
-        }
-    }
-    LATENCY_BUCKET.len() - 1
-}
-
-#[derive(Debug)]
-struct SpanSummary {
-    running: SpanQueue,
-    error: SpanQueue,
-    latencies: Vec<SpanQueue>,
-}
-
-impl SpanSummary {
-    fn new(sample_size: usize) -> SpanSummary {
-        SpanSummary {
-            running: SpanQueue::new(sample_size),
-            error: SpanQueue::new(sample_size),
-            latencies: vec![SpanQueue::new(sample_size); LATENCY_BUCKET_COUNT],
-        }
-    }
-}
-
-impl<T: From<SpanData>> From<SpanQueue> for Vec<T> {
-    fn from(span_queue: SpanQueue) -> Self {
-        span_queue.spans().into_iter().map(Into::into).collect()
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use crate::trace::{
-        aggregator::{SpanAggregator, LATENCY_BUCKET_COUNT},
-        span_queue::SpanQueue,
-        TracezMessage,
-    };
-    use opentelemetry::trace::{SpanContext, SpanId, Status, TraceFlags, TraceId, TraceState};
-    use opentelemetry_sdk::{export::trace::SpanData, testing::trace::new_test_export_span_data};
-    use std::borrow::Cow;
-    use std::cmp::min;
-    use std::time::{Duration, SystemTime};
-
-    enum Action {
-        Start,
-        End(Duration), // end with latency
-    }
-
-    struct ProcessTestPlan {
-        // (trace id, span id, trace flag, is error, action)
-        input: Vec<(u128, u64, u8, bool, Action)>,
-        // (trace id, span id, trace flag, is error)
-        expect_running: Vec<(u128, u64, u8, bool)>,
-        // (trace id, span id, trace flag, is error)
-        expect_error: Vec<(u128, u64, u8, bool)>,
-        // (index of the latency bucket, trace id, span id, trace flag, is error)
-        expect_latencies: Vec<(usize, u128, u64, u8, bool)>,
-        // name of the test plan
-        name: &'static str,
-    }
-
-    impl ProcessTestPlan {
-        pub(crate) fn get_expect_running(&self) -> Vec<SpanData> {
-            self.expect_running
-                .iter()
-                .cloned()
-                .map(|(trace_id, span_id, trace_flag, is_error)| {
-                    span_data(trace_id, span_id, trace_flag, is_error)
-                })
-                .collect()
-        }
-
-        pub(crate) fn get_expect_error(&self) -> Vec<SpanData> {
-            self.expect_error
-                .iter()
-                .cloned()
-                .map(|(trace_id, span_id, trace_flag, is_error)| {
-                    span_data(trace_id, span_id, trace_flag, is_error)
-                })
-                .collect()
-        }
-
-        pub(crate) fn get_latencies(&self) -> Vec<Vec<SpanData>> {
-            let mut sink = vec![Vec::new(); LATENCY_BUCKET_COUNT];
-            for (index, trace_id, span_id, trace_flag, is_error) in self.expect_latencies.clone() {
-                sink.get_mut(index)
-                    .unwrap()
-                    .push(span_data(trace_id, span_id, trace_flag, is_error))
-            }
-            sink
-        }
-
-        pub(crate) fn get_input(&self) -> (Vec<SpanData>, Vec<SpanData>) {
-            let mut start_spans = Vec::new();
-            let mut end_spans = Vec::new();
-            let start_time = SystemTime::now();
-            for input in &self.input {
-                let mut span_data = span_data(input.0, input.1, input.2, input.3);
-                match input.4 {
-                    Action::Start => {
-                        span_data.start_time = start_time;
-                        start_spans.push(span_data);
-                    }
-                    Action::End(duration) => {
-                        span_data.start_time = start_time;
-                        span_data.end_time = start_time.checked_add(duration).unwrap();
-                        end_spans.push(span_data);
-                    }
-                }
-            }
-            (start_spans, end_spans)
-        }
-    }
-
-    fn span_data(trace_id: u128, span_id: u64, trace_flag: u8, is_error: bool) -> SpanData {
-        let mut span_data = new_test_export_span_data();
-        span_data.span_context = SpanContext::new(
-            TraceId::from_u128(trace_id),
-            SpanId::from_u64(span_id),
-            TraceFlags::new(trace_flag),
-            true,
-            TraceState::default(),
-        );
-        span_data.name = Cow::from("test-service");
-        span_data.status = {
-            if is_error {
-                Status::error("")
-            } else {
-                Status::Ok
-            }
-        };
-        span_data
-    }
-
-    #[tokio::test]
-    async fn test_span_aggregator() -> Result<(), Box<dyn std::error::Error>> {
-        const SAMPLE_SIZE: usize = 5;
-        let test_cases = vec![
-            ProcessTestPlan {
-                name: "start and end",
-                input: vec![
-                    (1, 1, 0, false, Action::Start),
-                    (1, 1, 0, false, Action::End(Duration::from_millis(2))),
-                ],
-                expect_running: vec![],
-                expect_error: vec![],
-                expect_latencies: vec![(3, 1, 1, 0, false)],
-            },
-            ProcessTestPlan {
-                name: "start and end with error",
-                input: vec![
-                    (1, 1, 0, false, Action::Start),
-                    (1, 1, 0, true, Action::End(Duration::from_millis(2))),
-                ],
-                expect_latencies: vec![],
-                expect_error: vec![(1, 1, 0, true)],
-                expect_running: vec![],
-            },
-            ProcessTestPlan {
-                name: "start but not finish",
-                input: vec![
-                    (1, 2, 0, false, Action::Start),
-                    (1, 1, 0, false, Action::Start),
-                    (1, 2, 0, false, Action::End(Duration::from_secs(6))),
-                ],
-                expect_running: vec![(1, 1, 0, false)],
-                expect_error: vec![],
-                expect_latencies: vec![(6, 1, 2, 0, false)],
-            },
-            ProcessTestPlan {
-                name: "accept spans without started record",
-                input: vec![(1, 1, 0, false, Action::End(Duration::from_secs(6)))],
-                expect_latencies: vec![(6, 1, 1, 0, false)],
-                expect_running: vec![],
-                expect_error: vec![],
-            },
-            ProcessTestPlan {
-                name: "evicted spans if the queue is filled",
-                input: {
-                    let mut input = Vec::with_capacity((SAMPLE_SIZE + 1) * 2);
-                    for i in 0..SAMPLE_SIZE + 1 {
-                        input.push((1, i as u64 + 1, 0, false, Action::Start));
-                        input.push((
-                            1,
-                            i as u64 + 1,
-                            0,
-                            false,
-                            Action::End(Duration::from_secs(3)),
-                        ));
-                    }
-                    input
-                },
-                expect_latencies: {
-                    let mut latencies = Vec::with_capacity(SAMPLE_SIZE + 1);
-                    for i in 0..SAMPLE_SIZE + 1 {
-                        latencies.push((6, 1, i as u64 + 1, 0, false));
-                    }
-                    latencies
-                },
-                expect_running: vec![],
-                expect_error: vec![],
-            },
-        ];
-
-        let assert_span_queue = |span_queue: &SpanQueue, expected: Vec<SpanData>, msg: String| {
-            assert_eq!(span_queue.len(), min(SAMPLE_SIZE, expected.len()));
-            for collected_span in span_queue.clone().spans() {
-                assert!(
-                    expected
-                        .iter()
-                        .any(|expected_span| collected_span.span_context
-                            == expected_span.span_context
-                            && collected_span.status == expected_span.status),
-                    "{}",
-                    msg
-                )
-            }
-        };
-
-        for plan in test_cases {
-            let running = plan.get_expect_running();
-            let error = plan.get_expect_error();
-            let latencies = plan.get_latencies();
-            let plan_name = plan.name.to_string();
-
-            let (sender, receiver) = async_channel::unbounded();
-            let mut aggregator = SpanAggregator::new(receiver, SAMPLE_SIZE);
-
-            let handle = tokio::spawn(async move {
-                aggregator.process().await;
-
-                assert_ne!(aggregator.summaries.len(), 0);
-                let summary = aggregator
-                    .summaries
-                    .get::<String>(&"test-service".to_string())
-                    .unwrap();
-
-                assert_span_queue(
-                    &summary.running,
-                    running,
-                    format!(
-                        "{} fails because the running status is not expected",
-                        plan_name
-                    ),
-                );
-                assert_span_queue(
-                    &summary.error,
-                    error,
-                    format!(
-                        "{} fails because the error status is not expected",
-                        plan_name
-                    ),
-                );
-                // check the result lengths are expected
-
-                for (index, expected) in (0..LATENCY_BUCKET_COUNT).zip(latencies) {
-                    assert_span_queue(
-                        summary.latencies.get(index).unwrap(),
-                        expected,
-                        format!(
-                            "{} fails because the latency status with index {} is not expected",
-                            plan_name, index,
-                        ),
-                    );
-                }
-            });
-
-            let (start_spans, end_spans) = plan.get_input();
-
-            for span in start_spans.into_iter() {
-                sender.send(TracezMessage::SampleSpan(span)).await?;
-            }
-
-            for span in end_spans.into_iter() {
-                sender.send(TracezMessage::SpanEnd(span)).await?;
-            }
-
-            sender.send(TracezMessage::ShutDown).await?;
-
-            handle.await?;
-        }
-
-        Ok(())
-    }
-}
diff --git a/opentelemetry-zpages/src/trace/mod.rs b/opentelemetry-zpages/src/trace/mod.rs
deleted file mode 100644
index 4e455934f9..0000000000
--- a/opentelemetry-zpages/src/trace/mod.rs
+++ /dev/null
@@ -1,293 +0,0 @@
-//! Tracez implementation
-//!
-use async_channel::{SendError, Sender};
-use futures_channel::oneshot::{self, Canceled};
-use opentelemetry_proto::tonic::tracez::v1::{ErrorData, LatencyData, RunningData, TracezCounts};
-use opentelemetry_sdk::{export::trace::SpanData, runtime::Runtime};
-use serde::ser::SerializeSeq;
-use serde::Serializer;
-use std::fmt::Formatter;
-use std::sync::Arc;
-
-mod aggregator;
-pub(crate) mod span_processor;
-pub(crate) mod span_queue;
-
-/// Create tracez components. This function will return a [`ZPagesSpanProcessor`] that should be installed
-/// into the [`TracerProvider`] and a [`TracezQuerier`] for the http server to access the aggregated
-/// information on spans.
-///
-/// The `sample_size` configures how many spans to sample for each unique span name.
-///
-/// [`ZPagesSpanProcessor`]: span_processor::ZPagesSpanProcessor
-/// [`TracerProvider`]: opentelemetry_sdk::trace::TracerProvider
-///
-/// ## Example
-/// ```no_run
-/// # use opentelemetry_zpages::tracez;
-/// # use opentelemetry::{global, trace::Tracer};
-/// # use opentelemetry_sdk::{runtime::Tokio, trace::TracerProvider};
-/// # use std::sync::Arc;
-/// # fn main() {
-///     let (processor, querier) = tracez(5, Tokio); // sample 5 spans for each unique span name
-///     let provider = TracerProvider::builder()
-///         .with_span_processor(processor)
-///         .build();
-///     global::set_tracer_provider(provider);
-///
-///     // use querier to retrieve the aggregated span information
-/// # }
-///
-/// ```
-pub fn tracez<R: Runtime>(
-    sample_size: usize,
-    runtime: R,
-) -> (span_processor::ZPagesSpanProcessor, TracezQuerier) {
-    let (tx, rx) = async_channel::unbounded();
-    let span_processor = span_processor::ZPagesSpanProcessor::new(tx.clone());
-    let mut aggregator = aggregator::SpanAggregator::new(rx, sample_size);
-    runtime.spawn(Box::pin(async move {
-        aggregator.process().await;
-    }));
-    (span_processor, TracezQuerier(Arc::new(tx)))
-}
-
-/// Messages used to pass commands between web servers, aggregators, and span processors.
-pub enum TracezMessage {
-    /// Sample span on start
-    SampleSpan(SpanData),
-    /// Span ended
-    SpanEnd(SpanData),
-    /// Shut down the aggregator
-    ShutDown,
-    /// Run a query from the web service
-    Query {
-        /// Query content
-        query: TracezQuery,
-        /// Channel to send the response
-        response_tx: oneshot::Sender<Result<TracezResponse, TracezError>>,
-    },
-}
-
-impl std::fmt::Debug for TracezMessage {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        match &self {
-            TracezMessage::SampleSpan(_) => f.write_str("span starts"),
-            TracezMessage::SpanEnd(_) => f.write_str("span ends"),
-            TracezMessage::ShutDown => f.write_str("shut down"),
-            TracezMessage::Query { .. } => f.write_str("query aggregation results"),
-        }
-    }
-}
-
-/// Tracez APIs.
-/// As defined in the [spec](https://github.com/open-telemetry/opentelemetry-specification/blob/main/experimental/trace/zpages.md#http-server).
-#[derive(Debug)]
-pub enum TracezQuery {
-    /// tracez/api/aggregations
-    Aggregation,
-    /// tracez/api/latency/{bucket_index}/{span_name}
-    Latency {
-        /// index of the bucket in API path
-        bucket_index: usize,
-        /// span name in API path
-        span_name: String,
-    },
-    /// tracez/api/running/{span_name}
-    Running {
-        /// span name in API path
-        span_name: String,
-    },
-    /// tracez/api/error/{span_name}
-    Error {
-        /// span name in API path
-        span_name: String,
-    },
-}
-
-/// Tracez APIs' response
-#[derive(Debug)]
-pub enum TracezResponse {
-    /// tracez/api/aggregations
-    Aggregation(Vec<TracezCounts>),
-    /// tracez/api/latency/{bucket_index}/{span_name}
-    Latency(Vec<LatencyData>),
-    /// tracez/api/running/{span_name}
-    Running(Vec<RunningData>),
-    /// tracez/api/error/{span_name}
-    Error(Vec<ErrorData>),
-}
-
-impl serde::Serialize for TracezResponse {
-    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-    where
-        S: Serializer,
-    {
-        match self {
-            TracezResponse::Aggregation(data) => {
-                let mut list = serializer.serialize_seq(Some(data.len()))?;
-                for e in data {
-                    list.serialize_element(e)?;
-                }
-                list.end()
-            }
-            TracezResponse::Latency(data) => {
-                let mut list = serializer.serialize_seq(Some(data.len()))?;
-                for e in data {
-                    list.serialize_element(e)?;
-                }
-                list.end()
-            }
-            TracezResponse::Running(data) => {
-                let mut list = serializer.serialize_seq(Some(data.len()))?;
-                for e in data {
-                    list.serialize_element(e)?;
-                }
-                list.end()
-            }
-            TracezResponse::Error(data) => {
-                let mut list = serializer.serialize_seq(Some(data.len()))?;
-                for e in data {
-                    list.serialize_element(e)?;
-                }
-                list.end()
-            }
-        }
-    }
-}
-
-/// Provide wrapper functions to query the aggregated span info.
-// TracezQuerier creates the oneshot channel and sends the TracezMessage to the SpanAggregator.
-#[derive(Clone, Debug)]
-pub struct TracezQuerier(Arc<Sender<TracezMessage>>);
-
-impl TracezQuerier {
-    /// Return the aggregation status for spans.
-    ///
-    /// The aggregation will contain the error, running, and latency counts for all span name
-    /// groupings.
-    pub async fn aggregation(&self) -> Result<TracezResponse, TracezError> {
-        let (tx, rx) = oneshot::channel();
-        self.0
-            .send(TracezMessage::Query {
-                query: TracezQuery::Aggregation,
-                response_tx: tx,
-            })
-            .await?;
-        rx.await.map_err::<TracezError, _>(Into::into)?
-    }
-
-    /// Return the sampled spans for the given bucket index.
-    pub async fn latency(
-        &self,
-        bucket_index: usize,
-        span_name: String,
-    ) -> Result<TracezResponse, TracezError> {
-        let (tx, rx) = oneshot::channel();
-        self.0
-            .send(TracezMessage::Query {
-                query: TracezQuery::Latency {
-                    bucket_index,
-                    span_name,
-                },
-                response_tx: tx,
-            })
-            .await?;
-        rx.await.map_err::<TracezError, _>(Into::into)?
-    }
-
-    /// Return the sampled running spans' snapshot.
-    ///
-    /// Note that the current implementation cannot include the changes made to spans after the
-    /// spans started. For example, the events added or the links added.
-    pub async fn running(&self, span_name: String) -> Result<TracezResponse, TracezError> {
-        let (tx, rx) = oneshot::channel();
-        self.0
-            .send(TracezMessage::Query {
-                query: TracezQuery::Running { span_name },
-                response_tx: tx,
-            })
-            .await?;
-        rx.await.map_err::<TracezError, _>(Into::into)?
-    }
-
-    /// Return the sampled spans with error status.
-    pub async fn error(&self, span_name: String) -> Result<TracezResponse, TracezError> {
-        let (tx, rx) = oneshot::channel();
-        self.0
-            .send(TracezMessage::Query {
-                query: TracezQuery::Error { span_name },
-                response_tx: tx,
-            })
-            .await?;
-        rx.await.map_err::<TracezError, _>(Into::into)?
-    }
-}
-
-impl Drop for TracezQuerier {
-    fn drop(&mut self) {
-        // shut down aggregator if it is still running
-        let _ = self.0.try_send(TracezMessage::ShutDown);
-    }
-}
-
-/// Tracez API's error.
-#[derive(Debug)]
-pub enum TracezError {
-    /// There isn't a valid tracez operation for that API
-    InvalidArgument {
-        /// Describe the operation on the tracez
-        api: &'static str,
-        /// Error message
-        message: &'static str,
-    },
-    /// Operation cannot be found
-    NotFound {
-        /// Describe the operation on the tracez
-        api: &'static str,
-    },
-    /// Error when serializing the TracezResponse to json.
-    Serialization,
-    /// The span aggregator has been dropped.
-    AggregatorDropped,
-}
-
-impl From<Canceled> for TracezError {
-    fn from(_: Canceled) -> Self {
-        TracezError::AggregatorDropped
-    }
-}
-
-impl From<SendError<TracezMessage>> for TracezError {
-    fn from(_: SendError<TracezMessage>) -> Self {
-        // Since we employed an unbounded channel to send messages to the aggregator,
-        // the only reason send would return an error is that the receiver has closed.
-        // This should only happen if the span aggregator has been dropped.
-        TracezError::AggregatorDropped
-    }
-}
-
-impl std::fmt::Display for TracezError {
-    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
-        match self {
-            TracezError::InvalidArgument { api: _, message } => f.write_str(message),
-            TracezError::NotFound { api: _ } => {
-                f.write_str("the requested resource is not found")
-            }
-            TracezError::Serialization => f.write_str("cannot serialize the response into json"),
-            TracezError::AggregatorDropped => {
-                f.write_str("the span aggregator is already dropped when querying")
-            }
-        }
-    }
-}
-
-impl TracezResponse {
-    /// Convert the `TracezResponse` into json.
-    ///
-    /// Throws a `TracezError` if the serialization fails.
-    #[cfg(feature = "with-serde")]
-    pub fn into_json(self) -> Result<String, TracezError> {
-        serde_json::to_string(&self).map_err(|_| TracezError::Serialization)
-    }
-}
diff --git a/opentelemetry-zpages/src/trace/span_processor.rs b/opentelemetry-zpages/src/trace/span_processor.rs
deleted file mode 100644
index a9760d4f3f..0000000000
--- a/opentelemetry-zpages/src/trace/span_processor.rs
+++ /dev/null
@@ -1,60 +0,0 @@
-//! ## zPages processor
-//!
-//! The zPages processor collects span information when a span starts or ends and sends it to the
-//! [`SpanAggregator`] for further processing.
-//!
-//! [`SpanAggregator`]: ../struct.SpanAggregator.html
-use crate::trace::TracezMessage;
-use async_channel::Sender;
-use opentelemetry::{trace::TraceResult, Context};
-use opentelemetry_sdk::{
-    export::trace::SpanData,
-    trace::{Span, SpanProcessor},
-};
-use std::fmt::Formatter;
-
-/// ZPagesSpanProcessor is an alternative to external exporters. It sends span data to the zPages
-/// server, where it will be archived and users can use this information for debugging purposes.
-///
-/// ZPagesSpanProcessor employs a `SpanAggregator` running as another task to aggregate the spans
-/// using the name of spans.
-pub struct ZPagesSpanProcessor {
-    tx: Sender<TracezMessage>,
-}
-
-impl std::fmt::Debug for ZPagesSpanProcessor {
-    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
-        f.write_str("ZPageProcessor")
-    }
-}
-
-impl ZPagesSpanProcessor {
-    /// Create a new `ZPagesSpanProcessor`.
-    pub fn new(tx: Sender<TracezMessage>) -> ZPagesSpanProcessor {
-        ZPagesSpanProcessor { tx }
-    }
-}
-
-impl SpanProcessor for ZPagesSpanProcessor {
-    fn on_start(&self, span: &mut Span, _cx: &Context) {
-        // if the aggregator is already dropped.
This is a no-op - if let Some(data) = span.exported_data() { - let _ = self.tx.try_send(TracezMessage::SampleSpan(data)); - } - } - - fn on_end(&self, span: SpanData) { - // if the aggregator is already dropped. This is a no-op - let _ = self.tx.try_send(TracezMessage::SpanEnd(span)); - } - - fn force_flush(&self) -> TraceResult<()> { - // do nothing - Ok(()) - } - - fn shutdown(&mut self) -> TraceResult<()> { - // do nothing - Ok(()) - } -} diff --git a/opentelemetry-zpages/src/trace/span_queue.rs b/opentelemetry-zpages/src/trace/span_queue.rs deleted file mode 100644 index f9fc2bd1bc..0000000000 --- a/opentelemetry-zpages/src/trace/span_queue.rs +++ /dev/null @@ -1,225 +0,0 @@ -//! # Span Queue - -use opentelemetry::trace::SpanContext; -use opentelemetry_sdk::export::trace::SpanData; -#[cfg(feature = "serialize")] -use serde::{Deserialize, Serialize}; -use std::collections::HashMap; - -/// This queue maintains an ordered list of elements, Elements are -/// removed from the queue in a first in first out fashion. -#[derive(Clone, Debug)] -pub(crate) struct SpanQueue { - // We can't really use the opentelemetry::EvictedQueue here because - // we need to compare the SpanData based on their span context - // rather than all fields. Thus, we cannot use SpanData's default - // equal function as it compares all fields. - - // All operation within SpanQueue should be O(1) - queue: Vec, - map: HashMap, - next_idx: usize, - capacity: usize, - count: usize, -} - -impl PartialEq for SpanQueue { - fn eq(&self, other: &Self) -> bool { - self.queue.eq(&other.queue) && self.next_idx == other.next_idx - } -} - -impl SpanQueue { - /// Create a new `SpanQueue` with a given max length. - pub(crate) fn new(max_len: usize) -> Self { - SpanQueue { - queue: Vec::with_capacity(max_len), - next_idx: 0, - map: HashMap::with_capacity(max_len), - capacity: max_len, - count: 0, - } - } - - /// Push a new element to the back of the queue - /// If the queue is filled. Replace the left most element inside the queue. - pub(crate) fn push_back(&mut self, value: SpanData) { - self.next_idx %= self.capacity; - self.map.insert(value.span_context.clone(), self.next_idx); - match self.queue.get_mut(self.next_idx) { - Some(ele) => { - self.map.remove(&ele.span_context); - *ele = value; - } - None => { - self.queue.push(value); - } - } - self.count += 1; - self.next_idx += 1; - } - - /// Returns the number of sampled spans in the `SpanQueue`. - #[allow(unused)] // used in testing - pub(crate) fn len(&self) -> usize { - self.queue.len() - } - - /// Return the count of spans in the `SpanQueue`. - /// - /// The count tracks the total number of spans instead of the number of sampled spans. - /// Use `len` function for the current number of sampled spans. - /// - /// The count will add 1 whenever the `push_back` function is called and - /// decrease 1 whenever the `remove` function is called. - pub(crate) fn count(&self) -> usize { - self.count - } - - /// Remove one element if exist. 
- pub(crate) fn remove(&mut self, span_context: SpanContext) -> Option { - self.count = self.count.saturating_sub(1); - if !self.map.contains_key(&span_context) { - None - } else { - self.next_idx = self.queue.len() - 1; - let idx = *(self.map.get(&span_context).unwrap()); - if idx == self.queue.len() - 1 { - // if it's last element, just remove - self.map.remove(&span_context); - Some(self.queue.remove(idx)) - } else { - let last_span_context = self.queue.last().unwrap().span_context.clone(); - self.map.remove(&span_context); - self.map.insert(last_span_context, idx); - Some(self.queue.swap_remove(idx)) - } - } - } - - /// Return all spans it currently hold - pub(crate) fn spans(self) -> Vec { - self.queue.into_iter().collect() - } -} - -#[cfg(test)] -mod tests { - use super::*; - use opentelemetry::trace::{SpanId, TraceFlags, TraceId, TraceState}; - use opentelemetry_sdk::testing::trace::new_test_export_span_data; - use std::time::SystemTime; - - enum Action { - PushBack(u128, u64), - Remove(u128, u64), - } - - // If the expected is None, means we skip this check in this test plan. - #[derive(Default)] - struct TestPlan { - max_len: usize, - actions: Vec, - expected_next_idx: Option, - expected_queue: Option>, - expected_len: Option, - } - - #[test] - fn test_span_queue() { - let get_span_context = |trace_id: u128, span_id: u64| { - SpanContext::new( - TraceId::from_u128(trace_id), - SpanId::from_u64(span_id), - TraceFlags::new(0), - false, - TraceState::default(), - ) - }; - let time = SystemTime::now(); - let get_span_data = |trace_id: u128, span_id: u64| { - let mut span_data = new_test_export_span_data(); - span_data.span_context = get_span_context(trace_id, span_id); - span_data.start_time = time; - span_data.end_time = time; - span_data - }; - let plans = vec![ - TestPlan { - max_len: 3, - actions: vec![ - Action::PushBack(1, 1), - Action::PushBack(1, 2), - Action::PushBack(1, 3), - Action::PushBack(1, 4), - ], - expected_next_idx: Some(1), - expected_len: Some(3), - expected_queue: Some(vec![(1, 4), (1, 2), (1, 3)]), - }, - TestPlan { - max_len: 3, - actions: vec![ - Action::PushBack(1, 3), - Action::PushBack(2, 2), - Action::PushBack(1, 4), - Action::PushBack(1, 5), - Action::Remove(1, 3), - Action::Remove(1, 4), - ], - expected_queue: Some(vec![(1, 5), (2, 2)]), - expected_next_idx: Some(2), - expected_len: Some(2), - }, - TestPlan { - max_len: 3, - actions: vec![ - Action::PushBack(1, 1), - Action::Remove(1, 3), - Action::Remove(1, 4), - Action::PushBack(1, 3), - Action::Remove(1, 1), - Action::Remove(1, 3), - ], - expected_len: Some(0), - expected_next_idx: Some(0), - expected_queue: Some(vec![]), - }, - ]; - - for plan in plans { - let mut span_queue = SpanQueue::new(plan.max_len); - for action in plan.actions { - match action { - Action::PushBack(trace_id, span_id) => { - span_queue.push_back(get_span_data(trace_id, span_id)); - } - Action::Remove(trace_id, span_id) => { - span_queue.remove(get_span_context(trace_id, span_id)); - } - } - } - if let Some(next_id) = plan.expected_next_idx { - assert_eq!(span_queue.next_idx, next_id); - } - if let Some(len) = plan.expected_len { - assert_eq!(span_queue.len(), len); - } - if let Some(queue) = plan.expected_queue { - assert_eq!( - span_queue.queue, - queue - .iter() - .cloned() - .map(|(trace_id, span_id)| get_span_data(trace_id, span_id)) - .collect::>() - ); - assert_eq!(span_queue.map.len(), queue.len()); - for (idx, (trace_id, span_id)) in queue.into_iter().enumerate() { - let span_context = get_span_context(trace_id, 
span_id);
-                    assert_eq!(span_queue.map.get(&span_context).copied(), Some(idx));
-                }
-            }
-        }
-    }
-}
diff --git a/scripts/test.sh b/scripts/test.sh
index e2a4b0a0b7..fe7996f629 100755
--- a/scripts/test.sh
+++ b/scripts/test.sh
@@ -11,7 +11,6 @@ cargo test --manifest-path=opentelemetry/Cargo.toml --no-default-features
 cargo test --manifest-path=opentelemetry/Cargo.toml --all-features -- --ignored --test-threads=1
 cargo test --manifest-path=opentelemetry/Cargo.toml --all-features
-cargo test --manifest-path=opentelemetry-contrib/Cargo.toml --all-features
 cargo test --manifest-path=opentelemetry-jaeger/Cargo.toml --all-features
 cargo test --manifest-path=opentelemetry-otlp/Cargo.toml --features "trace,grpc-sys" --no-default-features
 cargo test --manifest-path=opentelemetry-zipkin/Cargo.toml --all-features

From 41c6ea89565abacd1a9153b290d699120f3b9647 Mon Sep 17 00:00:00 2001
From: "K.J. Valencik"
Date: Tue, 14 Nov 2023 17:28:55 -0500
Subject: [PATCH 27/68] Use custom channel in exporters if configured (#1335)

---
 opentelemetry-otlp/CHANGELOG.md              |  1 +
 opentelemetry-otlp/src/exporter/tonic/mod.rs | 70 +++++++++++---------
 2 files changed, 38 insertions(+), 33 deletions(-)

diff --git a/opentelemetry-otlp/CHANGELOG.md b/opentelemetry-otlp/CHANGELOG.md
index 4a23e000e1..c324dd0ead 100644
--- a/opentelemetry-otlp/CHANGELOG.md
+++ b/opentelemetry-otlp/CHANGELOG.md
@@ -9,6 +9,7 @@
 - Add `build_{signal}_exporter` methods to client builders (#1187)
 - Add `grpcio` metrics exporter (#1202)
 - Allow specifying OTLP HTTP headers from env variable (#1290)
+- Support custom channels in tonic exporters [#1335](https://github.com/open-telemetry/opentelemetry-rust/pull/1335)

 ### Changed

diff --git a/opentelemetry-otlp/src/exporter/tonic/mod.rs b/opentelemetry-otlp/src/exporter/tonic/mod.rs
index 5379380772..30840157be 100644
--- a/opentelemetry-otlp/src/exporter/tonic/mod.rs
+++ b/opentelemetry-otlp/src/exporter/tonic/mod.rs
@@ -208,15 +208,44 @@ impl TonicExporterBuilder {
     }

     fn build_channel(
-        &mut self,
+        self,
         signal_endpoint_var: &str,
         signal_endpoint_path: &str,
         signal_timeout_var: &str,
         signal_compression_var: &str,
     ) -> Result<(Channel, BoxInterceptor, Option<CompressionEncoding>), crate::Error> {
-        let config = &mut self.exporter_config;
-        let tonic_config: &mut TonicConfig = &mut self.tonic_config;
+        let tonic_config = self.tonic_config;
+        let compression = resolve_compression(&tonic_config, signal_compression_var)?;
+        let metadata = tonic_config.metadata.unwrap_or_default();
+        let add_metadata = move |mut req: tonic::Request<()>| {
+            for key_and_value in metadata.iter() {
+                match key_and_value {
+                    KeyAndValueRef::Ascii(key, value) => {
+                        req.metadata_mut().append(key, value.to_owned())
+                    }
+                    KeyAndValueRef::Binary(key, value) => {
+                        req.metadata_mut().append_bin(key, value.to_owned())
+                    }
+                };
+            }
+
+            Ok(req)
+        };
+
+        let interceptor = match self.interceptor {
+            Some(mut interceptor) => {
+                BoxInterceptor(Box::new(move |req| interceptor.call(add_metadata(req)?)))
+            }
+            None => BoxInterceptor(Box::new(add_metadata)),
+        };
+
+        // If a custom channel was provided, use that channel instead of creating one
+        if let Some(channel) = self.channel {
+            return Ok((channel, interceptor, compression));
+        }
+
+        let config = self.exporter_config;
         let endpoint = match env::var(signal_endpoint_var)
             .ok()
             .or(env::var(OTEL_EXPORTER_OTLP_ENDPOINT).ok())
@@ -225,6 +254,7 @@
             None => format!("{}{signal_endpoint_path}", config.endpoint),
         };

+        let endpoint = Channel::from_shared(endpoint).map_err(crate::Error::from)?;
let timeout = match env::var(signal_timeout_var) .ok() .or(env::var(OTEL_EXPORTER_OTLP_TIMEOUT).ok()) @@ -235,12 +265,9 @@ impl TonicExporterBuilder { }, None => config.timeout, }; - let compression = resolve_compression(tonic_config, signal_compression_var)?; - - let endpoint = Channel::from_shared(endpoint).map_err(crate::Error::from)?; #[cfg(feature = "tls")] - let channel = match tonic_config.tls_config.take() { + let channel = match tonic_config.tls_config { Some(tls_config) => endpoint .tls_config(tls_config) .map_err(crate::Error::from)?, @@ -252,36 +279,13 @@ impl TonicExporterBuilder { #[cfg(not(feature = "tls"))] let channel = endpoint.timeout(timeout).connect_lazy(); - let metadata = tonic_config.metadata.take().unwrap_or_default(); - let add_metadata = move |mut req: tonic::Request<()>| { - for key_and_value in metadata.iter() { - match key_and_value { - KeyAndValueRef::Ascii(key, value) => { - req.metadata_mut().append(key, value.to_owned()) - } - KeyAndValueRef::Binary(key, value) => { - req.metadata_mut().append_bin(key, value.to_owned()) - } - }; - } - - Ok(req) - }; - - let interceptor = match self.interceptor.take() { - Some(mut interceptor) => { - BoxInterceptor(Box::new(move |req| interceptor.call(add_metadata(req)?))) - } - None => BoxInterceptor(Box::new(add_metadata)), - }; - Ok((channel, interceptor, compression)) } /// Build a new tonic log exporter #[cfg(feature = "logs")] pub fn build_log_exporter( - mut self, + self, ) -> Result { use crate::exporter::tonic::logs::TonicLogsClient; @@ -300,7 +304,7 @@ impl TonicExporterBuilder { /// Build a new tonic metrics exporter #[cfg(feature = "metrics")] pub fn build_metrics_exporter( - mut self, + self, aggregation_selector: Box, temporality_selector: Box, ) -> opentelemetry::metrics::Result { @@ -326,7 +330,7 @@ impl TonicExporterBuilder { /// Build a new tonic span exporter #[cfg(feature = "trace")] pub fn build_span_exporter( - mut self, + self, ) -> Result { use crate::exporter::tonic::trace::TonicTracesClient; From a2c0dd8d10e6aae1c6a006be1a41ec38e97064b0 Mon Sep 17 00:00:00 2001 From: Shaun Cox Date: Wed, 15 Nov 2023 03:39:55 -0600 Subject: [PATCH 28/68] make severity of level computations const (#1369) --- opentelemetry-appender-log/src/lib.rs | 11 +++++------ opentelemetry-appender-tracing/src/layer.rs | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 16 deletions(-) diff --git a/opentelemetry-appender-log/src/lib.rs b/opentelemetry-appender-log/src/lib.rs index 8d3f5075a7..f3b3241c71 100644 --- a/opentelemetry-appender-log/src/lib.rs +++ b/opentelemetry-appender-log/src/lib.rs @@ -18,10 +18,9 @@ where { fn enabled(&self, _metadata: &Metadata) -> bool { #[cfg(feature = "logs_level_enabled")] - return self.logger.event_enabled( - map_severity_to_otel_severity(_metadata.level()), - _metadata.target(), - ); + return self + .logger + .event_enabled(severity_of_level(_metadata.level()), _metadata.target()); #[cfg(not(feature = "logs_level_enabled"))] true } @@ -30,7 +29,7 @@ where if self.enabled(record.metadata()) { self.logger.emit( LogRecordBuilder::new() - .with_severity_number(map_severity_to_otel_severity(record.level())) + .with_severity_number(severity_of_level(record.level())) .with_severity_text(record.level().as_str()) // Not populating ObservedTimestamp, instead relying on OpenTelemetry // API to populate it with current time. 
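The renamed `severity_of_level` above is a `const fn`, so the same mapping can also be evaluated at compile time. A minimal sketch of that property, using stand-in enums rather than the real `log`/`opentelemetry` types:

```rust
#[derive(Debug, PartialEq)]
enum Severity {
    Debug,
    Info,
}

enum Level {
    Debug,
    Info,
}

// Usable both at runtime and in const contexts.
const fn severity_of_level(level: Level) -> Severity {
    match level {
        Level::Debug => Severity::Debug,
        Level::Info => Severity::Info,
    }
}

// Evaluated at compile time:
const DEFAULT: Severity = severity_of_level(Level::Info);

fn main() {
    assert_eq!(DEFAULT, Severity::Info);
    assert_eq!(severity_of_level(Level::Debug), Severity::Debug);
}
```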
@@ -61,7 +60,7 @@ where } } -fn map_severity_to_otel_severity(level: Level) -> Severity { +const fn severity_of_level(level: Level) -> Severity { match level { Level::Error => Severity::Error, Level::Warn => Severity::Warn, diff --git a/opentelemetry-appender-tracing/src/layer.rs b/opentelemetry-appender-tracing/src/layer.rs index 840e4a6ae2..f704f4810d 100644 --- a/opentelemetry-appender-tracing/src/layer.rs +++ b/opentelemetry-appender-tracing/src/layer.rs @@ -1,5 +1,6 @@ use opentelemetry::logs::{LogRecord, Logger, LoggerProvider, Severity}; use std::borrow::Cow; +use tracing_core::Level; use tracing_subscriber::Layer; const INSTRUMENTATION_LIBRARY_NAME: &str = "opentelemetry-appender-tracing"; @@ -100,7 +101,7 @@ where ) { let meta = event.metadata(); let mut log_record: LogRecord = LogRecord::default(); - log_record.severity_number = Some(map_severity_to_otel_severity(meta.level().as_str())); + log_record.severity_number = Some(severity_of_level(meta.level())); log_record.severity_text = Some(meta.level().to_string().into()); // add the `name` metadata to attributes @@ -124,19 +125,18 @@ where _event: &tracing_core::Event<'_>, _ctx: tracing_subscriber::layer::Context<'_, S>, ) -> bool { - let severity = map_severity_to_otel_severity(_event.metadata().level().as_str()); + let severity = severity_of_level(_event.metadata().level()); self.logger .event_enabled(severity, _event.metadata().target()) } } -fn map_severity_to_otel_severity(level: &str) -> Severity { - match level { - "INFO" => Severity::Info, - "DEBUG" => Severity::Debug, - "TRACE" => Severity::Trace, - "WARN" => Severity::Warn, - "ERROR" => Severity::Error, - _ => Severity::Info, // won't reach here +const fn severity_of_level(level: &Level) -> Severity { + match *level { + Level::TRACE => Severity::Trace, + Level::DEBUG => Severity::Debug, + Level::INFO => Severity::Info, + Level::WARN => Severity::Warn, + Level::ERROR => Severity::Error, } } From c06c04bb127baaacd9aa4cee328107c078a7d2c3 Mon Sep 17 00:00:00 2001 From: Lalit Kumar Bhasin Date: Wed, 15 Nov 2023 06:58:31 -0800 Subject: [PATCH 29/68] Remove API for Creating Histograms with signed integers. (#1371) As per the OTel [specs], the value to be recorded with histogram instrument SHOULD be non-negative. Removing the existing method to record signed integer values. > The value is expected to be non-negative. This API SHOULD be documented in a way to communicate to users that > this value is expected to be non-negative. This API SHOULD NOT validate this value, that is left to implementations > of the API. [specs]: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/api.md#histogram --- opentelemetry-prometheus/src/lib.rs | 2 +- .../tests/integration_test.rs | 14 +++++------ opentelemetry-sdk/benches/metric.rs | 8 +++---- opentelemetry-sdk/src/metrics/meter.rs | 24 ------------------- opentelemetry/CHANGELOG.md | 3 ++- .../src/metrics/instruments/histogram.rs | 12 ---------- opentelemetry/src/metrics/meter.rs | 21 ---------------- opentelemetry/src/metrics/mod.rs | 10 -------- 8 files changed, 14 insertions(+), 80 deletions(-) diff --git a/opentelemetry-prometheus/src/lib.rs b/opentelemetry-prometheus/src/lib.rs index 804f69dea6..bb637f52b2 100644 --- a/opentelemetry-prometheus/src/lib.rs +++ b/opentelemetry-prometheus/src/lib.rs @@ -27,7 +27,7 @@ //! .with_description("Counts things") //! .init(); //! let histogram = meter -//! .i64_histogram("a.histogram") +//! .u64_histogram("a.histogram") //! 
.with_description("Records values") //! .init(); //! diff --git a/opentelemetry-prometheus/tests/integration_test.rs b/opentelemetry-prometheus/tests/integration_test.rs index 0deb0d3105..aff4c43a99 100644 --- a/opentelemetry-prometheus/tests/integration_test.rs +++ b/opentelemetry-prometheus/tests/integration_test.rs @@ -492,7 +492,7 @@ fn duplicate_metrics() { name: "no_conflict_two_histograms", record_metrics: Box::new(|meter_a, meter_b| { let foo_a = meter_a - .i64_histogram("foo") + .u64_histogram("foo") .with_unit(Unit::new("By")) .with_description("meter histogram foo") .init(); @@ -500,7 +500,7 @@ fn duplicate_metrics() { foo_a.record(100, &[KeyValue::new("A", "B")]); let foo_b = meter_b - .i64_histogram("foo") + .u64_histogram("foo") .with_unit(Unit::new("By")) .with_description("meter histogram foo") .init(); @@ -564,7 +564,7 @@ fn duplicate_metrics() { name: "conflict_help_two_histograms", record_metrics: Box::new(|meter_a, meter_b| { let bar_a = meter_a - .i64_histogram("bar") + .u64_histogram("bar") .with_unit(Unit::new("By")) .with_description("meter a bar") .init(); @@ -572,7 +572,7 @@ fn duplicate_metrics() { bar_a.record(100, &[KeyValue::new("A", "B")]); let bar_b = meter_b - .i64_histogram("bar") + .u64_histogram("bar") .with_unit(Unit::new("By")) .with_description("meter b bar") .init(); @@ -635,7 +635,7 @@ fn duplicate_metrics() { name: "conflict_unit_two_histograms", record_metrics: Box::new(|meter_a, meter_b| { let bar_a = meter_a - .i64_histogram("bar") + .u64_histogram("bar") .with_unit(Unit::new("By")) .with_description("meter histogram bar") .init(); @@ -643,7 +643,7 @@ fn duplicate_metrics() { bar_a.record(100, &[KeyValue::new("A", "B")]); let bar_b = meter_b - .i64_histogram("bar") + .u64_histogram("bar") .with_unit(Unit::new("ms")) .with_description("meter histogram bar") .init(); @@ -692,7 +692,7 @@ fn duplicate_metrics() { foo_a.add(100, &[KeyValue::new("A", "B")]); let foo_histogram_a = meter_a - .i64_histogram("foo") + .u64_histogram("foo") .with_unit(Unit::new("By")) .with_description("meter histogram foo") .init(); diff --git a/opentelemetry-sdk/benches/metric.rs b/opentelemetry-sdk/benches/metric.rs index 052295c279..d018634e04 100644 --- a/opentelemetry-sdk/benches/metric.rs +++ b/opentelemetry-sdk/benches/metric.rs @@ -349,7 +349,7 @@ fn counters(c: &mut Criterion) { const MAX_BOUND: usize = 100000; -fn bench_histogram(bound_count: usize) -> (SharedReader, Histogram) { +fn bench_histogram(bound_count: usize) -> (SharedReader, Histogram) { let mut bounds = vec![0; bound_count]; #[allow(clippy::needless_range_loop)] for i in 0..bounds.len() { @@ -373,7 +373,7 @@ fn bench_histogram(bound_count: usize) -> (SharedReader, Histogram) { } let mtr = builder.build().meter("test_meter"); let hist = mtr - .i64_histogram(format!("histogram_{}", bound_count)) + .u64_histogram(format!("histogram_{}", bound_count)) .init(); (r, hist) @@ -393,7 +393,7 @@ fn histograms(c: &mut Criterion) { format!("V,{},{},{}", bound_size, attr_size, i), )) } - let value: i64 = rng.gen_range(0..MAX_BOUND).try_into().unwrap(); + let value: u64 = rng.gen_range(0..MAX_BOUND).try_into().unwrap(); group.bench_function( format!("Record{}Attrs{}bounds", attr_size, bound_size), |b| b.iter(|| hist.record(value, &attributes)), @@ -414,7 +414,7 @@ fn benchmark_collect_histogram(b: &mut Bencher, n: usize) { .meter("sdk/metric/bench/histogram"); for i in 0..n { - let h = mtr.i64_histogram(format!("fake_data_{i}")).init(); + let h = mtr.u64_histogram(format!("fake_data_{i}")).init(); h.record(1, 
&[]); } diff --git a/opentelemetry-sdk/src/metrics/meter.rs b/opentelemetry-sdk/src/metrics/meter.rs index b025a9152d..5eaeba7745 100644 --- a/opentelemetry-sdk/src/metrics/meter.rs +++ b/opentelemetry-sdk/src/metrics/meter.rs @@ -444,24 +444,6 @@ impl InstrumentProvider for SdkMeter { .map(|i| Histogram::new(Arc::new(i))) } - fn i64_histogram( - &self, - name: Cow<'static, str>, - description: Option>, - unit: Option, - ) -> Result> { - validate_instrument_config(name.as_ref(), unit.as_ref(), self.validation_policy)?; - let p = InstrumentResolver::new(self, &self.i64_resolver); - - p.lookup( - InstrumentKind::Histogram, - name, - description, - unit.unwrap_or_default(), - ) - .map(|i| Histogram::new(Arc::new(i))) - } - fn register_callback( &self, insts: &[Arc], @@ -819,7 +801,6 @@ mod tests { ); assert(meter.f64_histogram(name.into(), None, None).map(|_| ())); assert(meter.u64_histogram(name.into(), None, None).map(|_| ())); - assert(meter.i64_histogram(name.into(), None, None).map(|_| ())); } // (unit, expected error) @@ -909,11 +890,6 @@ mod tests { .u64_histogram("test".into(), None, unit.clone()) .map(|_| ()), ); - assert( - meter - .i64_histogram("test".into(), None, unit.clone()) - .map(|_| ()), - ); } } } diff --git a/opentelemetry/CHANGELOG.md b/opentelemetry/CHANGELOG.md index 078c0f315b..e1b57c8105 100644 --- a/opentelemetry/CHANGELOG.md +++ b/opentelemetry/CHANGELOG.md @@ -13,9 +13,10 @@ gains, and avoids `IndexMap` dependency. This affects `body` and `attributes` of ### Removed -Removed `OrderMap` type as there was no requirement to use this over regular +- Removed `OrderMap` type as there was no requirement to use this over regular `HashMap`. [#1353](https://github.com/open-telemetry/opentelemetry-rust/pull/1353) +- Remove API for Creating Histograms with signed integers. [#1371](https://github.com/open-telemetry/opentelemetry-rust/pull/1371) ## [v0.21.0](https://github.com/open-telemetry/opentelemetry-rust/compare/v0.20.0...v0.21.0) diff --git a/opentelemetry/src/metrics/instruments/histogram.rs b/opentelemetry/src/metrics/instruments/histogram.rs index af27636c14..c6246ebee2 100644 --- a/opentelemetry/src/metrics/instruments/histogram.rs +++ b/opentelemetry/src/metrics/instruments/histogram.rs @@ -60,15 +60,3 @@ impl TryFrom>> for Histogram { ) } } - -impl TryFrom>> for Histogram { - type Error = MetricsError; - - fn try_from(builder: InstrumentBuilder<'_, Histogram>) -> Result { - builder.meter.instrument_provider.i64_histogram( - builder.name, - builder.description, - builder.unit, - ) - } -} diff --git a/opentelemetry/src/metrics/meter.rs b/opentelemetry/src/metrics/meter.rs index d2bef62b72..a66a77e55c 100644 --- a/opentelemetry/src/metrics/meter.rs +++ b/opentelemetry/src/metrics/meter.rs @@ -241,19 +241,6 @@ pub trait MeterProvider { /// .as_ref(), /// ); /// -/// // i64 histogram -/// let i64_histogram = meter.i64_histogram("my_i64_histogram").init(); -/// -/// // Record measurements using the histogram instrument record() -/// i64_histogram.record( -/// 1, -/// [ -/// KeyValue::new("mykey1", "myvalue1"), -/// KeyValue::new("mykey2", "myvalue2"), -/// ] -/// .as_ref(), -/// ); -/// /// // u64 histogram /// let u64_histogram = meter.u64_histogram("my_u64_histogram").init(); /// @@ -386,14 +373,6 @@ impl Meter { InstrumentBuilder::new(self, name.into()) } - /// creates an instrument builder for recording a distribution of values. 
- pub fn i64_histogram( - &self, - name: impl Into>, - ) -> InstrumentBuilder<'_, Histogram> { - InstrumentBuilder::new(self, name.into()) - } - /// Registers a callback to be called during the collection of a measurement /// cycle. /// diff --git a/opentelemetry/src/metrics/mod.rs b/opentelemetry/src/metrics/mod.rs index 07b71fc0e1..4241363411 100644 --- a/opentelemetry/src/metrics/mod.rs +++ b/opentelemetry/src/metrics/mod.rs @@ -238,16 +238,6 @@ pub trait InstrumentProvider { Ok(Histogram::new(Arc::new(noop::NoopSyncInstrument::new()))) } - /// creates an instrument for recording a distribution of values. - fn i64_histogram( - &self, - _name: Cow<'static, str>, - _description: Option>, - _unit: Option, - ) -> Result> { - Ok(Histogram::new(Arc::new(noop::NoopSyncInstrument::new()))) - } - /// Captures the function that will be called during data collection. /// /// It is only valid to call `observe` within the scope of the passed function. From 33b5e9b4e2b1ce7b1ed209ad61e2fbcad73e627e Mon Sep 17 00:00:00 2001 From: Lalit Kumar Bhasin Date: Wed, 15 Nov 2023 18:07:37 -0800 Subject: [PATCH 30/68] Fix metric collections during PeriodicReader shutdown (#1375) --- opentelemetry-sdk/CHANGELOG.md | 2 ++ opentelemetry-sdk/src/metrics/periodic_reader.rs | 13 ++++++++----- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/opentelemetry-sdk/CHANGELOG.md b/opentelemetry-sdk/CHANGELOG.md index 03c71af2f8..3a604cc610 100644 --- a/opentelemetry-sdk/CHANGELOG.md +++ b/opentelemetry-sdk/CHANGELOG.md @@ -22,6 +22,8 @@ `SpanData` now stores `events` as `SpanEvents` instead of `EvictedQueue` where `SpanEvents` is a struct with a `Vec` of events and `dropped_count`. +- [#1375](https://github.com/open-telemetry/opentelemetry-rust/pull/1375/) Fix metric collections during PeriodicReader shutdown + ## v0.21.1 ### Fixed diff --git a/opentelemetry-sdk/src/metrics/periodic_reader.rs b/opentelemetry-sdk/src/metrics/periodic_reader.rs index 54ccce04f0..17105c1bfb 100644 --- a/opentelemetry-sdk/src/metrics/periodic_reader.rs +++ b/opentelemetry-sdk/src/metrics/periodic_reader.rs @@ -374,18 +374,21 @@ impl MetricReader for PeriodicReader { if inner.is_shutdown { return Err(MetricsError::Other("reader is already shut down".into())); } - inner.is_shutdown = true; let (sender, receiver) = oneshot::channel(); inner .message_sender .try_send(Message::Shutdown(sender)) .map_err(|e| MetricsError::Other(e.to_string()))?; - drop(inner); // don't hold lock when blocking on future - futures_executor::block_on(receiver) - .map_err(|err| MetricsError::Other(err.to_string())) - .and_then(|res| res) + let shutdown_result = futures_executor::block_on(receiver) + .map_err(|err| MetricsError::Other(err.to_string()))?; + + // Acquire the lock again to set the shutdown flag + let mut inner = self.inner.lock()?; + inner.is_shutdown = true; + + shutdown_result } } From a70bb74080ed0d74d6d8196c161edb2c5e941fef Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Fri, 17 Nov 2023 09:08:21 -0800 Subject: [PATCH 31/68] Remove evicted q and hmap (#1381) --- opentelemetry-sdk/Cargo.toml | 4 - opentelemetry-sdk/benches/key_value_map.rs | 216 ------------------ .../src/trace/evicted_hash_map.rs | 174 -------------- opentelemetry-sdk/src/trace/evicted_queue.rs | 138 ----------- opentelemetry-sdk/src/trace/mod.rs | 4 - 5 files changed, 536 deletions(-) delete mode 100644 opentelemetry-sdk/benches/key_value_map.rs delete mode 100644 opentelemetry-sdk/src/trace/evicted_hash_map.rs delete mode 100644 opentelemetry-sdk/src/trace/evicted_queue.rs 
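The `EvictedQueue` deleted below was a bounded FIFO that silently evicted the oldest element and kept a count of drops rather than returning an error. For readers who relied on it, a minimal sketch of that pattern (illustrative names, not the SDK API; the actual deleted sources follow):

```rust
use std::collections::VecDeque;

// Bounded queue that evicts the oldest element and counts the drops.
struct BoundedQueue<T> {
    queue: VecDeque<T>,
    max_len: usize,
    dropped_count: usize,
}

impl<T> BoundedQueue<T> {
    fn new(max_len: usize) -> Self {
        BoundedQueue {
            queue: VecDeque::new(),
            max_len,
            dropped_count: 0,
        }
    }

    // Push to the back; evict the front once over capacity.
    fn push_back(&mut self, value: T) {
        self.queue.push_back(value);
        if self.queue.len() > self.max_len {
            self.queue.pop_front();
            self.dropped_count += 1;
        }
    }
}

fn main() {
    let mut q = BoundedQueue::new(2);
    for i in 0..3 {
        q.push_back(i);
    }
    assert_eq!(q.queue, [1, 2]); // 0 was evicted
    assert_eq!(q.dropped_count, 1);
}
```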
diff --git a/opentelemetry-sdk/Cargo.toml b/opentelemetry-sdk/Cargo.toml index e81cd6739b..2e034399bd 100644 --- a/opentelemetry-sdk/Cargo.toml +++ b/opentelemetry-sdk/Cargo.toml @@ -57,10 +57,6 @@ rt-async-std = ["async-std"] name = "context" harness = false -[[bench]] -name = "key_value_map" -harness = false - [[bench]] name = "span_builder" harness = false diff --git a/opentelemetry-sdk/benches/key_value_map.rs b/opentelemetry-sdk/benches/key_value_map.rs deleted file mode 100644 index feff473702..0000000000 --- a/opentelemetry-sdk/benches/key_value_map.rs +++ /dev/null @@ -1,216 +0,0 @@ -use criterion::{ - black_box, criterion_group, criterion_main, BatchSize::SmallInput, BenchmarkId, Criterion, -}; -use indexmap::IndexMap; -use opentelemetry::{Key, KeyValue, Value}; -use opentelemetry_sdk::trace::EvictedHashMap; -#[cfg(not(target_os = "windows"))] -use pprof::criterion::{Output, PProfProfiler}; -use std::iter::Iterator; - -fn criterion_benchmark(c: &mut Criterion) { - let cap = 32; - let input = [(2, 32, cap), (8, 32, cap), (32, 32, cap)]; - populate_benchmark(c, &input); - lookup_benchmark(c, &input); - populate_and_lookup_benchmark(c, &input); -} - -fn populate_benchmark(c: &mut Criterion, input: &[(usize, u32, usize)]) { - let mut group = c.benchmark_group("populate"); - for &(n, max, capacity) in input { - let parameter_string = format!("{n:02}/{max:02}/{capacity:02}"); - - group.bench_function( - BenchmarkId::new("EvictedHashMap", parameter_string.clone()), - |b| { - b.iter(|| populate_evicted_hashmap(n, max, capacity)); - }, - ); - group.bench_function( - BenchmarkId::new("IndexMap", parameter_string.clone()), - |b| { - b.iter(|| populate_indexmap(n, max, capacity)); - }, - ); - group.bench_function(BenchmarkId::new("TwoVecs", parameter_string.clone()), |b| { - b.iter(|| populate_twovecs(n, max, capacity)); - }); - group.bench_function(BenchmarkId::new("OneVec", parameter_string.clone()), |b| { - b.iter(|| populate_onevec(n, max, capacity)); - }); - } - group.finish(); -} - -fn lookup_benchmark(c: &mut Criterion, input: &[(usize, u32, usize)]) { - let mut group = c.benchmark_group("lookup"); - for &(n, max, capacity) in input { - let lookup_keys = &MAP_KEYS[n - 2..n]; - let parameter_string = format!("{n:02}/{max:02}/{capacity:02}"); - group.bench_function( - BenchmarkId::new("EvictedHashMap", parameter_string.clone()), - |b| { - b.iter_batched( - || populate_evicted_hashmap(n, max, capacity), - |map| lookup_evicted_hashmap(&map, lookup_keys), - SmallInput, - ); - }, - ); - group.bench_function( - BenchmarkId::new("IndexMap", parameter_string.clone()), - |b| { - b.iter_batched( - || populate_indexmap(n, max, capacity), - |map| lookup_indexmap(&map, lookup_keys), - SmallInput, - ); - }, - ); - group.bench_function(BenchmarkId::new("OneVec", parameter_string.clone()), |b| { - b.iter_batched( - || populate_onevec(n, max, capacity), - |vec| lookup_onevec(&vec, lookup_keys), - SmallInput, - ); - }); - group.bench_function(BenchmarkId::new("TwoVecs", parameter_string.clone()), |b| { - b.iter_batched( - || populate_twovecs(n, max, capacity), - |(keys, vals)| lookup_twovec(&keys, &vals, lookup_keys), - SmallInput, - ); - }); - } - group.finish(); -} - -fn populate_and_lookup_benchmark(c: &mut Criterion, input: &[(usize, u32, usize)]) { - let mut group = c.benchmark_group("populate_and_lookup"); - for &(n, max, capacity) in input { - let lookup_keys = &MAP_KEYS[n - 2..n]; - let parameter_string = format!("{n:02}/{max:02}/{capacity:02}"); - group.bench_function( - 
BenchmarkId::new("EvictedHashMap", parameter_string.clone()), - |b| { - b.iter(|| { - let map = populate_evicted_hashmap(n, max, capacity); - lookup_evicted_hashmap(&map, lookup_keys); - }); - }, - ); - group.bench_function( - BenchmarkId::new("IndexMap", parameter_string.clone()), - |b| { - b.iter(|| { - let map = populate_indexmap(n, max, capacity); - lookup_indexmap(&map, lookup_keys); - }); - }, - ); - group.bench_function(BenchmarkId::new("OneVec", parameter_string.clone()), |b| { - b.iter(|| { - let vec = populate_onevec(n, max, capacity); - lookup_onevec(&vec, lookup_keys); - }); - }); - group.bench_function(BenchmarkId::new("TwoVecs", parameter_string.clone()), |b| { - b.iter(|| { - let (keys, vals) = populate_twovecs(n, max, capacity); - lookup_twovec(&keys, &vals, lookup_keys); - }); - }); - } - group.finish(); -} - -fn populate_evicted_hashmap(n: usize, max: u32, capacity: usize) -> EvictedHashMap { - let mut map = EvictedHashMap::new(max, capacity); - for (idx, key) in MAP_KEYS.iter().enumerate().take(n) { - map.insert(KeyValue::new(*key, idx as i64)); - } - map -} - -fn lookup_evicted_hashmap(map: &EvictedHashMap, keys: &[&'static str]) { - for key in keys { - black_box(map.get(&Key::new(*key))); - } -} - -fn populate_indexmap(n: usize, max: u32, _capacity: usize) -> IndexMap { - let mut map = IndexMap::with_capacity(max as usize); - for (idx, key) in MAP_KEYS.iter().enumerate().take(n) { - map.insert(Key::new(*key), Value::I64(idx as i64)); - } - map -} - -fn lookup_indexmap(map: &IndexMap, keys: &[&'static str]) { - for key in keys { - black_box(map.get(&Key::new(*key))); - } -} - -fn populate_onevec(n: usize, max: u32, _capacity: usize) -> Vec<(Key, Value)> { - let mut tuples = Vec::with_capacity(max as usize); - for (idx, key) in MAP_KEYS.iter().enumerate().take(n) { - tuples.push((Key::new(*key), Value::I64(idx as i64))); - } - tuples -} - -fn lookup_onevec(vec: &[(Key, Value)], keys: &[&'static str]) { - for key in keys { - black_box( - vec.iter() - .position(|(k, _v)| *k == Key::new(*key)) - .map(|idx| vec.get(idx)), - ); - } -} - -fn populate_twovecs(n: usize, max: u32, _capacity: usize) -> (Vec, Vec) { - let mut keys = Vec::with_capacity(max as usize); - let mut vals = Vec::with_capacity(max as usize); - for (idx, key) in MAP_KEYS.iter().enumerate().take(n) { - keys.push(Key::new(*key)); - vals.push(Value::I64(idx as i64)); - } - (keys, vals) -} - -fn lookup_twovec(keys: &[Key], vals: &[Value], lookup_keys: &[&'static str]) { - for key in lookup_keys { - black_box( - keys.iter() - .position(|k| *k == Key::new(*key)) - .map(|idx| vals.get(idx)), - ); - } -} - -const MAP_KEYS: [&str; 64] = [ - "key.1", "key.2", "key.3", "key.4", "key.5", "key.6", "key.7", "key.8", "key.9", "key.10", - "key.11", "key.12", "key.13", "key.14", "key.15", "key.16", "key.17", "key.18", "key.19", - "key.20", "key.21", "key.22", "key.23", "key.24", "key.25", "key.26", "key.27", "key.28", - "key.29", "key.30", "key.31", "key.32", "key.33", "key.34", "key.35", "key.36", "key.37", - "key.38", "key.39", "key.40", "key.41", "key.42", "key.43", "key.44", "key.45", "key.46", - "key.47", "key.48", "key.49", "key.50", "key.51", "key.52", "key.53", "key.54", "key.55", - "key.56", "key.57", "key.58", "key.59", "key.60", "key.61", "key.62", "key.63", "key.64", -]; - -#[cfg(not(target_os = "windows"))] -criterion_group! 
{ - name = benches; - config = Criterion::default().with_profiler(PProfProfiler::new(100, Output::Flamegraph(None))); - targets = criterion_benchmark -} -#[cfg(target_os = "windows")] -criterion_group! { - name = benches; - config = Criterion::default(); - targets = criterion_benchmark -} -criterion_main!(benches); diff --git a/opentelemetry-sdk/src/trace/evicted_hash_map.rs b/opentelemetry-sdk/src/trace/evicted_hash_map.rs deleted file mode 100644 index 46c84de62c..0000000000 --- a/opentelemetry-sdk/src/trace/evicted_hash_map.rs +++ /dev/null @@ -1,174 +0,0 @@ -//! # Evicted Map - -use opentelemetry::{Key, KeyValue, Value}; -use std::collections::hash_map::Entry; -use std::collections::{HashMap, LinkedList}; - -/// A hash map with a capped number of attributes that retains the most -/// recently set entries. -#[derive(Clone, Debug, PartialEq)] -pub struct EvictedHashMap { - map: HashMap, - evict_list: LinkedList, - max_len: u32, - dropped_count: u32, -} - -impl EvictedHashMap { - /// Create a new `EvictedHashMap` with a given max length and capacity. - pub fn new(max_len: u32, capacity: usize) -> Self { - EvictedHashMap { - map: HashMap::with_capacity(capacity), - evict_list: LinkedList::new(), - max_len, - dropped_count: 0, - } - } - - /// Inserts a key-value pair into the map. - pub fn insert(&mut self, item: KeyValue) { - let KeyValue { key, value } = item; - let mut already_exists = false; - // Check for existing item - match self.map.entry(key.clone()) { - Entry::Occupied(mut occupied) => { - occupied.insert(value); - already_exists = true; - } - Entry::Vacant(entry) => { - entry.insert(value); - } - } - - if already_exists { - self.move_key_to_front(key); - } else { - // Add new item - self.evict_list.push_front(key); - } - - // Verify size not exceeded - if self.evict_list.len() as u32 > self.max_len { - self.remove_oldest(); - self.dropped_count += 1; - } - } - - /// Returns the number of elements in the map. - pub fn len(&self) -> usize { - self.map.len() - } - - /// Returns `true` if the map is empty. - pub fn is_empty(&self) -> bool { - self.map.is_empty() - } - - /// Returns the dropped attribute count - pub fn dropped_count(&self) -> u32 { - self.dropped_count - } - - /// Returns a front-to-back iterator. - pub fn iter(&self) -> Iter<'_> { - Iter(self.map.iter()) - } - - /// Returns a reference to the value corresponding to the key if it exists - pub fn get(&self, key: &Key) -> Option<&Value> { - self.map.get(key) - } - - fn move_key_to_front(&mut self, key: Key) { - if self.evict_list.is_empty() { - // If empty, push front - self.evict_list.push_front(key); - } else if self.evict_list.front() == Some(&key) { - // Already the front, ignore - } else { - // Else split linked lists around key and combine - let key_idx = self - .evict_list - .iter() - .position(|k| k == &key) - .expect("key must exist in evicted hash map, this is a bug"); - let mut tail = self.evict_list.split_off(key_idx); - let item = tail.pop_front().unwrap(); - self.evict_list.push_front(item); - self.evict_list.append(&mut tail); - } - } - - fn remove_oldest(&mut self) { - if let Some(oldest_item) = self.evict_list.pop_back() { - self.map.remove(&oldest_item); - } - } -} - -/// An owned iterator over the entries of a `EvictedHashMap`. 
-#[derive(Debug)] -pub struct IntoIter(std::collections::hash_map::IntoIter); - -impl Iterator for IntoIter { - type Item = (Key, Value); - - fn next(&mut self) -> Option { - self.0.next() - } -} - -impl IntoIterator for EvictedHashMap { - type Item = (Key, Value); - type IntoIter = IntoIter; - - fn into_iter(self) -> Self::IntoIter { - IntoIter(self.map.into_iter()) - } -} - -impl<'a> IntoIterator for &'a EvictedHashMap { - type Item = (&'a Key, &'a Value); - type IntoIter = Iter<'a>; - - fn into_iter(self) -> Self::IntoIter { - Iter(self.map.iter()) - } -} - -/// An iterator over the entries of an `EvictedHashMap`. -#[derive(Debug)] -pub struct Iter<'a>(std::collections::hash_map::Iter<'a, Key, Value>); - -impl<'a> Iterator for Iter<'a> { - type Item = (&'a Key, &'a Value); - - fn next(&mut self) -> Option { - self.0.next() - } -} - -#[cfg(test)] -mod tests { - use super::*; - use std::collections::HashSet; - - #[test] - fn insert_over_capacity_test() { - let max_len = 10; - let mut map = EvictedHashMap::new(max_len, max_len as usize); - - for i in 0..=max_len { - map.insert(Key::new(i.to_string()).bool(true)) - } - - assert_eq!(map.dropped_count, 1); - assert_eq!(map.len(), max_len as usize); - assert_eq!( - map.map.keys().cloned().collect::>(), - (1..=max_len) - .map(|i| Key::new(i.to_string())) - .collect::>() - ); - } -} diff --git a/opentelemetry-sdk/src/trace/evicted_queue.rs b/opentelemetry-sdk/src/trace/evicted_queue.rs deleted file mode 100644 index 9b3679efaa..0000000000 --- a/opentelemetry-sdk/src/trace/evicted_queue.rs +++ /dev/null @@ -1,138 +0,0 @@ -//! # Evicted Queue - -use std::collections::VecDeque; - -/// This queue maintains an ordered list of elements, and a count of -/// dropped elements. Elements are removed from the queue in a first -/// in first out fashion. -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct EvictedQueue { - queue: Option>, - max_len: u32, - dropped_count: u32, -} - -impl EvictedQueue { - /// Create a new `EvictedQueue` with a given max length. - pub fn new(max_len: u32) -> Self { - EvictedQueue { - queue: None, - max_len, - dropped_count: 0, - } - } - - /// Push a new element to the back of the queue, dropping and - /// recording dropped count if over capacity. - pub(crate) fn push_back(&mut self, value: T) { - let queue = self.queue.get_or_insert_with(Default::default); - queue.push_back(value); - - if queue.len() as u32 > self.max_len { - queue.pop_front(); - self.dropped_count += 1; - } - } - - /// Moves all the elements of other into self, leaving other empty. - pub fn append_vec(&mut self, other: &mut Vec) { - self.extend(other.drain(..)); - } - - /// Returns `true` if the `EvictedQueue` is empty. - pub fn is_empty(&self) -> bool { - self.queue.as_ref().map_or(true, |queue| queue.is_empty()) - } - - /// Returns a front-to-back iterator. - pub fn iter(&self) -> Iter<'_, T> { - Iter(self.queue.as_ref().map(|queue| queue.iter())) - } - - /// Returns the number of elements in the `EvictedQueue`. - pub fn len(&self) -> usize { - self.queue.as_ref().map_or(0, |queue| queue.len()) - } - - /// Count of dropped attributes - pub fn dropped_count(&self) -> u32 { - self.dropped_count - } -} - -/// An owned iterator over the entries of a `EvictedQueue`. 
-#[derive(Debug)] -pub struct IntoIter(Option>); - -impl Iterator for IntoIter { - type Item = T; - - fn next(&mut self) -> Option { - self.0.as_mut().and_then(|iter| iter.next()) - } -} - -impl IntoIterator for EvictedQueue { - type Item = T; - type IntoIter = IntoIter; - - fn into_iter(self) -> Self::IntoIter { - IntoIter(self.queue.map(|queue| queue.into_iter())) - } -} - -/// An iterator over the entries of an `EvictedQueue`. -#[derive(Debug)] -pub struct Iter<'a, T>(Option>); - -impl<'a, T: 'static> Iterator for Iter<'a, T> { - type Item = &'a T; - - fn next(&mut self) -> Option { - self.0.as_mut().and_then(|iter| iter.next()) - } -} - -impl Extend for EvictedQueue { - fn extend>(&mut self, iter: I) { - iter.into_iter().for_each(move |elt| self.push_back(elt)); - } -} - -#[cfg(test)] -mod tests { - use super::EvictedQueue; - use std::collections::VecDeque; - - #[test] - fn insert_over_capacity_test() { - let capacity = 10; - let mut queue = EvictedQueue::new(capacity); - - for i in 0..=capacity { - queue.push_back(i) - } - - assert_eq!(queue.dropped_count, 1); - assert_eq!(queue.len(), capacity as usize); - assert_eq!( - queue.queue.unwrap(), - (1..=capacity).collect::>() - ); - } - - #[test] - fn zero_capacity_test() { - let capacity = 0; - let mut queue = EvictedQueue::new(capacity); - - queue.push_back(1); - - assert_eq!(queue.dropped_count, 1); - assert_eq!(queue.len(), capacity as usize); - assert_eq!( - queue.queue.unwrap(), - (1..=capacity).collect::>() - ); - } -} diff --git a/opentelemetry-sdk/src/trace/mod.rs b/opentelemetry-sdk/src/trace/mod.rs index d031cc177f..932652d43b 100644 --- a/opentelemetry-sdk/src/trace/mod.rs +++ b/opentelemetry-sdk/src/trace/mod.rs @@ -8,8 +8,6 @@ //! * The [`TracerProvider`] struct which configures and produces [`Tracer`]s. mod config; mod events; -mod evicted_hash_map; -mod evicted_queue; mod id_generator; mod links; mod provider; @@ -21,8 +19,6 @@ mod tracer; pub use config::{config, Config}; pub use events::SpanEvents; -pub use evicted_hash_map::EvictedHashMap; -pub use evicted_queue::EvictedQueue; pub use id_generator::{aws::XrayIdGenerator, IdGenerator, RandomIdGenerator}; pub use links::SpanLinks; pub use provider::{Builder, TracerProvider}; From 9e2e3db72dd6b48ab0c3626ec52501a657621134 Mon Sep 17 00:00:00 2001 From: harscoet Date: Sun, 19 Nov 2023 08:32:10 +0100 Subject: [PATCH 32/68] OTLP tonic metadata from env variable (#1377) Fixes https://github.com/open-telemetry/opentelemetry-rust/issues/1336 As per the [specs](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.25.0/specification/protocol/exporter.md#specifying-headers-via-environment-variables), the custom headers for OTLP exporter can be specified through env variables - `OTEL_EXPORTER_OTLP_HEADERS`, `OTEL_EXPORTER_OTLP_TRACES_HEADERS`, `OTEL_EXPORTER_OTLP_METRICS_HEADERS`. 
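For illustration, the comma-separated `key=value` format those variables use can be parsed with a few iterator adapters. This is a standalone sketch (with a hypothetical `parse_headers` name) of the behavior the patch centralizes in `parse_header_string` in the diff below: entries are trimmed, and pairs with an empty key or value are skipped.

```rust
// Sketch of parsing "k1=v1,k2=v2"-style header strings.
fn parse_headers(value: &str) -> impl Iterator<Item = (&str, &str)> {
    value
        .split_terminator(',')
        .filter_map(|pair| pair.trim().split_once('='))
        .map(|(k, v)| (k.trim(), v.trim()))
        .filter(|(k, v)| !k.is_empty() && !v.is_empty())
}

fn main() {
    let raw = "k1=v1, k2=v2,,k3=";
    let headers: Vec<_> = parse_headers(raw).collect();
    assert_eq!(headers, [("k1", "v1"), ("k2", "v2")]); // "k3=" is dropped
}
```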
This PR completes the work already done in PR open-telemetry/opentelemetry-rust#1290 adding support for tonic metadata To reproduce the same behavior as http exporter, the env-variable takes precedence (as discussed in open-telemetry/opentelemetry-rust-contrib#10) * Move common code for http and tonic exporters in `exporter/mod.rs` (function to parse header from string and test helper to run tests with isolated env variables) I wanted to minimize the changes but maybe it should be a good idea to use a crate like https://crates.io/crates/temp-env for environment related testing --- opentelemetry-otlp/CHANGELOG.md | 1 + opentelemetry-otlp/src/exporter/http/mod.rs | 44 ++------ opentelemetry-otlp/src/exporter/mod.rs | 75 +++++++++++++ opentelemetry-otlp/src/exporter/tonic/mod.rs | 108 ++++++++++++++++++- opentelemetry-otlp/src/lib.rs | 4 +- 5 files changed, 191 insertions(+), 41 deletions(-) diff --git a/opentelemetry-otlp/CHANGELOG.md b/opentelemetry-otlp/CHANGELOG.md index c324dd0ead..f6994aa03c 100644 --- a/opentelemetry-otlp/CHANGELOG.md +++ b/opentelemetry-otlp/CHANGELOG.md @@ -10,6 +10,7 @@ - Add `grpcio` metrics exporter (#1202) - Allow specifying OTLP HTTP headers from env variable (#1290) - Support custom channels in topic exporters [#1335](https://github.com/open-telemetry/opentelemetry-rust/pull/1335) +- Allow specifying OTLP Tonic metadata from env variable (#1377) ### Changed diff --git a/opentelemetry-otlp/src/exporter/http/mod.rs b/opentelemetry-otlp/src/exporter/http/mod.rs index e0c5e9cdff..d26d1302ca 100644 --- a/opentelemetry-otlp/src/exporter/http/mod.rs +++ b/opentelemetry-otlp/src/exporter/http/mod.rs @@ -10,7 +10,7 @@ use std::str::FromStr; use std::sync::{Arc, Mutex}; use std::time::Duration; -use super::default_headers; +use super::{default_headers, parse_header_string}; #[cfg(feature = "metrics")] mod metrics; @@ -316,46 +316,18 @@ fn resolve_endpoint( #[allow(clippy::mutable_key_type)] // http headers are not mutated fn add_header_from_string(input: &str, headers: &mut HashMap) { - for pair in input.split_terminator(',') { - if pair.trim().is_empty() { - continue; - } - if let Some((k, v)) = pair.trim().split_once('=') { - if !k.trim().is_empty() && !v.trim().is_empty() { - if let (Ok(key), Ok(value)) = ( - HeaderName::from_str(k.trim()), - HeaderValue::from_str(v.trim()), - ) { - headers.insert(key, value); - } - } - } - } + headers.extend(parse_header_string(input).filter_map(|(key, value)| { + Some(( + HeaderName::from_str(key).ok()?, + HeaderValue::from_str(value).ok()?, + )) + })); } #[cfg(test)] mod tests { + use crate::exporter::tests::run_env_test; use crate::{OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT}; - use std::sync::Mutex; - - // Make sure env tests are not running concurrently - static ENV_LOCK: Mutex<()> = Mutex::new(()); - - fn run_env_test(env_vars: T, f: F) - where - F: FnOnce(), - T: Into>, - { - let _env_lock = ENV_LOCK.lock().expect("env test lock poisoned"); - let env_vars = env_vars.into(); - for (k, v) in env_vars.iter() { - std::env::set_var(k, v); - } - f(); - for (k, _) in env_vars { - std::env::remove_var(k); - } - } #[test] fn test_append_signal_path_to_generic_env() { diff --git a/opentelemetry-otlp/src/exporter/mod.rs b/opentelemetry-otlp/src/exporter/mod.rs index a6236bc61f..eafd82e1aa 100644 --- a/opentelemetry-otlp/src/exporter/mod.rs +++ b/opentelemetry-otlp/src/exporter/mod.rs @@ -223,3 +223,78 @@ impl WithExportConfig for B { self } } + +#[cfg(any(feature = "grpc-tonic", feature = "http-proto"))] +fn 
parse_header_string(value: &str) -> impl Iterator { + value + .split_terminator(',') + .map(str::trim) + .filter_map(parse_header_key_value_string) +} + +#[cfg(any(feature = "grpc-tonic", feature = "http-proto"))] +fn parse_header_key_value_string(key_value_string: &str) -> Option<(&str, &str)> { + key_value_string + .split_once('=') + .map(|(key, value)| (key.trim(), value.trim())) + .filter(|(key, value)| !key.is_empty() && !value.is_empty()) +} + +#[cfg(test)] +#[cfg(any(feature = "grpc-tonic", feature = "http-proto"))] +mod tests { + // Make sure env tests are not running concurrently + static ENV_LOCK: std::sync::Mutex<()> = std::sync::Mutex::new(()); + + pub(crate) fn run_env_test(env_vars: T, f: F) + where + F: FnOnce(), + T: Into>, + { + let _env_lock = ENV_LOCK.lock().expect("env test lock poisoned"); + let env_vars = env_vars.into(); + for (k, v) in env_vars.iter() { + std::env::set_var(k, v); + } + f(); + for (k, _) in env_vars { + std::env::remove_var(k); + } + } + + #[test] + fn test_parse_header_string() { + let test_cases = vec![ + // Format: (input_str, expected_headers) + ("k1=v1", vec![("k1", "v1")]), + ("k1=v1,k2=v2", vec![("k1", "v1"), ("k2", "v2")]), + ("k1=v1=10,k2,k3", vec![("k1", "v1=10")]), + ("k1=v1,,,k2,k3=10", vec![("k1", "v1"), ("k3", "10")]), + ]; + + for (input_str, expected_headers) in test_cases { + assert_eq!( + super::parse_header_string(input_str).collect::>(), + expected_headers, + ) + } + } + + #[test] + fn test_parse_header_key_value_string() { + let test_cases = vec![ + // Format: (input_str, expected_header) + ("k1=v1", Some(("k1", "v1"))), + ("", None), + ("=v1", None), + ("k1=", None), + ]; + + for (input_str, expected_headers) in test_cases { + assert_eq!( + super::parse_header_key_value_string(input_str), + expected_headers, + ) + } + } +} diff --git a/opentelemetry-otlp/src/exporter/tonic/mod.rs b/opentelemetry-otlp/src/exporter/tonic/mod.rs index 30840157be..7f5f8709b2 100644 --- a/opentelemetry-otlp/src/exporter/tonic/mod.rs +++ b/opentelemetry-otlp/src/exporter/tonic/mod.rs @@ -1,7 +1,9 @@ use std::env; use std::fmt::{Debug, Formatter}; +use std::str::FromStr; use std::time::Duration; +use http::{HeaderMap, HeaderName, HeaderValue}; use tonic::codec::CompressionEncoding; use tonic::metadata::{KeyAndValueRef, MetadataMap}; use tonic::service::Interceptor; @@ -9,11 +11,11 @@ use tonic::transport::Channel; #[cfg(feature = "tls")] use tonic::transport::ClientTlsConfig; -use super::default_headers; +use super::{default_headers, parse_header_string}; use crate::exporter::Compression; use crate::{ ExportConfig, OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_ENDPOINT, - OTEL_EXPORTER_OTLP_TIMEOUT, + OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_TIMEOUT, }; #[cfg(feature = "logs")] @@ -213,11 +215,17 @@ impl TonicExporterBuilder { signal_endpoint_path: &str, signal_timeout_var: &str, signal_compression_var: &str, + signal_headers_var: &str, ) -> Result<(Channel, BoxInterceptor, Option), crate::Error> { let tonic_config = self.tonic_config; let compression = resolve_compression(&tonic_config, signal_compression_var)?; - let metadata = tonic_config.metadata.unwrap_or_default(); + let headers_from_env = parse_headers_from_env(signal_headers_var); + let metadata = merge_metadata_with_headers_from_env( + tonic_config.metadata.unwrap_or_default(), + headers_from_env, + ); + let add_metadata = move |mut req: tonic::Request<()>| { for key_and_value in metadata.iter() { match key_and_value { @@ -294,6 +302,7 @@ impl TonicExporterBuilder { "/v1/logs", 
crate::logs::OTEL_EXPORTER_OTLP_LOGS_TIMEOUT, crate::logs::OTEL_EXPORTER_OTLP_LOGS_COMPRESSION, + crate::logs::OTEL_EXPORTER_OTLP_LOGS_HEADERS, )?; let client = TonicLogsClient::new(channel, interceptor, compression); @@ -316,6 +325,7 @@ impl TonicExporterBuilder { "/v1/metrics", crate::metric::OTEL_EXPORTER_OTLP_METRICS_TIMEOUT, crate::metric::OTEL_EXPORTER_OTLP_METRICS_COMPRESSION, + crate::metric::OTEL_EXPORTER_OTLP_METRICS_HEADERS, )?; let client = TonicMetricsClient::new(channel, interceptor, compression); @@ -339,6 +349,7 @@ impl TonicExporterBuilder { "/v1/traces", crate::span::OTEL_EXPORTER_OTLP_TRACES_TIMEOUT, crate::span::OTEL_EXPORTER_OTLP_TRACES_COMPRESSION, + crate::span::OTEL_EXPORTER_OTLP_TRACES_HEADERS, )?; let client = TonicTracesClient::new(channel, interceptor, compression); @@ -347,11 +358,44 @@ impl TonicExporterBuilder { } } +fn merge_metadata_with_headers_from_env( + metadata: MetadataMap, + headers_from_env: HeaderMap, +) -> MetadataMap { + if headers_from_env.is_empty() { + metadata + } else { + let mut existing_headers: HeaderMap = metadata.into_headers(); + existing_headers.extend(headers_from_env); + + MetadataMap::from_headers(existing_headers) + } +} + +fn parse_headers_from_env(signal_headers_var: &str) -> HeaderMap { + env::var(signal_headers_var) + .or_else(|_| env::var(OTEL_EXPORTER_OTLP_HEADERS)) + .map(|input| { + parse_header_string(&input) + .filter_map(|(key, value)| { + Some(( + HeaderName::from_str(key).ok()?, + HeaderValue::from_str(value).ok()?, + )) + }) + .collect::() + }) + .unwrap_or_default() +} + #[cfg(test)] mod tests { + use crate::exporter::tests::run_env_test; #[cfg(feature = "gzip-tonic")] use crate::exporter::Compression; use crate::TonicExporterBuilder; + use crate::{OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_TRACES_HEADERS}; + use http::{HeaderMap, HeaderName, HeaderValue}; use tonic::metadata::{MetadataMap, MetadataValue}; #[test] @@ -393,4 +437,62 @@ mod tests { let builder = TonicExporterBuilder::default().with_compression(Compression::Gzip); assert_eq!(builder.tonic_config.compression.unwrap(), Compression::Gzip); } + + #[test] + fn test_parse_headers_from_env() { + run_env_test( + vec![ + (OTEL_EXPORTER_OTLP_TRACES_HEADERS, "k1=v1,k2=v2"), + (OTEL_EXPORTER_OTLP_HEADERS, "k3=v3"), + ], + || { + assert_eq!( + super::parse_headers_from_env(OTEL_EXPORTER_OTLP_TRACES_HEADERS), + HeaderMap::from_iter([ + ( + HeaderName::from_static("k1"), + HeaderValue::from_static("v1") + ), + ( + HeaderName::from_static("k2"), + HeaderValue::from_static("v2") + ), + ]) + ); + + assert_eq!( + super::parse_headers_from_env("EMPTY_ENV"), + HeaderMap::from_iter([( + HeaderName::from_static("k3"), + HeaderValue::from_static("v3") + )]) + ); + }, + ) + } + + #[test] + fn test_merge_metadata_with_headers_from_env() { + run_env_test( + vec![(OTEL_EXPORTER_OTLP_TRACES_HEADERS, "k1=v1,k2=v2")], + || { + let headers_from_env = + super::parse_headers_from_env(OTEL_EXPORTER_OTLP_TRACES_HEADERS); + + let mut metadata = MetadataMap::new(); + metadata.insert("foo", "bar".parse().unwrap()); + metadata.insert("k1", "v0".parse().unwrap()); + + let result = + super::merge_metadata_with_headers_from_env(metadata, headers_from_env); + + assert_eq!( + result.get("foo").unwrap(), + MetadataValue::from_static("bar") + ); + assert_eq!(result.get("k1").unwrap(), MetadataValue::from_static("v1")); + assert_eq!(result.get("k2").unwrap(), MetadataValue::from_static("v2")); + }, + ); + } } diff --git a/opentelemetry-otlp/src/lib.rs b/opentelemetry-otlp/src/lib.rs index 
0153d60218..1c8b41390e 100644 --- a/opentelemetry-otlp/src/lib.rs +++ b/opentelemetry-otlp/src/lib.rs @@ -349,12 +349,12 @@ pub enum Error { RequestFailed(#[from] opentelemetry_http::HttpError), /// The provided value is invalid in HTTP headers. - #[cfg(feature = "http-proto")] + #[cfg(any(feature = "grpc-tonic", feature = "http-proto"))] #[error("http header value error {0}")] InvalidHeaderValue(#[from] http::header::InvalidHeaderValue), /// The provided name is invalid in HTTP headers. - #[cfg(feature = "http-proto")] + #[cfg(any(feature = "grpc-tonic", feature = "http-proto"))] #[error("http header name error {0}")] InvalidHeaderName(#[from] http::header::InvalidHeaderName), From abc23c166b49552883f9d53724534e7070550a67 Mon Sep 17 00:00:00 2001 From: harscoet Date: Sun, 19 Nov 2023 08:38:02 +0100 Subject: [PATCH 33/68] Run opentelemetry-jaeger tests consecutively (#1384) Limit threads to 1 (to force tests to run consecutively) to temporarily fix random [failures](https://github.com/open-telemetry/opentelemetry-rust/actions/runs/6915742069/job/18815025248) during `opentelemetry-jaeger` tests, due to environment variable updates from parallel tests If you run this command line multiple times, you should be able to reproduce it (`test_resolve_timeout` and `test_resolve_endpoint` are updating some environment variables) ```shell cargo test --manifest-path=opentelemetry-jaeger/Cargo.toml --all-features collector -- --test-threads=5 ``` --- scripts/test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/test.sh b/scripts/test.sh index fe7996f629..8f082fa279 100755 --- a/scripts/test.sh +++ b/scripts/test.sh @@ -11,6 +11,6 @@ cargo test --manifest-path=opentelemetry/Cargo.toml --no-default-features cargo test --manifest-path=opentelemetry/Cargo.toml --all-features -- --ignored --test-threads=1 cargo test --manifest-path=opentelemetry/Cargo.toml --all-features -cargo test --manifest-path=opentelemetry-jaeger/Cargo.toml --all-features +cargo test --manifest-path=opentelemetry-jaeger/Cargo.toml --all-features -- --test-threads=1 cargo test --manifest-path=opentelemetry-otlp/Cargo.toml --features "trace,grpc-sys" --no-default-features cargo test --manifest-path=opentelemetry-zipkin/Cargo.toml --all-features From 28aca997e5a71cb393070eb5e2c3659eb4931056 Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Mon, 20 Nov 2023 09:31:02 -0800 Subject: [PATCH 34/68] Attempt to write metric tests with inmemoryexp (#1382) --- opentelemetry-sdk/src/metrics/mod.rs | 100 +++++++++++++++++++++++++++ 1 file changed, 100 insertions(+) diff --git a/opentelemetry-sdk/src/metrics/mod.rs b/opentelemetry-sdk/src/metrics/mod.rs index c74e71bcd5..13e9d35cbc 100644 --- a/opentelemetry-sdk/src/metrics/mod.rs +++ b/opentelemetry-sdk/src/metrics/mod.rs @@ -57,3 +57,103 @@ pub use meter_provider::*; pub use periodic_reader::*; pub use pipeline::Pipeline; pub use view::*; + +#[cfg(all(test, feature = "testing"))] +mod tests { + use super::*; + use crate::{runtime, testing::metrics::InMemoryMetricsExporter}; + use opentelemetry::{ + metrics::{MeterProvider as _, Unit}, + KeyValue, + }; + + // "multi_thread" tokio flavor must be used else flush won't + // be able to make progress! + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] + async fn counter_aggregation() { + // Run this test with stdout enabled to see output. 
+        // cargo test counter --features=metrics,testing -- --nocapture
+
+        // Arrange
+        let exporter = InMemoryMetricsExporter::default();
+        let reader = PeriodicReader::builder(exporter.clone(), runtime::Tokio).build();
+        let meter_provider = SdkMeterProvider::builder().with_reader(reader).build();
+
+        // Act
+        let meter = meter_provider.meter("test");
+        let counter = meter
+            .u64_counter("my_counter")
+            .with_unit(Unit::new("my_unit"))
+            .init();
+        counter.add(1, &[KeyValue::new("key1", "value1")]);
+        counter.add(1, &[KeyValue::new("key1", "value1")]);
+        counter.add(1, &[KeyValue::new("key1", "value1")]);
+        counter.add(1, &[KeyValue::new("key1", "value1")]);
+        counter.add(1, &[KeyValue::new("key1", "value1")]);
+
+        counter.add(1, &[KeyValue::new("key1", "value2")]);
+        counter.add(1, &[KeyValue::new("key1", "value2")]);
+        counter.add(1, &[KeyValue::new("key1", "value2")]);
+
+        meter_provider.force_flush().unwrap();
+
+        // Assert
+        let resource_metrics = exporter
+            .get_finished_metrics()
+            .expect("metrics are expected to be exported.");
+        assert!(!resource_metrics.is_empty());
+        let metric = &resource_metrics[0].scope_metrics[0].metrics[0];
+        assert_eq!(metric.name, "my_counter");
+        assert_eq!(metric.unit.as_str(), "my_unit");
+        let sum = metric
+            .data
+            .as_any()
+            .downcast_ref::<data::Sum<u64>>()
+            .expect("Sum aggregation expected for Counter instruments by default");
+
+        // Expecting 2 time-series.
+        assert_eq!(sum.data_points.len(), 2);
+        assert!(sum.is_monotonic, "Counter should produce monotonic.");
+        assert_eq!(
+            sum.temporality,
+            data::Temporality::Cumulative,
+            "Should produce cumulative by default."
+        );
+
+        // find and validate key1=value1 datapoint
+        let mut data_point1 = None;
+        for datapoint in &sum.data_points {
+            if datapoint
+                .attributes
+                .iter()
+                .any(|(k, v)| k.as_str() == "key1" && v.as_str() == "value1")
+            {
+                data_point1 = Some(datapoint);
+            }
+        }
+        assert_eq!(
+            data_point1
+                .expect("datapoint with key1=value1 expected")
+                .value,
+            5
+        );
+
+        // find and validate key1=value2 datapoint
+        let mut data_point1 = None;
+        for datapoint in &sum.data_points {
+            if datapoint
+                .attributes
+                .iter()
+                .any(|(k, v)| k.as_str() == "key1" && v.as_str() == "value2")
+            {
+                data_point1 = Some(datapoint);
+            }
+        }
+        assert_eq!(
+            data_point1
+                .expect("datapoint with key1=value2 expected")
+                .value,
+            3
+        );
+    }
+}

From 7848755c71216e1ea255d5af3926b2a9154ea867 Mon Sep 17 00:00:00 2001
From: Zhongyang Wu
Date: Tue, 21 Nov 2023 00:26:46 -0800
Subject: [PATCH 35/68] fix(api): move composite propagator to API (#1373)

Move `TextMapCompositePropagator` from `opentelemetry-sdk` to the
`opentelemetry` crate.

Fixes #1013

---
 opentelemetry-sdk/CHANGELOG.md              |   1 +
 opentelemetry-sdk/src/propagation/mod.rs    |   2 -
 opentelemetry/CHANGELOG.md                  |   4 +-
 opentelemetry/Cargo.toml                    |   3 +
 .../src/propagation/composite.rs            | 217 ++++++++----------
 opentelemetry/src/propagation/mod.rs        | 172 ++------------
 .../src/propagation/text_map_propagator.rs  |  12 +-
 7 files changed, 132 insertions(+), 279 deletions(-)
 rename {opentelemetry-sdk => opentelemetry}/src/propagation/composite.rs (59%)

diff --git a/opentelemetry-sdk/CHANGELOG.md b/opentelemetry-sdk/CHANGELOG.md
index 3a604cc610..1b5e0b76f9 100644
--- a/opentelemetry-sdk/CHANGELOG.md
+++ b/opentelemetry-sdk/CHANGELOG.md
@@ -21,6 +21,7 @@
   `SpanData` now stores `events` as `SpanEvents` instead of `EvictedQueue` where
   `SpanEvents` is a struct with a `Vec` of events and `dropped_count`.
+- **Breaking** Remove `TextMapCompositePropagator` [#1373](https://github.com/open-telemetry/opentelemetry-rust/pull/1373). Use `TextMapCompositePropagator` in opentelemetry API. - [#1375](https://github.com/open-telemetry/opentelemetry-rust/pull/1375/) Fix metric collections during PeriodicReader shutdown diff --git a/opentelemetry-sdk/src/propagation/mod.rs b/opentelemetry-sdk/src/propagation/mod.rs index d0b88076d4..6cfb5d07bd 100644 --- a/opentelemetry-sdk/src/propagation/mod.rs +++ b/opentelemetry-sdk/src/propagation/mod.rs @@ -1,8 +1,6 @@ //! OpenTelemetry Propagators mod baggage; -mod composite; mod trace_context; pub use baggage::BaggagePropagator; -pub use composite::TextMapCompositePropagator; pub use trace_context::TraceContextPropagator; diff --git a/opentelemetry/CHANGELOG.md b/opentelemetry/CHANGELOG.md index e1b57c8105..0e7aebd62f 100644 --- a/opentelemetry/CHANGELOG.md +++ b/opentelemetry/CHANGELOG.md @@ -4,13 +4,15 @@ ### Changed -Modified `AnyValue.Map` to be backed by `HashMap` instead of custom `OrderMap`, +- Modified `AnyValue.Map` to be backed by `HashMap` instead of custom `OrderMap`, which internally used `IndexMap`. There was no requirement to maintain the order of entries, so moving from `IndexMap` to `HashMap` offers slight performance gains, and avoids `IndexMap` dependency. This affects `body` and `attributes` of `LogRecord`. [#1353](https://github.com/open-telemetry/opentelemetry-rust/pull/1353) +- Add `TextMapCompositePropagator` [#1373](https://github.com/open-telemetry/opentelemetry-rust/pull/1373) + ### Removed - Removed `OrderMap` type as there was no requirement to use this over regular diff --git a/opentelemetry/Cargo.toml b/opentelemetry/Cargo.toml index 39ec98087f..957d2d6a49 100644 --- a/opentelemetry/Cargo.toml +++ b/opentelemetry/Cargo.toml @@ -38,3 +38,6 @@ metrics = [] testing = ["trace", "metrics"] logs = [] logs_level_enabled = ["logs"] + +[dev-dependencies] +opentelemetry_sdk = { path = "../opentelemetry-sdk" } # for documentation tests diff --git a/opentelemetry-sdk/src/propagation/composite.rs b/opentelemetry/src/propagation/composite.rs similarity index 59% rename from opentelemetry-sdk/src/propagation/composite.rs rename to opentelemetry/src/propagation/composite.rs index 218b5399ae..db638449e1 100644 --- a/opentelemetry-sdk/src/propagation/composite.rs +++ b/opentelemetry/src/propagation/composite.rs @@ -1,10 +1,17 @@ -use opentelemetry::{ +//! # Composite Propagator +//! +//! A utility over multiple `Propagator`s to group multiple Propagators from different cross-cutting +//! concerns in order to leverage them as a single entity. +//! +//! Each composite Propagator will implement a specific Propagator type, such as TextMapPropagator, +//! as different Propagator types will likely operate on different data types. +use crate::{ propagation::{text_map_propagator::FieldIter, Extractor, Injector, TextMapPropagator}, Context, }; use std::collections::HashSet; -/// Composite propagator +/// Composite propagator for [`TextMapPropagator`]s. /// /// A propagator that chains multiple [`TextMapPropagator`] propagators together, /// injecting or extracting by their respective HTTP header names. @@ -12,19 +19,18 @@ use std::collections::HashSet; /// Injection and extraction from this propagator will preserve the order of the /// injectors and extractors passed in during initialization. 
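 /// Extraction applies each configured propagator in turn to the same carrier,
 /// folding the results into a single `Context`.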
 ///
-/// [`TextMapPropagator`]: opentelemetry::propagation::TextMapPropagator
-///
 /// # Examples
 ///
 /// ```
 /// use opentelemetry::{
 ///     baggage::BaggageExt,
-///     propagation::TextMapPropagator,
+///     propagation::{TextMapPropagator, TextMapCompositePropagator},
+///
 ///     trace::{TraceContextExt, Tracer, TracerProvider},
 ///     Context, KeyValue,
 /// };
 /// use opentelemetry_sdk::propagation::{
-///     BaggagePropagator, TextMapCompositePropagator, TraceContextPropagator,
+///     BaggagePropagator, TraceContextPropagator,
 /// };
 /// use opentelemetry_sdk::trace as sdktrace;
 /// use std::collections::HashMap;
@@ -67,7 +73,7 @@ pub struct TextMapCompositePropagator {
 impl TextMapCompositePropagator {
     /// Constructs a new propagator out of instances of [`TextMapPropagator`].
     ///
-    /// [`TextMapPropagator`]: opentelemetry::propagation::TextMapPropagator
+    /// [`TextMapPropagator`]: TextMapPropagator
     pub fn new(propagators: Vec<Box<dyn TextMapPropagator + Send + Sync>>) -> Self {
         let mut fields = HashSet::new();
         for propagator in &propagators {
@@ -107,31 +113,31 @@ impl TextMapPropagator for TextMapCompositePropagator {
     }
 }
 
-#[cfg(all(test, feature = "testing", feature = "trace"))]
+#[cfg(all(test, feature = "trace"))]
 mod tests {
-    use crate::propagation::{TextMapCompositePropagator, TraceContextPropagator};
+    use crate::baggage::BaggageExt;
+    use crate::propagation::TextMapCompositePropagator;
     use crate::testing::trace::TestSpan;
-    use opentelemetry::{
+    use crate::{
         propagation::{text_map_propagator::FieldIter, Extractor, Injector, TextMapPropagator},
         trace::{SpanContext, SpanId, TraceContextExt, TraceFlags, TraceId, TraceState},
-        Context,
+        Context, KeyValue,
     };
     use std::collections::HashMap;
-    use std::str::FromStr;
 
-    /// Dummy propagator for testing
-    ///
-    /// The format we are using is {trace id(in base10 u128)}-{span id(in base10 u64)}-{flag(in u8)}
+    /// A test propagator that injects and extracts a single header.
 #[derive(Debug)]
     struct TestPropagator {
-        fields: [String; 1],
+        header: &'static str,
+        fields: Vec<String>, // used by fields method
     }
 
     impl TestPropagator {
         #[allow(unreachable_pub)]
-        pub fn new() -> Self {
+        pub fn new(header: &'static str) -> Self {
             TestPropagator {
-                fields: ["testheader".to_string()],
+                header,
+                fields: vec![header.to_string()],
             }
         }
     }
@@ -140,69 +146,59 @@ mod tests {
         fn inject_context(&self, cx: &Context, injector: &mut dyn Injector) {
             let span = cx.span();
             let span_context = span.span_context();
-            injector.set(
-                "testheader",
-                format!(
-                    "{:x}-{:x}-{:02x}",
-                    span_context.trace_id(),
-                    span_context.span_id(),
-                    span_context.trace_flags()
-                ),
-            )
+            match self.header {
+                "span-id" => injector.set(self.header, format!("{:x}", span_context.span_id())),
+                "baggage" => injector.set(self.header, cx.baggage().to_string()),
+                _ => {}
+            }
         }
 
         fn extract_with_context(&self, cx: &Context, extractor: &dyn Extractor) -> Context {
-            let span = if let Some(val) = extractor.get("testheader") {
-                let parts = val.split_terminator('-').collect::<Vec<&str>>();
-                if parts.len() != 3 {
-                    SpanContext::empty_context()
-                } else {
-                    SpanContext::new(
-                        TraceId::from_u128(u128::from_str(parts[0]).unwrap_or(0)),
-                        SpanId::from_u64(u64::from_str(parts[1]).unwrap_or(0)),
-                        TraceFlags::new(u8::from_str(parts[2]).unwrap_or(0)),
-                        true,
-                        TraceState::default(),
-                    )
-                }
-            } else {
-                SpanContext::empty_context()
-            };
-
-            cx.with_remote_span_context(span)
+            match (self.header, extractor.get(self.header)) {
+                ("span-id", Some(val)) => cx.with_remote_span_context(SpanContext::new(
+                    TraceId::from_u128(1),
+                    SpanId::from_u64(u64::from_str_radix(val, 16).unwrap()),
+                    TraceFlags::default(),
+                    false,
+                    TraceState::default(),
+                )),
+                ("baggage", Some(_)) => cx.with_baggage(vec![KeyValue::new("baggagekey", "value")]),
+                _ => cx.clone(),
+            }
         }
 
         fn fields(&self) -> FieldIter<'_> {
-            FieldIter::new(&self.fields)
+            FieldIter::new(self.fields.as_slice())
        }
     }
 
+    fn setup() -> Context {
+        let mut cx = Context::default();
+        cx = cx.with_span(TestSpan(SpanContext::new(
+            TraceId::from_u128(1),
+            SpanId::from_u64(11),
+            TraceFlags::default(),
+            true,
+            TraceState::default(),
+        )));
+        // setup for baggage propagator
+        cx.with_baggage(vec![KeyValue::new("baggagekey", "value")])
+    }
+
     fn test_data() -> Vec<(&'static str, &'static str)> {
-        vec![
-            ("testheader", "1-1-00"),
-            (
-                "traceparent",
-                "00-00000000000000000000000000000001-0000000000000001-00",
-            ),
-        ]
+        vec![("span-id", "b"), ("baggage", "baggagekey=value")]
     }
 
     #[test]
     fn zero_propogators_are_noop() {
+        // setup
         let composite_propagator = TextMapCompositePropagator::new(vec![]);
+        let cx = setup();
 
-        let cx = Context::default().with_span(TestSpan(SpanContext::new(
-            TraceId::from_u128(1),
-            SpanId::from_u64(1),
-            TraceFlags::default(),
-            false,
-            TraceState::default(),
-        )));
         let mut injector = HashMap::new();
         composite_propagator.inject_context(&cx, &mut injector);
 
         assert_eq!(injector.len(), 0);
-
         for (header_name, header_value) in test_data() {
             let mut extractor = HashMap::new();
             extractor.insert(header_name.to_string(), header_value.to_string());
@@ -218,20 +214,12 @@ mod tests {
 
     #[test]
     fn inject_multiple_propagators() {
-        let test_propagator = TestPropagator::new();
-        let trace_context = TraceContextPropagator::new();
         let composite_propagator = TextMapCompositePropagator::new(vec![
-            Box::new(test_propagator),
-            Box::new(trace_context),
+            Box::new(TestPropagator::new("span-id")),
+            Box::new(TestPropagator::new("baggage")),
         ]);
 
-        let cx = Context::default().with_span(TestSpan(SpanContext::new(
-            TraceId::from_u128(1),
-            SpanId::from_u64(1),
-            TraceFlags::default(),
-            false,
-            TraceState::default(),
-        )));
+        let cx = setup();
 
         let mut injector = HashMap::new();
         composite_propagator.inject_context(&cx, &mut injector);
@@ -242,64 +230,59 @@ mod tests {
 
     #[test]
     fn extract_multiple_propagators() {
-        let test_propagator = TestPropagator::new();
-        let trace_context = TraceContextPropagator::new();
         let composite_propagator = TextMapCompositePropagator::new(vec![
-            Box::new(test_propagator),
-            Box::new(trace_context),
+            Box::new(TestPropagator::new("span-id")),
+            Box::new(TestPropagator::new("baggage")),
         ]);
 
+        let mut extractor = HashMap::new();
         for (header_name, header_value) in test_data() {
-            let mut extractor = HashMap::new();
             extractor.insert(header_name.to_string(), header_value.to_string());
-            assert_eq!(
-                composite_propagator
-                    .extract(&extractor)
-                    .span()
-                    .span_context(),
-                &SpanContext::new(
-                    TraceId::from_u128(1),
-                    SpanId::from_u64(1),
-                    TraceFlags::default(),
-                    true,
-                    TraceState::default(),
-                )
-            );
         }
+        let cx = composite_propagator.extract(&extractor);
+        assert_eq!(
+            cx.span().span_context(),
+            &SpanContext::new(
+                TraceId::from_u128(1),
+                SpanId::from_u64(11),
+                TraceFlags::default(),
+                false,
+                TraceState::default(),
+            )
+        );
+        assert_eq!(cx.baggage().to_string(), "baggagekey=value",);
     }
 
     #[test]
     fn test_get_fields() {
-        let test_propagator = TestPropagator::new();
-        let b3_fields = test_propagator
-            .fields()
-            .map(|s| s.to_string())
-            .collect::<Vec<String>>();
-
-        let trace_context = TraceContextPropagator::new();
-        let trace_context_fields = trace_context
-            .fields()
-            .map(|s| s.to_string())
-            .collect::<Vec<String>>();
+        let test_cases = vec![
+            // name, header_name, expected_result
+            // ("single propagator", vec!["span-id"], vec!["span-id"]),
+            (
+                "multiple propagators with order",
+                vec!["span-id", "baggage"],
+                vec!["baggage", "span-id"],
+            ),
+        ];
 
-        let composite_propagator = TextMapCompositePropagator::new(vec![
-            Box::new(test_propagator),
-            Box::new(trace_context),
-        ]);
+        for test_case in test_cases {
+            let test_propagators = test_case
+                .1
+                .into_iter()
+                .map(|name| {
+                    Box::new(TestPropagator::new(name)) as Box<dyn TextMapPropagator + Send + Sync>
+                })
+                .collect();
 
-        let mut fields = composite_propagator
-            .fields()
-            .map(|s| s.to_string())
-            .collect::<Vec<String>>();
-        fields.sort();
+            let composite_propagator = TextMapCompositePropagator::new(test_propagators);
 
-        let mut expected = vec![b3_fields, trace_context_fields]
-            .into_iter()
-            .flatten()
-            .collect::<Vec<String>>();
-        expected.sort();
-        expected.dedup();
+            let mut fields = composite_propagator
+                .fields()
+                .map(|s| s.to_string())
+                .collect::<Vec<String>>();
+            fields.sort();
 
-        assert_eq!(fields, expected);
+            assert_eq!(fields, test_case.2);
+        }
    }
 }
diff --git a/opentelemetry/src/propagation/mod.rs b/opentelemetry/src/propagation/mod.rs
index 8fec86f9da..7701b3a75a 100644
--- a/opentelemetry/src/propagation/mod.rs
+++ b/opentelemetry/src/propagation/mod.rs
@@ -1,170 +1,30 @@
 //! # OpenTelemetry Propagator interface
+//! Cross-cutting concerns send their state to the next process using Propagators, which are defined
+//! as objects used to read and write context data to and from messages exchanged by the applications.
 //!
-//! Propagators API consists of two main formats:
+//! `Propagator`s leverage the [`Context`] to inject and extract data for each cross-cutting concern,
+//! such as `TraceContext` and [`Baggage`].
 //!
-//! - `BinaryFormat` is used to serialize and deserialize a value
-//!   into a binary representation.
-//! - `TextMapFormat` is used to inject and extract a value as -//! text into injectors and extractors that travel in-band across process boundaries. +//! The Propagators API is expected to be leveraged by users writing instrumentation libraries. //! -//! Deserializing must set `is_remote` to true on the returned -//! `SpanContext`. +//! Currently, the following `Propagator` types are supported: +//! - [`TextMapPropagator`], inject values into and extracts values from carriers as string key/value pairs //! -//! ## Binary Format +//! A binary Propagator type will be added in +//! the future, See [tracking issues](https://github.com/open-telemetry/opentelemetry-specification/issues/437)). //! -//! `BinaryFormat` is a formatter to serialize and deserialize a value -//! into a binary format. -//! -//! `BinaryFormat` MUST expose the APIs that serializes values into bytes, -//! and deserializes values from bytes. -//! -//! ### ToBytes -//! -//! Serializes the given value into the on-the-wire representation. -//! -//! Required arguments: -//! -//! - the value to serialize, can be `SpanContext` or `DistributedContext`. -//! -//! Returns the on-the-wire byte representation of the value. -//! -//! ### FromBytes -//! -//! Creates a value from the given on-the-wire encoded representation. -//! -//! If the value could not be parsed, the underlying implementation -//! SHOULD decide to return ether an empty value, an invalid value, or -//! a valid value. -//! -//! Required arguments: -//! -//! - on-the-wire byte representation of the value. -//! -//! Returns a value deserialized from bytes. -//! -//! ## TextMap Format -//! -//! `TextMapFormat` is a formatter that injects and extracts a value -//! as text into injectors and extractors that travel in-band across process boundaries. -//! -//! Encoding is expected to conform to the HTTP Header Field semantics. -//! Values are often encoded as RPC/HTTP request headers. -//! -//! The carrier of propagated data on both the client (injector) and -//! server (extractor) side is usually a http request. Propagation is -//! usually implemented via library-specific request interceptors, where -//! the client-side injects values and the server-side extracts them. -//! -//! `TextMapFormat` MUST expose the APIs that injects values into injectors, -//! and extracts values from extractors. -//! -//! ### Fields -//! -//! The propagation fields defined. If your injector is reused, you should -//! delete the fields here before calling `inject`. -//! -//! For example, if the injector is a single-use or immutable request object, -//! you don't need to clear fields as they couldn't have been set before. -//! If it is a mutable, retryable object, successive calls should clear -//! these fields first. -//! -//! The use cases of this are: -//! -//! - allow pre-allocation of fields, especially in systems like gRPC -//! Metadata -//! - allow a single-pass over an iterator -//! -//! Returns list of fields that will be used by this formatter. -//! -//! ### Inject -//! -//! Injects the value downstream. For example, as http headers. -//! -//! Required arguments: -//! -//! - the `SpanContext` to be injected. -//! - the injector that holds propagation fields. For example, an outgoing -//! message or http request. -//! - the `Setter` invoked for each propagation key to add or remove. -//! -//! #### Setter argument -//! -//! Setter is an argument in `Inject` that puts value into given field. -//! -//! `Setter` allows a `TextMapFormat` to set propagated fields into a -//! injector. -//! 
-//! `Setter` MUST be stateless and allowed to be saved as a constant to -//! avoid runtime allocations. One of the ways to implement it is `Setter` -//! class with `Put` method as described below. -//! -//! ##### Put -//! -//! Replaces a propagated field with the given value. -//! -//! Required arguments: -//! -//! - the injector holds propagation fields. For example, an outgoing message -//! or http request. -//! - the key of the field. -//! - the value of the field. -//! -//! The implementation SHOULD preserve casing (e.g. it should not transform -//! `Content-Type` to `content-type`) if the used protocol is case insensitive, -//! otherwise it MUST preserve casing. -//! -//! ### Extract -//! -//! Extracts the value from upstream. For example, as http headers. -//! -//! If the value could not be parsed, the underlying implementation will -//! decide to return an object representing either an empty value, an invalid -//! value, or a valid value. -//! -//! Required arguments: -//! -//! - the extractor holds propagation fields. For example, an outgoing message -//! or http request. -//! - the instance of `Getter` invoked for each propagation key to get. -//! -//! Returns the non-null extracted value. -//! -//! #### Getter argument -//! -//! Getter is an argument in `Extract` that get value from given field -//! -//! `Getter` allows a `TextMapFormat` to read propagated fields from a -//! extractor. -//! -//! `Getter` MUST be stateless and allowed to be saved as a constant to avoid -//! runtime allocations. One of the ways to implement it is `Getter` class -//! with `Get` method as described below. -//! -//! ##### Get -//! -//! The Get function MUST return the first value of the given propagation -//! key or return `None` if the key doesn't exist. -//! -//! Required arguments: -//! -//! - the extractor of propagation fields, such as an HTTP request. -//! - the key of the field. -//! -//! The `get` function is responsible for handling case sensitivity. If -//! the getter is intended to work with an HTTP request object, the getter -//! MUST be case insensitive. To improve compatibility with other text-based -//! protocols, text format implementations MUST ensure to always use the -//! canonical casing for their attributes. NOTE: Canonical casing for HTTP -//! headers is usually title case (e.g. `Content-Type` instead of `content-type`). -//! -//! ##### Keys -//! -//! The Keys function returns a vector of the propagation keys. +//! `Propagator`s uses [`Injector`] and [`Extractor`] to read and write context data to and from messages. +//! Each specific Propagator type defines its expected carrier type, such as a string map or a byte array. //! +//! [`Baggage`]: crate::baggage::Baggage +//! [`Context`]: crate::Context + use std::collections::HashMap; +pub mod composite; pub mod text_map_propagator; +pub use composite::TextMapCompositePropagator; pub use text_map_propagator::TextMapPropagator; /// Injector provides an interface for adding fields from an underlying struct like `HashMap` diff --git a/opentelemetry/src/propagation/text_map_propagator.rs b/opentelemetry/src/propagation/text_map_propagator.rs index 4307905a30..c078e49456 100644 --- a/opentelemetry/src/propagation/text_map_propagator.rs +++ b/opentelemetry/src/propagation/text_map_propagator.rs @@ -1,7 +1,13 @@ -//! # Text Propagator +//! # TextMapPropagator //! -//! `TextMapPropagator` is a formatter to serialize and deserialize a value into a -//! text format. +//! 
[`TextMapPropagator`] performs the injection and extraction of a cross-cutting concern value as
+//! string key/value pairs into carriers that travel in-band across process boundaries.
+//!
+//! The carrier of propagated data on both the client (injector) and server (extractor) side is
+//! usually an HTTP request.
+//!
+//! In order to increase compatibility, the key/value pairs MUST only consist of US-ASCII characters
+//! that make up valid HTTP header fields as per RFC 7230.
 use crate::{
     propagation::{Extractor, Injector},
     Context,

From 3939ab58fe6edfcf2287ee5c047f3ff435df3bc0 Mon Sep 17 00:00:00 2001
From: Cijo Thomas
Date: Tue, 21 Nov 2023 09:33:51 -0800
Subject: [PATCH 36/68] Test to validate identical instruments are merged
 (#1385)

---
 opentelemetry-sdk/src/metrics/mod.rs | 53 ++++++++++++++++++++++++++++
 1 file changed, 53 insertions(+)

diff --git a/opentelemetry-sdk/src/metrics/mod.rs b/opentelemetry-sdk/src/metrics/mod.rs
index 13e9d35cbc..9eb34d9303 100644
--- a/opentelemetry-sdk/src/metrics/mod.rs
+++ b/opentelemetry-sdk/src/metrics/mod.rs
@@ -156,4 +156,57 @@ mod tests {
             3
         );
     }
+
+    // "multi_thread" tokio flavor must be used else flush won't
+    // be able to make progress!
+    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+    async fn counter_duplicate_instrument_merge() {
+        // Arrange
+        let exporter = InMemoryMetricsExporter::default();
+        let reader = PeriodicReader::builder(exporter.clone(), runtime::Tokio).build();
+        let meter_provider = SdkMeterProvider::builder().with_reader(reader).build();
+
+        // Act
+        let meter = meter_provider.meter("test");
+        let counter = meter
+            .u64_counter("my_counter")
+            .with_unit(Unit::new("my_unit"))
+            .with_description("my_description")
+            .init();
+
+        let counter_duplicated = meter
+            .u64_counter("my_counter")
+            .with_unit(Unit::new("my_unit"))
+            .with_description("my_description")
+            .init();
+
+        let attribute = vec![KeyValue::new("key1", "value1")];
+        counter.add(10, &attribute);
+        counter_duplicated.add(5, &attribute);
+
+        meter_provider.force_flush().unwrap();
+
+        // Assert
+        let resource_metrics = exporter
+            .get_finished_metrics()
+            .expect("metrics are expected to be exported.");
+        assert!(
+            resource_metrics[0].scope_metrics[0].metrics.len() == 1,
+            "There should be single metric merging duplicate instruments"
+        );
+        let metric = &resource_metrics[0].scope_metrics[0].metrics[0];
+        assert_eq!(metric.name, "my_counter");
+        assert_eq!(metric.unit.as_str(), "my_unit");
+        let sum = metric
+            .data
+            .as_any()
+            .downcast_ref::<data::Sum<u64>>()
+            .expect("Sum aggregation expected for Counter instruments by default");
+
+        // Expecting 1 time-series.
+        assert_eq!(sum.data_points.len(), 1);
+
+        let datapoint = &sum.data_points[0];
+        assert_eq!(datapoint.value, 15);
+    }
 }

From 32299790f178ef417431397659ad6b85d8c739f5 Mon Sep 17 00:00:00 2001
From: Rene B
Date: Wed, 22 Nov 2023 18:19:31 +0100
Subject: [PATCH 37/68] Add missing event timestamps to `opentelemetry-stdout`
 (#1391)

---
 opentelemetry-stdout/examples/basic.rs      | 4 ++++
 opentelemetry-stdout/src/trace/transform.rs | 6 ++++++
 2 files changed, 10 insertions(+)

diff --git a/opentelemetry-stdout/examples/basic.rs b/opentelemetry-stdout/examples/basic.rs
index d24fba671b..41cfa7a30f 100644
--- a/opentelemetry-stdout/examples/basic.rs
+++ b/opentelemetry-stdout/examples/basic.rs
@@ -37,6 +37,10 @@ async fn main() -> Result<(), Box<dyn Error + Send + Sync + 'static>> {
     let tracer = tracer_provider.tracer("stdout-test");
     let mut span = tracer.start("test_span");
     span.set_attribute(KeyValue::new("test_key", "test_value"));
+    span.add_event(
+        "test_event",
+        vec![KeyValue::new("test_event_key", "test_event_value")],
+    );
     span.end();
 
     let meter = meter_provider.meter("stdout-test");
diff --git a/opentelemetry-stdout/src/trace/transform.rs b/opentelemetry-stdout/src/trace/transform.rs
index 877c3190e8..8a83c41629 100644
--- a/opentelemetry-stdout/src/trace/transform.rs
+++ b/opentelemetry-stdout/src/trace/transform.rs
@@ -154,6 +154,10 @@ struct Event {
     name: Cow<'static, str>,
     attributes: Vec<KeyValue>,
     dropped_attributes_count: u32,
+    #[serde(serialize_with = "as_unix_nano")]
+    time_unix_nano: SystemTime,
+    #[serde(serialize_with = "as_human_readable")]
+    time: SystemTime,
 }
 
 impl From<opentelemetry::trace::Event> for Event {
@@ -162,6 +166,8 @@ impl From<opentelemetry::trace::Event> for Event {
             name: value.name,
             attributes: value.attributes.into_iter().map(Into::into).collect(),
             dropped_attributes_count: value.dropped_attributes_count,
+            time_unix_nano: value.timestamp,
+            time: value.timestamp,
         }
     }
 }

From 04c863e5608a765770556a85b44b2e5400a59d65 Mon Sep 17 00:00:00 2001
From: Cijo Thomas
Date: Wed, 22 Nov 2023 09:50:33 -0800
Subject: [PATCH 38/68] Add focused benchmark for metric hotpath (#1389)

---
 opentelemetry-sdk/Cargo.toml                |  8 +++
 opentelemetry-sdk/benches/attribute_set.rs  | 40 +++++++++++
 opentelemetry-sdk/benches/metric_counter.rs | 74 +++++++++++++++++++++
 3 files changed, 122 insertions(+)
 create mode 100644 opentelemetry-sdk/benches/attribute_set.rs
 create mode 100644 opentelemetry-sdk/benches/metric_counter.rs

diff --git a/opentelemetry-sdk/Cargo.toml b/opentelemetry-sdk/Cargo.toml
index 2e034399bd..6763c584de 100644
--- a/opentelemetry-sdk/Cargo.toml
+++ b/opentelemetry-sdk/Cargo.toml
@@ -61,6 +61,14 @@ harness = false
 name = "span_builder"
 harness = false
 
+[[bench]]
+name = "metric_counter"
+harness = false
+
+[[bench]]
+name = "attribute_set"
+harness = false
+
 [[bench]]
 name = "trace"
 harness = false
diff --git a/opentelemetry-sdk/benches/attribute_set.rs b/opentelemetry-sdk/benches/attribute_set.rs
new file mode 100644
index 0000000000..6f3360b9cf
--- /dev/null
+++ b/opentelemetry-sdk/benches/attribute_set.rs
@@ -0,0 +1,40 @@
+use criterion::{criterion_group, criterion_main, Criterion};
+use opentelemetry::KeyValue;
+use opentelemetry_sdk::AttributeSet;
+
+// Run this benchmark with:
+// cargo bench --bench metric_counter
+
+fn criterion_benchmark(c: &mut Criterion) {
+    attribute_set(c);
+}
+
+fn attribute_set(c: &mut Criterion) {
+    c.bench_function("AttributeSet_without_duplicates", |b| {
+        b.iter(|| {
+            let attributes: &[KeyValue] = &[
+                KeyValue::new("attribute1", "value1"),
+                KeyValue::new("attribute2", "value2"),
+                KeyValue::new("attribute3", "value3"),
KeyValue::new("attribute4", "value4"), + ]; + let _attribute_set: AttributeSet = attributes.into(); + }); + }); + + c.bench_function("AttributeSet_with_duplicates", |b| { + b.iter(|| { + let attributes: &[KeyValue] = &[ + KeyValue::new("attribute1", "value1"), + KeyValue::new("attribute3", "value3"), + KeyValue::new("attribute3", "value3"), + KeyValue::new("attribute4", "value4"), + ]; + let _attribute_set: AttributeSet = attributes.into(); + }); + }); +} + +criterion_group!(benches, criterion_benchmark); + +criterion_main!(benches); diff --git a/opentelemetry-sdk/benches/metric_counter.rs b/opentelemetry-sdk/benches/metric_counter.rs new file mode 100644 index 0000000000..ba21da07c8 --- /dev/null +++ b/opentelemetry-sdk/benches/metric_counter.rs @@ -0,0 +1,74 @@ +use criterion::{criterion_group, criterion_main, Criterion}; +use opentelemetry::{ + metrics::{Counter, MeterProvider as _}, + KeyValue, +}; +use opentelemetry_sdk::metrics::{ManualReader, SdkMeterProvider}; +use rand::{rngs::SmallRng, Rng, SeedableRng}; + +// Run this benchmark with: +// cargo bench --bench metric_counter --features=metrics,testing +fn create_counter() -> Counter { + let meter_provider: SdkMeterProvider = SdkMeterProvider::builder() + .with_reader(ManualReader::builder().build()) + .build(); + let meter = meter_provider.meter("benchmarks"); + let counter = meter.u64_counter("counter_bench").init(); + counter +} + +fn criterion_benchmark(c: &mut Criterion) { + counter_add(c); +} + +fn counter_add(c: &mut Criterion) { + let attribute_values = [ + "value1", "value2", "value3", "value4", "value5", "value6", "value7", "value8", "value9", + "value10", + ]; + + let counter = create_counter(); + c.bench_function("Counter_Add_Sorted", |b| { + b.iter(|| { + let mut rng = SmallRng::from_entropy(); + // 4*4*10*10 = 1600 time series. + let index_first_attribute = rng.gen_range(0..4); + let index_second_attribute = rng.gen_range(0..4); + let index_third_attribute = rng.gen_range(0..10); + let index_forth_attribute = rng.gen_range(0..10); + counter.add( + 1, + &[ + KeyValue::new("attribute1", attribute_values[index_first_attribute]), + KeyValue::new("attribute2", attribute_values[index_second_attribute]), + KeyValue::new("attribute3", attribute_values[index_third_attribute]), + KeyValue::new("attribute4", attribute_values[index_forth_attribute]), + ], + ); + }); + }); + + c.bench_function("Counter_Add_Unsorted", |b| { + b.iter(|| { + let mut rng = SmallRng::from_entropy(); + // 4*4*10*10 = 1600 time series. 
+ let index_first_attribute = rng.gen_range(0..4); + let index_second_attribute = rng.gen_range(0..4); + let index_third_attribute = rng.gen_range(0..10); + let index_forth_attribute = rng.gen_range(0..10); + counter.add( + 1, + &[ + KeyValue::new("attribute2", attribute_values[index_second_attribute]), + KeyValue::new("attribute3", attribute_values[index_third_attribute]), + KeyValue::new("attribute1", attribute_values[index_first_attribute]), + KeyValue::new("attribute4", attribute_values[index_forth_attribute]), + ], + ); + }); + }); +} + +criterion_group!(benches, criterion_benchmark); + +criterion_main!(benches); From 6e1c8c184496e5661af5e97072ba48a1de5377f6 Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Wed, 22 Nov 2023 10:26:27 -0800 Subject: [PATCH 39/68] Cleanup indexmap from allowed external types (#1392) More cleanups following https://github.com/open-telemetry/opentelemetry-rust/pull/1381/files --- opentelemetry/allowed-external-types.toml | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/opentelemetry/allowed-external-types.toml b/opentelemetry/allowed-external-types.toml index 3cc9df2347..90d58d7e9c 100644 --- a/opentelemetry/allowed-external-types.toml +++ b/opentelemetry/allowed-external-types.toml @@ -3,16 +3,6 @@ # This is used with cargo-check-external-types to reduce the surface area of downstream crates from # the public API. Ideally this can have a few exceptions as possible. allowed_external_types = [ - "indexmap::map::core::Entry", - "indexmap::map::iter::Drain", - "indexmap::map::iter::IterMut", - "indexmap::map::iter::Iter", - "indexmap::map::iter::IntoIter", - "indexmap::map::iter::IntoKeys", - "indexmap::map::iter::Keys", - "indexmap::map::iter::IntoValues", - "indexmap::map::iter::Values", - "indexmap::map::iter::ValuesMut", "equivalent::Equivalent", "futures_sink::Sink", # TODO: This is a pre-1.0 crate, we can't easily stabilize with this in the public API "futures_core::stream::Stream", # TODO: This is a pre-1.0 crate, we can't easily stabilize with this in the public API From 073f7a6b6a69c823e2debcc7e424677a3aa4a19f Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Wed, 22 Nov 2023 10:54:48 -0800 Subject: [PATCH 40/68] Remove jaeger crate from tracing example deps (#1393) --- README.md | 3 +-- examples/tracing-jaeger/Cargo.toml | 1 - 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/README.md b/README.md index ea6227b176..6db1293150 100644 --- a/README.md +++ b/README.md @@ -107,8 +107,7 @@ In particular, the following crates are likely to be of interest: are experimental. * [`opentelemetry-http`] provides an interface for injecting and extracting trace information from [`http`] headers. -* [`opentelemetry-jaeger`] provides a pipeline and exporter for sending trace - information to [`Jaeger`]. +* [`opentelemetry-jaeger`] provides context propagation using [jaeger propagation format](https://www.jaegertracing.io/docs/1.18/client-libraries/#propagation-format). * [`opentelemetry-otlp`] exporter for sending trace and metric data in the OTLP format to the OpenTelemetry collector. 
 * [`opentelemetry-prometheus`] provides a pipeline and exporter for sending
diff --git a/examples/tracing-jaeger/Cargo.toml b/examples/tracing-jaeger/Cargo.toml
index afafd3fdb1..a346b5b8af 100644
--- a/examples/tracing-jaeger/Cargo.toml
+++ b/examples/tracing-jaeger/Cargo.toml
@@ -7,7 +7,6 @@ publish = false
 
 [dependencies]
 opentelemetry = { path = "../../opentelemetry" }
-opentelemetry-jaeger = { path = "../../opentelemetry-jaeger" }
 opentelemetry_sdk = { path = "../../opentelemetry-sdk", features = ["rt-tokio"] }
 opentelemetry-otlp = { path = "../../opentelemetry-otlp", features = ["tonic"] }
 tokio = { version = "1.0", features = ["full"] }

From 075d26cf23ad7c7541aca8272c863d65220248bd Mon Sep 17 00:00:00 2001
From: Matthew Shapiro
Date: Wed, 22 Nov 2023 15:24:40 -0500
Subject: [PATCH 41/68] Improve cost of creating AttributeSets (#1379)

---
 opentelemetry-sdk/src/attributes/set.rs | 47 +++++++++++++------------
 1 file changed, 24 insertions(+), 23 deletions(-)

diff --git a/opentelemetry-sdk/src/attributes/set.rs b/opentelemetry-sdk/src/attributes/set.rs
index 4dff0822f5..06490879a1 100644
--- a/opentelemetry-sdk/src/attributes/set.rs
+++ b/opentelemetry-sdk/src/attributes/set.rs
@@ -1,6 +1,6 @@
+use std::collections::HashSet;
 use std::{
     cmp::Ordering,
-    collections::{BTreeSet, HashSet},
     hash::{Hash, Hasher},
 };
 
@@ -105,36 +105,37 @@ impl Eq for HashKeyValue {}
 /// This must implement [Hash], [PartialEq], and [Eq] so it may be used as
 /// HashMap keys and other de-duplication methods.
 #[derive(Clone, Default, Debug, Hash, PartialEq, Eq)]
-pub struct AttributeSet(BTreeSet<HashKeyValue>);
+pub struct AttributeSet(Vec<HashKeyValue>);
 
 impl From<&[KeyValue]> for AttributeSet {
     fn from(values: &[KeyValue]) -> Self {
-        let mut seen = HashSet::with_capacity(values.len());
-        AttributeSet(
-            values
-                .iter()
-                .rev()
-                .filter_map(|kv| {
-                    if seen.contains(&&kv.key) {
-                        None
-                    } else {
-                        seen.insert(&kv.key);
-                        Some(HashKeyValue(kv.clone()))
-                    }
-                })
-                .collect(),
-        )
+        let mut seen_keys = HashSet::with_capacity(values.len());
+        let mut vec = values
+            .iter()
+            .rev()
+            .filter_map(|kv| {
+                if seen_keys.insert(kv.key.clone()) {
+                    Some(HashKeyValue(kv.clone()))
+                } else {
+                    None
+                }
+            })
+            .collect::<Vec<_>>();
+        vec.sort_unstable();
+
+        AttributeSet(vec)
     }
 }
 
 impl From<&Resource> for AttributeSet {
     fn from(values: &Resource) -> Self {
-        AttributeSet(
-            values
-                .iter()
-                .map(|(key, value)| HashKeyValue(KeyValue::new(key.clone(), value.clone())))
-                .collect(),
-        )
+        let mut vec = values
+            .iter()
+            .map(|(key, value)| HashKeyValue(KeyValue::new(key.clone(), value.clone())))
+            .collect::<Vec<_>>();
+        vec.sort_unstable();
+
+        AttributeSet(vec)
     }
 }

From f2d57b13cc9db17efdab785f25a822fdee3fb679 Mon Sep 17 00:00:00 2001
From: Zhongyang Wu
Date: Sun, 26 Nov 2023 23:58:54 -0800
Subject: [PATCH 42/68] fix(sdk, zipkin, otlp): use `temp_env` for testing
 (#1403)

Fixes #1383

---
 opentelemetry-otlp/Cargo.toml                 |  1 +
 opentelemetry-otlp/src/exporter/mod.rs        | 19 ++--
 opentelemetry-sdk/Cargo.toml                  |  2 +
 .../src/metrics/meter_provider.rs             | 83 +++++++++--------
 opentelemetry-sdk/src/resource/env.rs         | 92 +++++++++++--------
 opentelemetry-sdk/src/resource/mod.rs         | 40 +++++---
 opentelemetry-sdk/src/trace/provider.rs       | 74 ++++++++------
 opentelemetry-sdk/src/trace/span_processor.rs | 51 +++++-----
 opentelemetry-zipkin/Cargo.toml               |  1 +
 opentelemetry-zipkin/src/exporter/env.rs      | 17 ++--
 10 files changed, 209 insertions(+), 171 deletions(-)

diff --git a/opentelemetry-otlp/Cargo.toml b/opentelemetry-otlp/Cargo.toml
index 35cae87b5f..8ef2a54f28 100644
--- a/opentelemetry-otlp/Cargo.toml
+++ b/opentelemetry-otlp/Cargo.toml
@@ -52,6 +52,7 @@ opentelemetry_sdk = { features = ["trace", "rt-tokio", "testing"], path = "../op
 time = { version = "0.3", features = ["macros"] }
 tokio = { version = "1.0", features = ["macros", "rt-multi-thread"] }
 futures-util = { version = "0.3", default-features = false }
+temp-env = "0.3.6"
 
 [features]
 # telemetry pillars and functions
diff --git a/opentelemetry-otlp/src/exporter/mod.rs b/opentelemetry-otlp/src/exporter/mod.rs
index eafd82e1aa..93fbdec724 100644
--- a/opentelemetry-otlp/src/exporter/mod.rs
+++ b/opentelemetry-otlp/src/exporter/mod.rs
@@ -243,23 +243,20 @@ fn parse_header_key_value_string(key_value_string: &str) -> Option<(&str, &str)>
 #[cfg(test)]
 #[cfg(any(feature = "grpc-tonic", feature = "http-proto"))]
 mod tests {
-    // Make sure env tests are not running concurrently
-    static ENV_LOCK: std::sync::Mutex<()> = std::sync::Mutex::new(());
 
     pub(crate) fn run_env_test<T, F>(env_vars: T, f: F)
     where
         F: FnOnce(),
         T: Into<Vec<(&'static str, &'static str)>>,
     {
-        let _env_lock = ENV_LOCK.lock().expect("env test lock poisoned");
-        let env_vars = env_vars.into();
-        for (k, v) in env_vars.iter() {
-            std::env::set_var(k, v);
-        }
-        f();
-        for (k, _) in env_vars {
-            std::env::remove_var(k);
-        }
+        temp_env::with_vars(
+            env_vars
+                .into()
+                .iter()
+                .map(|&(k, v)| (k, Some(v)))
+                .collect::<Vec<(&str, Option<&str>)>>(),
+            f,
+        )
     }
 
     #[test]
diff --git a/opentelemetry-sdk/Cargo.toml b/opentelemetry-sdk/Cargo.toml
index 6763c584de..f8a7bf5530 100644
--- a/opentelemetry-sdk/Cargo.toml
+++ b/opentelemetry-sdk/Cargo.toml
@@ -38,6 +38,8 @@ rustdoc-args = ["--cfg", "docsrs"]
 [dev-dependencies]
 indexmap = "2.0"
 criterion = { version = "0.5", features = ["html_reports"] }
+temp-env = "0.3.6"
+
 
 [target.'cfg(not(target_os = "windows"))'.dev-dependencies]
 pprof = { version = "0.13", features = ["flamegraph", "criterion"] }
diff --git a/opentelemetry-sdk/src/metrics/meter_provider.rs b/opentelemetry-sdk/src/metrics/meter_provider.rs
index 53128de1d8..7ea1ec9b9a 100644
--- a/opentelemetry-sdk/src/metrics/meter_provider.rs
+++ b/opentelemetry-sdk/src/metrics/meter_provider.rs
@@ -247,51 +247,54 @@ mod tests {
             .build();
         assert_service_name(custom_meter_provider, Some("test_service"));
 
-        // If `OTEL_RESOURCE_ATTRIBUTES` is set, read them automatically
-        let reader3 = TestMetricReader {};
-        env::set_var("OTEL_RESOURCE_ATTRIBUTES", "key1=value1, k2, k3=value2");
-        let env_resource_provider = super::SdkMeterProvider::builder()
-            .with_reader(reader3)
-            .build();
-        assert_eq!(
-            env_resource_provider.pipes.0[0].resource,
-            Resource::new(vec![
-                KeyValue::new("telemetry.sdk.name", "opentelemetry"),
-                KeyValue::new("telemetry.sdk.version", env!("CARGO_PKG_VERSION")),
-                KeyValue::new("telemetry.sdk.language", "rust"),
-                KeyValue::new("key1", "value1"),
-                KeyValue::new("k3", "value2"),
-                KeyValue::new("service.name", "unknown_service"),
-            ])
+        temp_env::with_var(
+            "OTEL_RESOURCE_ATTRIBUTES",
+            Some("key1=value1, k2, k3=value2"),
+            || {
+                // If `OTEL_RESOURCE_ATTRIBUTES` is set, read them automatically
+                let reader3 = TestMetricReader {};
+                let env_resource_provider = super::SdkMeterProvider::builder()
+                    .with_reader(reader3)
+                    .build();
+                assert_eq!(
+                    env_resource_provider.pipes.0[0].resource,
+                    Resource::new(vec![
+                        KeyValue::new("telemetry.sdk.name", "opentelemetry"),
+                        KeyValue::new("telemetry.sdk.version", env!("CARGO_PKG_VERSION")),
+                        KeyValue::new("telemetry.sdk.language", "rust"),
+                        KeyValue::new("key1", "value1"),
+                        KeyValue::new("k3", "value2"),
+                        KeyValue::new("service.name",
"unknown_service"), + ]) + ); + }, ); // When `OTEL_RESOURCE_ATTRIBUTES` is set and also user provided config - env::set_var( + temp_env::with_var( "OTEL_RESOURCE_ATTRIBUTES", - "my-custom-key=env-val,k2=value2", - ); - let reader4 = TestMetricReader {}; - let user_provided_resource_config_provider = super::SdkMeterProvider::builder() - .with_reader(reader4) - .with_resource( - Resource::default().merge(&mut Resource::new(vec![KeyValue::new( - "my-custom-key", - "my-custom-value", - )])), - ) - .build(); - assert_eq!( - user_provided_resource_config_provider.pipes.0[0].resource, - Resource::new(vec![ - KeyValue::new("telemetry.sdk.name", "opentelemetry"), - KeyValue::new("telemetry.sdk.version", env!("CARGO_PKG_VERSION")), - KeyValue::new("telemetry.sdk.language", "rust"), - KeyValue::new("my-custom-key", "my-custom-value"), - KeyValue::new("k2", "value2"), - KeyValue::new("service.name", "unknown_service"), - ]) + Some("my-custom-key=env-val,k2=value2"), + || { + let reader4 = TestMetricReader {}; + let user_provided_resource_config_provider = super::SdkMeterProvider::builder() + .with_reader(reader4) + .with_resource(Resource::default().merge(&mut Resource::new(vec![ + KeyValue::new("my-custom-key", "my-custom-value"), + ]))) + .build(); + assert_eq!( + user_provided_resource_config_provider.pipes.0[0].resource, + Resource::new(vec![ + KeyValue::new("telemetry.sdk.name", "opentelemetry"), + KeyValue::new("telemetry.sdk.version", env!("CARGO_PKG_VERSION")), + KeyValue::new("telemetry.sdk.language", "rust"), + KeyValue::new("my-custom-key", "my-custom-value"), + KeyValue::new("k2", "value2"), + KeyValue::new("service.name", "unknown_service"), + ]) + ); + }, ); - env::remove_var("OTEL_RESOURCE_ATTRIBUTES"); // If user provided a resource, it takes priority during collision. let reader5 = TestMetricReader {}; diff --git a/opentelemetry-sdk/src/resource/env.rs b/opentelemetry-sdk/src/resource/env.rs index 15cc31cd24..ae3bf5cfda 100644 --- a/opentelemetry-sdk/src/resource/env.rs +++ b/opentelemetry-sdk/src/resource/env.rs @@ -99,30 +99,34 @@ mod tests { use crate::resource::{EnvResourceDetector, Resource, ResourceDetector}; use opentelemetry::{Key, KeyValue, Value}; use std::time::Duration; - use std::{env, time}; #[test] fn test_read_from_env() { - env::set_var(OTEL_RESOURCE_ATTRIBUTES, "key=value, k = v , a= x, a=z"); - env::set_var("irrelevant".to_uppercase(), "20200810"); - - let detector = EnvResourceDetector::new(); - let resource = detector.detect(time::Duration::from_secs(5)); - assert_eq!( - resource, - Resource::new(vec![ - KeyValue::new("key", "value"), - KeyValue::new("k", "v"), - KeyValue::new("a", "x"), - KeyValue::new("a", "z"), - ]) + temp_env::with_vars( + [ + ( + "OTEL_RESOURCE_ATTRIBUTES", + Some("key=value, k = v , a= x, a=z"), + ), + ("IRRELEVANT", Some("20200810")), + ], + || { + let detector = EnvResourceDetector::new(); + let resource = detector.detect(Duration::from_secs(5)); + assert_eq!( + resource, + Resource::new(vec![ + KeyValue::new("key", "value"), + KeyValue::new("k", "v"), + KeyValue::new("a", "x"), + KeyValue::new("a", "z"), + ]) + ); + }, ); - // Test this case in same test to avoid race condition when running tests in parallel. 
- env::set_var(OTEL_RESOURCE_ATTRIBUTES, ""); - let detector = EnvResourceDetector::new(); - let resource = detector.detect(time::Duration::from_secs(5)); + let resource = detector.detect(Duration::from_secs(5)); assert!(resource.is_empty()); } @@ -130,37 +134,45 @@ mod tests { fn test_sdk_provided_resource_detector() { const SERVICE_NAME: &str = "service.name"; // Ensure no env var set - env::remove_var(OTEL_RESOURCE_ATTRIBUTES); let no_env = SdkProvidedResourceDetector.detect(Duration::from_secs(1)); assert_eq!( no_env.get(Key::from_static_str(SERVICE_NAME)), Some(Value::from("unknown_service")), ); - env::set_var(OTEL_SERVICE_NAME, "test service"); - let with_service = SdkProvidedResourceDetector.detect(Duration::from_secs(1)); - assert_eq!( - with_service.get(Key::from_static_str(SERVICE_NAME)), - Some(Value::from("test service")), - ); - env::set_var(OTEL_SERVICE_NAME, ""); // clear the env var - - // Fall back to OTEL_RESOURCE_ATTRIBUTES - env::set_var(OTEL_RESOURCE_ATTRIBUTES, "service.name=test service1"); - let with_service = SdkProvidedResourceDetector.detect(Duration::from_secs(1)); - assert_eq!( - with_service.get(Key::from_static_str(SERVICE_NAME)), - Some(Value::from("test service1")) + temp_env::with_var(OTEL_SERVICE_NAME, Some("test service"), || { + let with_service = SdkProvidedResourceDetector.detect(Duration::from_secs(1)); + assert_eq!( + with_service.get(Key::from_static_str(SERVICE_NAME)), + Some(Value::from("test service")), + ) + }); + + temp_env::with_var( + OTEL_RESOURCE_ATTRIBUTES, + Some("service.name=test service1"), + || { + let with_service = SdkProvidedResourceDetector.detect(Duration::from_secs(1)); + assert_eq!( + with_service.get(Key::from_static_str(SERVICE_NAME)), + Some(Value::from("test service1")), + ) + }, ); // OTEL_SERVICE_NAME takes priority - env::set_var(OTEL_SERVICE_NAME, "test service"); - let with_service = SdkProvidedResourceDetector.detect(Duration::from_secs(1)); - assert_eq!( - with_service.get(Key::from_static_str(SERVICE_NAME)), - Some(Value::from("test service")) + temp_env::with_vars( + [ + (OTEL_SERVICE_NAME, Some("test service")), + (OTEL_RESOURCE_ATTRIBUTES, Some("service.name=test service3")), + ], + || { + let with_service = SdkProvidedResourceDetector.detect(Duration::from_secs(1)); + assert_eq!( + with_service.get(Key::from_static_str(SERVICE_NAME)), + Some(Value::from("test service")) + ); + }, ); - env::set_var(OTEL_RESOURCE_ATTRIBUTES, ""); - env::set_var(OTEL_SERVICE_NAME, ""); // clear the env var } } diff --git a/opentelemetry-sdk/src/resource/mod.rs b/opentelemetry-sdk/src/resource/mod.rs index 46808cefd3..79ce0122eb 100644 --- a/opentelemetry-sdk/src/resource/mod.rs +++ b/opentelemetry-sdk/src/resource/mod.rs @@ -255,7 +255,7 @@ mod tests { use super::*; use crate::resource::EnvResourceDetector; use std::collections::HashMap; - use std::{env, time}; + use std::time; #[test] fn new_resource() { @@ -339,20 +339,30 @@ mod tests { #[test] fn detect_resource() { - env::set_var("OTEL_RESOURCE_ATTRIBUTES", "key=value, k = v , a= x, a=z"); - env::set_var("irrelevant".to_uppercase(), "20200810"); - - let detector = EnvResourceDetector::new(); - let resource = - Resource::from_detectors(time::Duration::from_secs(5), vec![Box::new(detector)]); - assert_eq!( - resource, - Resource::new(vec![ - KeyValue::new("key", "value"), - KeyValue::new("k", "v"), - KeyValue::new("a", "x"), - KeyValue::new("a", "z"), - ]) + temp_env::with_vars( + [ + ( + "OTEL_RESOURCE_ATTRIBUTES", + Some("key=value, k = v , a= x, a=z"), + ), + ("IRRELEVANT", 
Some("20200810")), + ], + || { + let detector = EnvResourceDetector::new(); + let resource = Resource::from_detectors( + time::Duration::from_secs(5), + vec![Box::new(detector)], + ); + assert_eq!( + resource, + Resource::new(vec![ + KeyValue::new("key", "value"), + KeyValue::new("k", "v"), + KeyValue::new("a", "x"), + KeyValue::new("a", "z"), + ]) + ) + }, ) } } diff --git a/opentelemetry-sdk/src/trace/provider.rs b/opentelemetry-sdk/src/trace/provider.rs index 93572cc8ba..cc0991acd7 100644 --- a/opentelemetry-sdk/src/trace/provider.rs +++ b/opentelemetry-sdk/src/trace/provider.rs @@ -303,45 +303,51 @@ mod tests { assert_service_name(custom_config_provider, Some("test_service")); // If `OTEL_RESOURCE_ATTRIBUTES` is set, read them automatically - env::set_var("OTEL_RESOURCE_ATTRIBUTES", "key1=value1, k2, k3=value2"); - let env_resource_provider = super::TracerProvider::builder().build(); - assert_eq!( - env_resource_provider.config().resource, - Cow::Owned(Resource::new(vec![ - KeyValue::new("telemetry.sdk.name", "opentelemetry"), - KeyValue::new("telemetry.sdk.version", env!("CARGO_PKG_VERSION")), - KeyValue::new("telemetry.sdk.language", "rust"), - KeyValue::new("key1", "value1"), - KeyValue::new("k3", "value2"), - KeyValue::new("service.name", "unknown_service"), - ])) + temp_env::with_var( + "OTEL_RESOURCE_ATTRIBUTES", + Some("key1=value1, k2, k3=value2"), + || { + let env_resource_provider = super::TracerProvider::builder().build(); + assert_eq!( + env_resource_provider.config().resource, + Cow::Owned(Resource::new(vec![ + KeyValue::new("telemetry.sdk.name", "opentelemetry"), + KeyValue::new("telemetry.sdk.version", env!("CARGO_PKG_VERSION")), + KeyValue::new("telemetry.sdk.language", "rust"), + KeyValue::new("key1", "value1"), + KeyValue::new("k3", "value2"), + KeyValue::new("service.name", "unknown_service"), + ])) + ); + }, ); // When `OTEL_RESOURCE_ATTRIBUTES` is set and also user provided config - env::set_var( + temp_env::with_var( "OTEL_RESOURCE_ATTRIBUTES", - "my-custom-key=env-val,k2=value2", - ); - let user_provided_resource_config_provider = super::TracerProvider::builder() - .with_config(Config { - resource: Cow::Owned(Resource::default().merge(&mut Resource::new(vec![ - KeyValue::new("my-custom-key", "my-custom-value"), - ]))), - ..Default::default() - }) - .build(); - assert_eq!( - user_provided_resource_config_provider.config().resource, - Cow::Owned(Resource::new(vec![ - KeyValue::new("telemetry.sdk.name", "opentelemetry"), - KeyValue::new("telemetry.sdk.version", env!("CARGO_PKG_VERSION")), - KeyValue::new("telemetry.sdk.language", "rust"), - KeyValue::new("my-custom-key", "my-custom-value"), - KeyValue::new("k2", "value2"), - KeyValue::new("service.name", "unknown_service"), - ])) + Some("my-custom-key=env-val,k2=value2"), + || { + let user_provided_resource_config_provider = super::TracerProvider::builder() + .with_config(Config { + resource: Cow::Owned(Resource::default().merge(&mut Resource::new(vec![ + KeyValue::new("my-custom-key", "my-custom-value"), + ]))), + ..Default::default() + }) + .build(); + assert_eq!( + user_provided_resource_config_provider.config().resource, + Cow::Owned(Resource::new(vec![ + KeyValue::new("telemetry.sdk.name", "opentelemetry"), + KeyValue::new("telemetry.sdk.version", env!("CARGO_PKG_VERSION")), + KeyValue::new("telemetry.sdk.language", "rust"), + KeyValue::new("my-custom-key", "my-custom-value"), + KeyValue::new("k2", "value2"), + KeyValue::new("service.name", "unknown_service"), + ])) + ); + }, ); - 
env::remove_var("OTEL_RESOURCE_ATTRIBUTES"); // If user provided a resource, it takes priority during collision. let no_service_name = super::TracerProvider::builder() diff --git a/opentelemetry-sdk/src/trace/span_processor.rs b/opentelemetry-sdk/src/trace/span_processor.rs index 0f6811e6e0..cd156fd952 100644 --- a/opentelemetry-sdk/src/trace/span_processor.rs +++ b/opentelemetry-sdk/src/trace/span_processor.rs @@ -785,31 +785,36 @@ mod tests { #[test] fn test_build_batch_span_processor_builder() { - std::env::set_var(OTEL_BSP_MAX_EXPORT_BATCH_SIZE, "500"); - std::env::set_var(OTEL_BSP_EXPORT_TIMEOUT, "2046"); - std::env::set_var(OTEL_BSP_SCHEDULE_DELAY, "I am not number"); - - let mut builder = BatchSpanProcessor::builder(new_test_exporter().0, runtime::Tokio); - // export batch size cannot exceed max queue size - assert_eq!(builder.config.max_export_batch_size, 500); - assert_eq!( - builder.config.scheduled_delay, - Duration::from_millis(OTEL_BSP_SCHEDULE_DELAY_DEFAULT) - ); - assert_eq!( - builder.config.max_queue_size, - OTEL_BSP_MAX_QUEUE_SIZE_DEFAULT - ); - assert_eq!( - builder.config.max_export_timeout, - Duration::from_millis(2046) - ); + let mut env_vars = vec![ + (OTEL_BSP_MAX_EXPORT_BATCH_SIZE, Some("500")), + (OTEL_BSP_SCHEDULE_DELAY, Some("I am not number")), + (OTEL_BSP_EXPORT_TIMEOUT, Some("2046")), + ]; + temp_env::with_vars(env_vars.clone(), || { + let builder = BatchSpanProcessor::builder(new_test_exporter().0, runtime::Tokio); + // export batch size cannot exceed max queue size + assert_eq!(builder.config.max_export_batch_size, 500); + assert_eq!( + builder.config.scheduled_delay, + Duration::from_millis(OTEL_BSP_SCHEDULE_DELAY_DEFAULT) + ); + assert_eq!( + builder.config.max_queue_size, + OTEL_BSP_MAX_QUEUE_SIZE_DEFAULT + ); + assert_eq!( + builder.config.max_export_timeout, + Duration::from_millis(2046) + ); + }); - std::env::set_var(OTEL_BSP_MAX_QUEUE_SIZE, "120"); - builder = BatchSpanProcessor::builder(new_test_exporter().0, runtime::Tokio); + env_vars.push((OTEL_BSP_MAX_QUEUE_SIZE, Some("120"))); - assert_eq!(builder.config.max_export_batch_size, 120); - assert_eq!(builder.config.max_queue_size, 120); + temp_env::with_vars(env_vars, || { + let builder = BatchSpanProcessor::builder(new_test_exporter().0, runtime::Tokio); + assert_eq!(builder.config.max_export_batch_size, 120); + assert_eq!(builder.config.max_queue_size, 120); + }); } #[tokio::test] diff --git a/opentelemetry-zipkin/Cargo.toml b/opentelemetry-zipkin/Cargo.toml index fd5f0436a3..34fc2a24ec 100644 --- a/opentelemetry-zipkin/Cargo.toml +++ b/opentelemetry-zipkin/Cargo.toml @@ -47,3 +47,4 @@ bytes = "1" futures-util = { version = "0.3", features = ["io"] } hyper = "0.14" opentelemetry_sdk = { default-features = false, features = ["trace", "testing"], path = "../opentelemetry-sdk" } +temp-env = "0.3.6" diff --git a/opentelemetry-zipkin/src/exporter/env.rs b/opentelemetry-zipkin/src/exporter/env.rs index ba934410da..75d552b24f 100644 --- a/opentelemetry-zipkin/src/exporter/env.rs +++ b/opentelemetry-zipkin/src/exporter/env.rs @@ -37,21 +37,22 @@ pub(crate) fn get_endpoint() -> String { #[test] fn test_collector_defaults() { // Ensure the variables are undefined. 
-    env::remove_var(ENV_TIMEOUT);
-    env::remove_var(ENV_ENDPOINT);
     assert_eq!(DEFAULT_COLLECTOR_TIMEOUT, get_timeout());
     assert_eq!(DEFAULT_COLLECTOR_ENDPOINT, get_endpoint());
 
     // Bad Timeout Value
-    env::set_var(ENV_TIMEOUT, "a");
-    assert_eq!(DEFAULT_COLLECTOR_TIMEOUT, get_timeout());
+    temp_env::with_var(ENV_TIMEOUT, Some("a"), || {
+        assert_eq!(DEFAULT_COLLECTOR_TIMEOUT, get_timeout());
+    });
 
     // Good Timeout Value
-    env::set_var(ENV_TIMEOUT, "777");
-    assert_eq!(Duration::from_millis(777), get_timeout());
+    temp_env::with_var(ENV_TIMEOUT, Some("777"), || {
+        assert_eq!(Duration::from_millis(777), get_timeout());
+    });
 
     // Custom Endpoint
     let custom_endpoint = "https://example.com/api/v2/spans";
-    env::set_var(ENV_ENDPOINT, custom_endpoint);
-    assert_eq!(custom_endpoint, get_endpoint());
+    temp_env::with_var(ENV_ENDPOINT, Some(custom_endpoint), || {
+        assert_eq!(custom_endpoint, get_endpoint());
+    });
 }

From d52ff83f300f46ebbd7d522a96eac4e7fc720eac Mon Sep 17 00:00:00 2001
From: Kevin Sun <88293697+kevi-sun@users.noreply.github.com>
Date: Tue, 28 Nov 2023 03:55:00 +1100
Subject: [PATCH 43/68] opentelemetry-proto-support-feature-gen-grpcio-messages
 (#1316)

In some cases (e.g. Rust wasm), it is not possible to include
network-related crates (e.g. grpcio), yet protobuf encoding support is
still desirable. So, similar to the existing `gen-tonic-messages`
feature, this introduces a `gen-grpcio-messages` feature that provides
protobuf encoding without pulling in any network client/server code.

## Changes

- replaces some existing uses of `gen-grpcio` with `gen-grpcio-messages`
- enabling the `gen-grpcio` feature now automatically enables
  `gen-grpcio-messages`

---
 opentelemetry-proto/Cargo.toml                | 3 ++-
 opentelemetry-proto/src/lib.rs                | 13 +++++++++----
 opentelemetry-proto/src/proto.rs              | 3 ++-
 opentelemetry-proto/src/transform/common.rs   | 6 +++---
 opentelemetry-proto/src/transform/metrics.rs  | 2 +-
 opentelemetry-proto/src/transform/trace.rs    | 2 +-
 6 files changed, 18 insertions(+), 11 deletions(-)

diff --git a/opentelemetry-proto/Cargo.toml b/opentelemetry-proto/Cargo.toml
index 3a6d761b25..d738cc41e5 100644
--- a/opentelemetry-proto/Cargo.toml
+++ b/opentelemetry-proto/Cargo.toml
@@ -31,7 +31,8 @@ full = ["gen-tonic", "gen-grpcio", "trace", "logs", "metrics", "zpages", "with-s
 # crates used to generate rs files
 gen-tonic = ["gen-tonic-messages", "tonic/transport"]
 gen-tonic-messages = ["tonic", "prost"]
-gen-grpcio = ["grpcio", "prost"]
+gen-grpcio = ["gen-grpcio-messages", "grpcio"]
+gen-grpcio-messages = ["prost"]
 
 # telemetry pillars and functions
 trace = ["opentelemetry/trace", "opentelemetry_sdk/trace"]
diff --git a/opentelemetry-proto/src/lib.rs b/opentelemetry-proto/src/lib.rs
index 1e0248bc6a..a5068f7ddb 100644
--- a/opentelemetry-proto/src/lib.rs
+++ b/opentelemetry-proto/src/lib.rs
@@ -17,8 +17,10 @@
 //! - `zpages`: generate types that used in zPages. Currently only tracez related types will be generated. Currently supports `gen-tonic` or `gen-grpcio`.
 //!
 //! ## Crates used to generate files
-//! - `gen-tonic`: generate rs files using [tonic](https://github.com/hyperium/tonic) and [prost](https://github.com/tokio-rs/prost).
-//! - `gen-grpcio`: generate rs files using [grpcio](https://github.com/tikv/grpc-rs).
+//! - `gen-tonic-messages`: generate rs files using [tonic](https://github.com/hyperium/tonic) and [prost](https://github.com/tokio-rs/prost).
+//! - `gen-tonic`: adding tonic transport to "gen-tonic-messages"
+//! 
- `gen-grpcio-messages`: generate rs files using [grpcio](https://github.com/tikv/grpc-rs). +//! - `gen-grpcio`: adding gRPC core to "gen-grpcio-messages" //! //! ## Misc //! - `full`: enabled all features above. @@ -32,10 +34,13 @@ #[doc(hidden)] mod proto; -#[cfg(feature = "gen-grpcio")] +#[cfg(feature = "gen-grpcio-messages")] pub use proto::grpcio; #[cfg(feature = "gen-tonic-messages")] pub use proto::tonic; -mod transform; +pub mod transform; + +#[cfg(feature = "gen-grpcio-messages")] +pub use prost; diff --git a/opentelemetry-proto/src/proto.rs b/opentelemetry-proto/src/proto.rs index 3be6f783e7..419302a01f 100644 --- a/opentelemetry-proto/src/proto.rs +++ b/opentelemetry-proto/src/proto.rs @@ -76,10 +76,11 @@ pub mod tonic { pub use crate::transform::common::tonic::Attributes; } -#[cfg(feature = "gen-grpcio")] +#[cfg(feature = "gen-grpcio-messages")] /// Generated files using [`grpcio`](https://docs.rs/crate/grpcio) and [`grpcio-compiler`](https://docs.rs/grpcio-compiler) pub mod grpcio { /// Service stub and clients + #[cfg(feature = "gen-grpcio")] #[path = ""] pub mod collector { #[cfg(feature = "logs")] diff --git a/opentelemetry-proto/src/transform/common.rs b/opentelemetry-proto/src/transform/common.rs index 7d6f00a71f..cff8854c8d 100644 --- a/opentelemetry-proto/src/transform/common.rs +++ b/opentelemetry-proto/src/transform/common.rs @@ -1,11 +1,11 @@ #[cfg(all( - any(feature = "gen-tonic-messages", feature = "gen-grpcio"), + any(feature = "gen-tonic-messages", feature = "gen-grpcio-messages"), any(feature = "trace", feature = "metrics", feature = "logs") ))] use std::time::{Duration, SystemTime, UNIX_EPOCH}; #[cfg(all( - any(feature = "gen-tonic-messages", feature = "gen-grpcio"), + any(feature = "gen-tonic-messages", feature = "gen-grpcio-messages"), any(feature = "trace", feature = "metrics", feature = "logs") ))] pub(crate) fn to_nanos(time: SystemTime) -> u64 { @@ -123,7 +123,7 @@ pub mod tonic { } } -#[cfg(feature = "gen-grpcio")] +#[cfg(feature = "gen-grpcio-messages")] pub mod grpcio { use crate::proto::grpcio::common::v1::{ any_value, AnyValue, ArrayValue, InstrumentationScope, KeyValue, diff --git a/opentelemetry-proto/src/transform/metrics.rs b/opentelemetry-proto/src/transform/metrics.rs index e1a919664d..f3b04bb6e9 100644 --- a/opentelemetry-proto/src/transform/metrics.rs +++ b/opentelemetry-proto/src/transform/metrics.rs @@ -343,7 +343,7 @@ pub mod tonic { } } -#[cfg(feature = "gen-grpcio")] +#[cfg(feature = "gen-grpcio-messages")] pub mod grpcio { use std::any::Any; use std::fmt; diff --git a/opentelemetry-proto/src/transform/trace.rs b/opentelemetry-proto/src/transform/trace.rs index 616c531c75..a2dbf2dfe7 100644 --- a/opentelemetry-proto/src/transform/trace.rs +++ b/opentelemetry-proto/src/transform/trace.rs @@ -109,7 +109,7 @@ pub mod tonic { } } -#[cfg(feature = "gen-grpcio")] +#[cfg(feature = "gen-grpcio-messages")] pub mod grpcio { use crate::proto::grpcio::resource::v1::Resource; use crate::proto::grpcio::trace::v1::{span, status, ResourceSpans, ScopeSpans, Span, Status}; From fcd12eb2c2d379e50f685e43a3ac3d81c3902c99 Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Mon, 27 Nov 2023 14:29:29 -0800 Subject: [PATCH 44/68] Fix CI check for docs (#1406) --- opentelemetry-proto/src/transform/common.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/opentelemetry-proto/src/transform/common.rs b/opentelemetry-proto/src/transform/common.rs index cff8854c8d..a6ddcff6b6 100644 --- a/opentelemetry-proto/src/transform/common.rs +++ 
b/opentelemetry-proto/src/transform/common.rs
@@ -51,7 +51,7 @@ pub mod tonic {
         }
     }
 
-    /// Wrapper type for Vec<[`KeyValue`](crate::proto::tonic::common::v1::KeyValue)>
+    /// Wrapper type for Vec<`KeyValue`>
     #[derive(Default)]
     pub struct Attributes(pub ::std::vec::Vec<KeyValue>);
 
@@ -160,7 +160,7 @@ pub mod grpcio {
         }
     }
 
-    /// Wrapper type for Vec<[`KeyValue`](crate::proto::grpcio::common::v1::KeyValue)>
+    /// Wrapper type for Vec<`KeyValue`>
     #[derive(Default)]
     pub struct Attributes(pub ::std::vec::Vec<KeyValue>);

From 897e70a0936f11efcc05cfc9c342891fb2976f35 Mon Sep 17 00:00:00 2001
From: Lalit Kumar Bhasin
Date: Tue, 28 Nov 2023 00:32:55 -0800
Subject: [PATCH 45/68] Add tracing-http example (#1404)

A modified version of the
[traceresponse](https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/examples/traceresponse)
example (in the `contrib` repo) that demonstrates context propagation
from client to server.

The example:

- Removes the code that propagates trace-context as part of the
  response headers from server to client, as the W3C spec for that is
  still a draft
  (https://w3c.github.io/trace-context/#trace-context-http-response-headers-format)
  and the propagator lives in the contrib repo.
- Modifies the HTTP server and client code to look more complete and to
  demonstrate context propagation across async delegates.

**_Server_**

- Enhances the server's request handling by adding support for `/echo`
  and `/health` endpoints. Upon receiving a request, the server now
  creates a child span, linked to the originating remote span, and
  forwards the request to its respective delegate async task. Within
  each async task, a further child span is spawned, parented by the
  initial child span. This nested span creation demonstrates the
  propagation of tracing context through multiple layers of async
  execution.

**_Client_**

- The client sends requests for `/echo` and `/health` within the
  context of the client root span.

---
 Cargo.toml                                    |   1 +
 examples/README.md                            |   3 +-
 examples/tracing-http-propagator/Cargo.toml   |  25 +++++
 examples/tracing-http-propagator/README.md    |  27 +++++
 .../tracing-http-propagator/src/client.rs     |  69 ++++++++++++
 .../tracing-http-propagator/src/server.rs     | 102 ++++++++++++++++++
 6 files changed, 225 insertions(+), 2 deletions(-)
 create mode 100644 examples/tracing-http-propagator/Cargo.toml
 create mode 100644 examples/tracing-http-propagator/README.md
 create mode 100644 examples/tracing-http-propagator/src/client.rs
 create mode 100644 examples/tracing-http-propagator/src/server.rs

diff --git a/Cargo.toml b/Cargo.toml
index 29471f38d9..b7d7f609cd 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -21,6 +21,7 @@ members = [
     "examples/metrics-advanced",
     "examples/logs-basic",
     "examples/tracing-grpc",
+    "examples/tracing-http-propagator",
     "examples/tracing-jaeger",
     "stress",
 ]
diff --git a/examples/README.md b/examples/README.md
index dbdcd5f898..d35a65eaa3 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -20,13 +20,12 @@ This example uses following crates from this repo:
 
 Check this example if you want to understand *how to instrument metrics using opentelemetry*.
 
-## traceresponse +## tracing-http-propagator **Tracing** This example uses following crates from this repo: - opentelemetry(tracing) - opentelemetry-http -- opentelemetry-contrib(TraceContextResponsePropagator) - opentelemetry-stdout ## tracing-grpc diff --git a/examples/tracing-http-propagator/Cargo.toml b/examples/tracing-http-propagator/Cargo.toml new file mode 100644 index 0000000000..3c369d1b04 --- /dev/null +++ b/examples/tracing-http-propagator/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "tracing-http-propagator" +version = "0.1.0" +edition = "2021" +license = "Apache-2.0" +publish = false + +[[bin]] # Bin to run the http server +name = "http-server" +path = "src/server.rs" +doc = false + +[[bin]] # Bin to run the client +name = "http-client" +path = "src/client.rs" +doc = false + +[dependencies] +hyper = { version = "0.14", features = ["full"] } +tokio = { version = "1.0", features = ["full"] } +opentelemetry = { path = "../../opentelemetry" } +opentelemetry_sdk = { path = "../../opentelemetry-sdk" } +opentelemetry-http = { path = "../../opentelemetry-http" } +opentelemetry-stdout = { path = "../../opentelemetry-stdout", features = ["trace"] } +opentelemetry-semantic-conventions = { path = "../../opentelemetry-semantic-conventions" } \ No newline at end of file diff --git a/examples/tracing-http-propagator/README.md b/examples/tracing-http-propagator/README.md new file mode 100644 index 0000000000..a351e24623 --- /dev/null +++ b/examples/tracing-http-propagator/README.md @@ -0,0 +1,27 @@ +# HTTP Example + +This is a simple example using [hyper] that demonstrates tracing http request +from client to server. The example shows key aspects of tracing +such as: + +- Root Span (on Client) +- Child Span from a Remote Parent (on Server) +- Child Span created on the async function parented by the first level child (on Server) +- SpanContext Propagation (from Client to Server) +- Span Events +- Span Attributes +- Context propagation across async task boundaries. + +[hyper]: https://hyper.rs/ + +## Usage + +```shell +# Run server +$ cargo run --bin http-server + +# In another tab, run client +$ cargo run --bin http-client + +# The spans should be visible in stdout in the order that they were exported. +``` \ No newline at end of file diff --git a/examples/tracing-http-propagator/src/client.rs b/examples/tracing-http-propagator/src/client.rs new file mode 100644 index 0000000000..e08d3695cc --- /dev/null +++ b/examples/tracing-http-propagator/src/client.rs @@ -0,0 +1,69 @@ +use hyper::{body::Body, Client}; +use opentelemetry::{ + global, + trace::{SpanKind, TraceContextExt, Tracer}, + Context, KeyValue, +}; +use opentelemetry_http::HeaderInjector; +use opentelemetry_sdk::{propagation::TraceContextPropagator, trace::TracerProvider}; +use opentelemetry_stdout::SpanExporter; + +fn init_tracer() { + global::set_text_map_propagator(TraceContextPropagator::new()); + // Install stdout exporter pipeline to be able to retrieve the collected spans. + // For the demonstration, use `Sampler::AlwaysOn` sampler to sample all traces. 
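+    // The simple exporter below exports each span individually as it ends,
+    // which is fine for a demo; a production service would typically prefer
+    // `.with_batch_exporter(exporter, runtime)` instead.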
+    let provider = TracerProvider::builder()
+        .with_simple_exporter(SpanExporter::default())
+        .build();
+
+    global::set_tracer_provider(provider);
+}
+
+async fn send_request(
+    url: &str,
+    body_content: &str,
+    span_name: &str,
+) -> std::result::Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
+    let client = Client::new();
+    let tracer = global::tracer("example/client");
+    let span = tracer
+        .span_builder(String::from(span_name))
+        .with_kind(SpanKind::Client)
+        .start(&tracer);
+    let cx = Context::current_with_span(span);
+
+    let mut req = hyper::Request::builder().uri(url);
+    global::get_text_map_propagator(|propagator| {
+        propagator.inject_context(&cx, &mut HeaderInjector(req.headers_mut().unwrap()))
+    });
+    let res = client
+        .request(req.body(Body::from(String::from(body_content)))?)
+        .await?;
+
+    cx.span().add_event(
+        "Got response!".to_string(),
+        vec![KeyValue::new("status", res.status().to_string())],
+    );
+
+    Ok(())
+}
+
+#[tokio::main]
+async fn main() -> std::result::Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
+    init_tracer();
+
+    send_request(
+        "http://127.0.0.1:3000/health",
+        "Health Request!",
+        "server_health_check",
+    )
+    .await?;
+    send_request(
+        "http://127.0.0.1:3000/echo",
+        "Echo Request!",
+        "server_echo_check",
+    )
+    .await?;
+
+    Ok(())
+}
diff --git a/examples/tracing-http-propagator/src/server.rs b/examples/tracing-http-propagator/src/server.rs
new file mode 100644
index 0000000000..a126db749c
--- /dev/null
+++ b/examples/tracing-http-propagator/src/server.rs
@@ -0,0 +1,102 @@
+use hyper::{
+    service::{make_service_fn, service_fn},
+    Body, Request, Response, Server, StatusCode,
+};
+use opentelemetry::{
+    global,
+    trace::{FutureExt, Span, SpanKind, TraceContextExt, Tracer},
+    Context, KeyValue,
+};
+use opentelemetry_http::HeaderExtractor;
+use opentelemetry_sdk::{propagation::TraceContextPropagator, trace::TracerProvider};
+use opentelemetry_semantic_conventions::trace;
+use opentelemetry_stdout::SpanExporter;
+use std::{convert::Infallible, net::SocketAddr};
+
+// Utility function to extract the context from the incoming request headers
+fn extract_context_from_request(req: &Request<Body>) -> Context {
+    global::get_text_map_propagator(|propagator| {
+        propagator.extract(&HeaderExtractor(req.headers()))
+    })
+}
+
+// Separate async function for the health check endpoint
+async fn handle_health_check(_req: Request<Body>) -> Result<Response<Body>, Infallible> {
+    let tracer = global::tracer("example/server");
+    let mut span = tracer
+        .span_builder("health_check")
+        .with_kind(SpanKind::Internal)
+        .start(&tracer);
+    span.add_event("Health check accessed", vec![]);
+    let res = Response::new(Body::from("Server is up and running!"));
+    Ok(res)
+}
+
+// Separate async function for the echo endpoint
+async fn handle_echo(req: Request<Body>) -> Result<Response<Body>, Infallible> {
+    let tracer = global::tracer("example/server");
+    let mut span = tracer
+        .span_builder("echo")
+        .with_kind(SpanKind::Internal)
+        .start(&tracer);
+    span.add_event("Echoing back the request", vec![]);
+    let res = Response::new(req.into_body());
+    Ok(res)
+}
+
+async fn router(req: Request<Body>) -> Result<Response<Body>, Infallible> {
+    // Extract the context from the incoming request headers
+    let parent_cx = extract_context_from_request(&req);
+    let response = {
+        // Create a span parenting the remote client span.
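+        // `parent_cx` was extracted from the incoming request headers above,
+        // so the span created below joins the trace the client started
+        // instead of beginning a new one.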
+        let tracer = global::tracer("example/server");
+        let mut span = tracer
+            .span_builder("router")
+            .with_kind(SpanKind::Server)
+            .start_with_context(&tracer, &parent_cx);
+
+        span.add_event("dispatching request", vec![]);
+
+        let cx = Context::default().with_span(span);
+        match (req.method(), req.uri().path()) {
+            (&hyper::Method::GET, "/health") => handle_health_check(req).with_context(cx).await,
+            (&hyper::Method::GET, "/echo") => handle_echo(req).with_context(cx).await,
+            _ => {
+                cx.span()
+                    .set_attribute(KeyValue::new(trace::HTTP_RESPONSE_STATUS_CODE, 404));
+                let mut not_found = Response::default();
+                *not_found.status_mut() = StatusCode::NOT_FOUND;
+                Ok(not_found)
+            }
+        }
+    };
+    response
+}
+
+fn init_tracer() {
+    global::set_text_map_propagator(TraceContextPropagator::new());
+
+    // Install stdout exporter pipeline to be able to retrieve the collected spans.
+    // For the demonstration, use `Sampler::AlwaysOn` sampler to sample all traces. In a production
+    // application, use `Sampler::ParentBased` or `Sampler::TraceIdRatioBased` with a desired ratio.
+    let provider = TracerProvider::builder()
+        .with_simple_exporter(SpanExporter::default())
+        .build();
+
+    global::set_tracer_provider(provider);
+}
+
+#[tokio::main]
+async fn main() {
+    init_tracer();
+    let addr = SocketAddr::from(([127, 0, 0, 1], 3000));
+
+    let make_svc = make_service_fn(|_conn| async { Ok::<_, Infallible>(service_fn(router)) });
+
+    let server = Server::bind(&addr).serve(make_svc);
+
+    println!("Listening on {addr}");
+    if let Err(e) = server.await {
+        eprintln!("server error: {e}");
+    }
+}

From c0104d3f542de9711e411c60e437188f0310b070 Mon Sep 17 00:00:00 2001
From: Matthew Shapiro
Date: Tue, 28 Nov 2023 12:05:44 -0500
Subject: [PATCH 46/68] Precalculated attribute set hashes (#1407)

The hash of an `AttributeSet` is expensive to compute, as it has to
cover every key and value in the attribute set. This hash is used by
the `ValueMap` to look up whether we are already aggregating a time
series for this set of attributes or not. Since this hashmap lookup
occurs inside a mutex lock, no other counters can execute their `add()`
calls while the hash is being calculated, which causes contention in
high throughput scenarios.

This PR calculates and caches the hash at creation time. This improves
throughput because the hash is calculated by the thread creating the
`AttributeSet` and is performed outside of any mutex locks, meaning
hashes can be computed in parallel and the time spent within a mutex
lock is reduced. As larger sets of attributes are used for time series,
the benefits of this reduction in lock time should grow.

The stress test results of this change for different thread counts are:

| Thread Count | Main      | PR        |
| ------------ | --------- | --------- |
| 2            | 3,376,040 | 3,310,920 |
| 3            | 5,908,640 | 5,807,240 |
| 4            | 3,382,040 | 8,094,960 |
| 5            | 1,212,640 | 9,086,520 |
| 6            | 1,225,280 | 6,595,600 |

The non-precomputed hashes start feeling contention at 4 threads and
drop substantially after that, while the precomputed hashes don't see
contention until 6 threads, and even then throughput stays 5-6x higher
after contention thanks to the reduced locking times.
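In miniature, the cached-hash pattern looks like this (a simplified
sketch, with plain strings standing in for the SDK's `HashKeyValue`
entries; the actual change is in the diff below):

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

/// A set whose hash is computed once at construction and reused thereafter.
struct CachedHashSet {
    values: Vec<String>,
    hash: u64, // precomputed in `new`, outside of any mutex lock
}

impl CachedHashSet {
    fn new(mut values: Vec<String>) -> Self {
        values.sort_unstable(); // canonical order: equal sets hash equally
        let mut hasher = DefaultHasher::new();
        for v in &values {
            v.hash(&mut hasher);
        }
        CachedHashSet {
            hash: hasher.finish(),
            values,
        }
    }
}

impl Hash for CachedHashSet {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // O(1) no matter how many values the set holds.
        state.write_u64(self.hash)
    }
}
```

A hashmap keyed by such a set then pays the full hashing cost only
once, when the key is constructed.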
While these benchmarks may not be "realistic" (since most applications
will be doing more work in between counter updates), they do show a
benefit of better parallelism and the opportunity to reduce lock
contention, at the cost of only 8 bytes per time series (so a total of
16KB additional memory at maximum cardinality).
---
 opentelemetry-sdk/src/attributes/set.rs | 32 ++++++++++++++++++-------
 1 file changed, 24 insertions(+), 8 deletions(-)

diff --git a/opentelemetry-sdk/src/attributes/set.rs b/opentelemetry-sdk/src/attributes/set.rs
index 06490879a1..ae5d5a4a73 100644
--- a/opentelemetry-sdk/src/attributes/set.rs
+++ b/opentelemetry-sdk/src/attributes/set.rs
@@ -1,3 +1,4 @@
+use std::collections::hash_map::DefaultHasher;
 use std::collections::HashSet;
 use std::{
     cmp::Ordering,
@@ -104,13 +105,13 @@ impl Eq for HashKeyValue {}
 ///
 /// This must implement [Hash], [PartialEq], and [Eq] so it may be used as
 /// HashMap keys and other de-duplication methods.
-#[derive(Clone, Default, Debug, Hash, PartialEq, Eq)]
-pub struct AttributeSet(Vec<HashKeyValue>);
+#[derive(Clone, Default, Debug, PartialEq, Eq)]
+pub struct AttributeSet(Vec<HashKeyValue>, u64);
 
 impl From<&[KeyValue]> for AttributeSet {
     fn from(values: &[KeyValue]) -> Self {
         let mut seen_keys = HashSet::with_capacity(values.len());
-        let mut vec = values
+        let vec = values
             .iter()
             .rev()
             .filter_map(|kv| {
@@ -121,25 +122,34 @@ impl From<&[KeyValue]> for AttributeSet {
                 }
             })
             .collect::<Vec<_>>();
-        vec.sort_unstable();
 
-        AttributeSet(vec)
+        AttributeSet::new(vec)
     }
 }
 
 impl From<&Resource> for AttributeSet {
     fn from(values: &Resource) -> Self {
-        let mut vec = values
+        let vec = values
             .iter()
             .map(|(key, value)| HashKeyValue(KeyValue::new(key.clone(), value.clone())))
             .collect::<Vec<_>>();
-        vec.sort_unstable();
 
-        AttributeSet(vec)
+        AttributeSet::new(vec)
    }
 }
 
 impl AttributeSet {
+    fn new(mut values: Vec<HashKeyValue>) -> Self {
+        values.sort_unstable();
+        let mut hasher = DefaultHasher::new();
+        values.iter().fold(&mut hasher, |mut hasher, item| {
+            item.hash(&mut hasher);
+            hasher
+        });
+
+        AttributeSet(values, hasher.finish())
+    }
+
     /// Returns the number of elements in the set.
     pub fn len(&self) -> usize {
         self.0.len()
@@ -163,3 +173,9 @@ impl AttributeSet {
         self.0.iter().map(|kv| (&kv.0.key, &kv.0.value))
     }
 }
+
+impl Hash for AttributeSet {
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        state.write_u64(self.1)
+    }
+}

From cd23ec279ad8954001e7b855f3132c2030312453 Mon Sep 17 00:00:00 2001
From: Julian Tescher
Date: Wed, 29 Nov 2023 17:56:20 -0500
Subject: [PATCH 47/68] Remove old global meter provider shutdown method
 (#1412)

This change removes the old `global::shutdown_meter_provider` method
which is not part of the metrics API spec, and properly documents the
`SdkMeterProvider::shutdown` method which is spec compliant.
---
 opentelemetry-sdk/src/metrics/meter_provider.rs | 17 +++++++++--------
 opentelemetry/CHANGELOG.md                      |  3 ++-
 opentelemetry/src/global/metrics.rs             |  8 --------
 3 files changed, 11 insertions(+), 17 deletions(-)

diff --git a/opentelemetry-sdk/src/metrics/meter_provider.rs b/opentelemetry-sdk/src/metrics/meter_provider.rs
index 7ea1ec9b9a..fd10b832ea 100644
--- a/opentelemetry-sdk/src/metrics/meter_provider.rs
+++ b/opentelemetry-sdk/src/metrics/meter_provider.rs
@@ -58,30 +58,31 @@ impl SdkMeterProvider {
     /// use opentelemetry_sdk::metrics::SdkMeterProvider;
     ///
     /// fn init_metrics() -> SdkMeterProvider {
+    ///     // Setup metric pipelines with readers + views, default has no
+    ///     // readers so nothing is exported.
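+    ///     // (e.g. `SdkMeterProvider::builder().with_reader(reader).build()`)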
/// let provider = SdkMeterProvider::default(); /// /// // Set provider to be used as global meter provider /// let _ = global::set_meter_provider(provider.clone()); /// - /// // Setup metric pipelines with readers + views - /// /// provider /// } /// - /// fn main() { + /// fn main() -> Result<(), Box> { /// let provider = init_metrics(); /// /// // create instruments + record measurements /// /// // force all instruments to flush - /// provider.force_flush().unwrap(); + /// provider.force_flush()?; /// /// // record more measurements.. /// - /// // dropping provider and shutting down global provider ensure all - /// // remaining metrics data are exported - /// drop(provider); - /// global::shutdown_meter_provider(); + /// // shutdown ensures any cleanup required by the provider is done, + /// // and also invokes shutdown on the readers. + /// provider.shutdown()?; + /// + /// Ok(()) /// } /// ``` pub fn force_flush(&self) -> Result<()> { diff --git a/opentelemetry/CHANGELOG.md b/opentelemetry/CHANGELOG.md index 0e7aebd62f..77c5b52a93 100644 --- a/opentelemetry/CHANGELOG.md +++ b/opentelemetry/CHANGELOG.md @@ -10,7 +10,6 @@ of entries, so moving from `IndexMap` to `HashMap` offers slight performance gains, and avoids `IndexMap` dependency. This affects `body` and `attributes` of `LogRecord`. [#1353](https://github.com/open-telemetry/opentelemetry-rust/pull/1353) - - Add `TextMapCompositePropagator` [#1373](https://github.com/open-telemetry/opentelemetry-rust/pull/1373) ### Removed @@ -19,6 +18,8 @@ gains, and avoids `IndexMap` dependency. This affects `body` and `attributes` of `HashMap`. [#1353](https://github.com/open-telemetry/opentelemetry-rust/pull/1353) - Remove API for Creating Histograms with signed integers. [#1371](https://github.com/open-telemetry/opentelemetry-rust/pull/1371) +- Remove `global::shutdown_meter_provider`, use `SdkMeterProvider::shutdown` + directly instead (#1412). ## [v0.21.0](https://github.com/open-telemetry/opentelemetry-rust/compare/v0.20.0...v0.21.0) diff --git a/opentelemetry/src/global/metrics.rs b/opentelemetry/src/global/metrics.rs index 549a4fc24c..c40477e5ac 100644 --- a/opentelemetry/src/global/metrics.rs +++ b/opentelemetry/src/global/metrics.rs @@ -98,14 +98,6 @@ where *global_provider = GlobalMeterProvider::new(new_provider); } -/// Shut down the current meter global meter provider. -pub fn shutdown_meter_provider() { - let mut global_provider = GLOBAL_METER_PROVIDER - .write() - .expect("GLOBAL_METER_PROVIDER RwLock poisoned"); - *global_provider = GlobalMeterProvider::new(metrics::noop::NoopMeterProvider::new()); -} - /// Returns an instance of the currently configured global [`MeterProvider`] /// through [`GlobalMeterProvider`]. 
pub fn meter_provider() -> GlobalMeterProvider { From 8b838fc07e992dd4c39db5375eed64cbe8b4313c Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Thu, 30 Nov 2023 08:53:18 -0800 Subject: [PATCH 48/68] Add tests for tracing appender (#1415) --- opentelemetry-appender-tracing/Cargo.toml | 1 + opentelemetry-appender-tracing/src/layer.rs | 142 ++++++++++++++++++++ opentelemetry/src/logs/record.rs | 2 +- 3 files changed, 144 insertions(+), 1 deletion(-) diff --git a/opentelemetry-appender-tracing/Cargo.toml b/opentelemetry-appender-tracing/Cargo.toml index 7c2e389ebc..c0775d2f14 100644 --- a/opentelemetry-appender-tracing/Cargo.toml +++ b/opentelemetry-appender-tracing/Cargo.toml @@ -24,3 +24,4 @@ opentelemetry-stdout = { path = "../opentelemetry-stdout", features = ["logs"] } [features] logs_level_enabled = ["opentelemetry/logs_level_enabled", "opentelemetry_sdk/logs_level_enabled"] default = ["logs_level_enabled"] +testing = ["opentelemetry_sdk/testing"] \ No newline at end of file diff --git a/opentelemetry-appender-tracing/src/layer.rs b/opentelemetry-appender-tracing/src/layer.rs index f704f4810d..6c9232f043 100644 --- a/opentelemetry-appender-tracing/src/layer.rs +++ b/opentelemetry-appender-tracing/src/layer.rs @@ -140,3 +140,145 @@ const fn severity_of_level(level: &Level) -> Severity { Level::ERROR => Severity::Error, } } + +#[cfg(test)] +mod tests { + use crate::layer; + use opentelemetry::logs::Severity; + use opentelemetry::trace::TracerProvider as _; + use opentelemetry::trace::{TraceContextExt, TraceFlags, Tracer}; + use opentelemetry::{logs::AnyValue, Key}; + use opentelemetry_sdk::logs::LoggerProvider; + use opentelemetry_sdk::testing::logs::InMemoryLogsExporter; + use opentelemetry_sdk::trace::{config, Sampler, TracerProvider}; + use tracing::error; + use tracing_subscriber::layer::SubscriberExt; + + // cargo test --features=testing + #[test] + fn tracing_appender_standalone() { + // Arrange + let exporter: InMemoryLogsExporter = InMemoryLogsExporter::default(); + let logger_provider = LoggerProvider::builder() + .with_simple_exporter(exporter.clone()) + .build(); + + let layer = layer::OpenTelemetryTracingBridge::new(&logger_provider); + let subscriber = tracing_subscriber::registry().with(layer); + + // avoiding setting tracing subscriber as global as that does not + // play well with unit tests. + let _guard = tracing::subscriber::set_default(subscriber); + + // Act + error!(name: "my-event-name", target: "my-system", event_id = 20, user_name = "otel", user_email = "otel@opentelemetry.io"); + logger_provider.force_flush(); + + // Assert TODO: move to helper methods + let exported_logs = exporter + .get_emitted_logs() + .expect("Logs are expected to be exported."); + assert_eq!(exported_logs.len(), 1); + let log = exported_logs + .get(0) + .expect("Atleast one log is expected to be present."); + + // Validate common fields + assert_eq!(log.instrumentation.name, "opentelemetry-appender-tracing"); + assert_eq!(log.record.severity_number, Some(Severity::Error)); + + // Validate trace context is none. 
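+        // (the event was emitted outside of any active span, so no trace
+        // context should have been attached to the log record)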
+ assert!(log.record.trace_context.is_none()); + + // Validate attributes + let attributes: Vec<(Key, AnyValue)> = log + .record + .attributes + .clone() + .expect("Attributes are expected"); + assert_eq!(attributes.len(), 4); + assert!(attributes.contains(&(Key::new("name"), "my-event-name".into()))); + assert!(attributes.contains(&(Key::new("event_id"), 20.into()))); + assert!(attributes.contains(&(Key::new("user_name"), "otel".into()))); + assert!(attributes.contains(&(Key::new("user_email"), "otel@opentelemetry.io".into()))); + } + + #[test] + fn tracing_appender_inside_tracing_context() { + // Arrange + let exporter: InMemoryLogsExporter = InMemoryLogsExporter::default(); + let logger_provider = LoggerProvider::builder() + .with_simple_exporter(exporter.clone()) + .build(); + + let layer = layer::OpenTelemetryTracingBridge::new(&logger_provider); + let subscriber = tracing_subscriber::registry().with(layer); + + // avoiding setting tracing subscriber as global as that does not + // play well with unit tests. + let _guard = tracing::subscriber::set_default(subscriber); + + // setup tracing as well. + let tracer_provider = TracerProvider::builder() + .with_config(config().with_sampler(Sampler::AlwaysOn)) + .build(); + let tracer = tracer_provider.tracer("test-tracer"); + + // Act + let (trace_id_expected, span_id_expected) = tracer.in_span("test-span", |cx| { + let trace_id = cx.span().span_context().trace_id(); + let span_id = cx.span().span_context().span_id(); + + // logging is done inside span context. + error!(name: "my-event-name", target: "my-system", event_id = 20, user_name = "otel", user_email = "otel@opentelemetry.io"); + (trace_id, span_id) + }); + + logger_provider.force_flush(); + + // Assert TODO: move to helper methods + let exported_logs = exporter + .get_emitted_logs() + .expect("Logs are expected to be exported."); + assert_eq!(exported_logs.len(), 1); + let log = exported_logs + .get(0) + .expect("Atleast one log is expected to be present."); + + // validate common fields. + assert_eq!(log.instrumentation.name, "opentelemetry-appender-tracing"); + assert_eq!(log.record.severity_number, Some(Severity::Error)); + + // validate trace context. + assert!(log.record.trace_context.is_some()); + assert_eq!( + log.record.trace_context.as_ref().unwrap().trace_id, + trace_id_expected + ); + assert_eq!( + log.record.trace_context.as_ref().unwrap().span_id, + span_id_expected + ); + assert_eq!( + log.record + .trace_context + .as_ref() + .unwrap() + .trace_flags + .unwrap(), + TraceFlags::SAMPLED + ); + + // validate attributes. + let attributes: Vec<(Key, AnyValue)> = log + .record + .attributes + .clone() + .expect("Attributes are expected"); + assert_eq!(attributes.len(), 4); + assert!(attributes.contains(&(Key::new("name"), "my-event-name".into()))); + assert!(attributes.contains(&(Key::new("event_id"), 20.into()))); + assert!(attributes.contains(&(Key::new("user_name"), "otel".into()))); + assert!(attributes.contains(&(Key::new("user_email"), "otel@opentelemetry.io".into()))); + } +} diff --git a/opentelemetry/src/logs/record.rs b/opentelemetry/src/logs/record.rs index 0bd7604aea..dba4c02ef6 100644 --- a/opentelemetry/src/logs/record.rs +++ b/opentelemetry/src/logs/record.rs @@ -75,7 +75,7 @@ impl From<&SpanContext> for TraceContext { } /// Value types for representing arbitrary values in a log record. 
-#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub enum AnyValue { /// An integer value Int(i64), From 772f75ad487b3553db02d53c3aa9bbe12f67728c Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Thu, 30 Nov 2023 09:24:14 -0800 Subject: [PATCH 49/68] Invalid View to proceed as if view did not exist (#1408) --- opentelemetry-sdk/src/metrics/mod.rs | 53 +++++++++++++++++++++++++++ opentelemetry-sdk/src/metrics/view.rs | 3 +- 2 files changed, 55 insertions(+), 1 deletion(-) diff --git a/opentelemetry-sdk/src/metrics/mod.rs b/opentelemetry-sdk/src/metrics/mod.rs index 9eb34d9303..0b549f700f 100644 --- a/opentelemetry-sdk/src/metrics/mod.rs +++ b/opentelemetry-sdk/src/metrics/mod.rs @@ -209,4 +209,57 @@ mod tests { let datapoint = &sum.data_points[0]; assert_eq!(datapoint.value, 15); } + + // "multi_thread" tokio flavor must be used else flush won't + // be able to make progress! + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] + async fn histogram_aggregation_with_invalid_aggregation_should_proceed_as_if_view_not_exist() { + // Run this test with stdout enabled to see output. + // cargo test histogram_aggregation_with_invalid_aggregation_should_proceed_as_if_view_not_exist --features=metrics,testing -- --nocapture + + // Arrange + let exporter = InMemoryMetricsExporter::default(); + let reader = PeriodicReader::builder(exporter.clone(), runtime::Tokio).build(); + let criteria = Instrument::new().name("test_histogram"); + let stream_invalid_aggregation = Stream::new() + .aggregation(Aggregation::ExplicitBucketHistogram { + boundaries: vec![0.9, 1.9, 1.2, 1.3, 1.4, 1.5], // invalid boundaries + record_min_max: false, + }) + .name("test_histogram_renamed") + .unit(Unit::new("test_unit_renamed")); + + let view = + new_view(criteria, stream_invalid_aggregation).expect("Expected to create a new view"); + let meter_provider = SdkMeterProvider::builder() + .with_reader(reader) + .with_view(view) + .build(); + + // Act + let meter = meter_provider.meter("test"); + let histogram = meter + .f64_histogram("test_histogram") + .with_unit(Unit::new("test_unit")) + .init(); + + histogram.record(1.5, &[KeyValue::new("key1", "value1")]); + meter_provider.force_flush().unwrap(); + + // Assert + let resource_metrics = exporter + .get_finished_metrics() + .expect("metrics are expected to be exported."); + assert!(!resource_metrics.is_empty()); + let metric = &resource_metrics[0].scope_metrics[0].metrics[0]; + assert_eq!( + metric.name, "test_histogram", + "View rename should be ignored and original name retained." + ); + assert_eq!( + metric.unit.as_str(), + "test_unit", + "View rename of unit should be ignored and original unit retained." + ); + } } diff --git a/opentelemetry-sdk/src/metrics/view.rs b/opentelemetry-sdk/src/metrics/view.rs index 23758f3bda..b6f1a9bb94 100644 --- a/opentelemetry-sdk/src/metrics/view.rs +++ b/opentelemetry-sdk/src/metrics/view.rs @@ -139,9 +139,10 @@ pub fn new_view(criteria: Instrument, mask: Stream) -> Result> { Ok(_) => agg = Some(ma.clone()), Err(err) => { global::handle_error(MetricsError::Other(format!( - "{}, not using aggregation with view. criteria: {:?}, mask: {:?}", + "{}, Proceeding as if View did not exist. 
criteria: {:?}, mask: {:?}", err, err_msg_criteria, mask ))); + return Ok(Box::new(empty_view)); } } } From 359424776cbcb91953e9019f0ec88ba662a86bb7 Mon Sep 17 00:00:00 2001 From: Matthew Shapiro Date: Thu, 30 Nov 2023 17:32:43 -0500 Subject: [PATCH 50/68] Ignore intellij directory (#1417) --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index e64d25e133..9906874e2d 100644 --- a/.gitignore +++ b/.gitignore @@ -3,3 +3,4 @@ */target/ **/*.rs.bk Cargo.lock +/.idea/ From 0862af12857cfa92d104957f2dda3c2725a88577 Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Thu, 30 Nov 2023 15:26:59 -0800 Subject: [PATCH 51/68] Add tests to validate spatial aggregation (#1416) --- opentelemetry-sdk/src/metrics/mod.rs | 166 ++++++++++++++++++++++++++ opentelemetry-sdk/src/metrics/view.rs | 2 +- 2 files changed, 167 insertions(+), 1 deletion(-) diff --git a/opentelemetry-sdk/src/metrics/mod.rs b/opentelemetry-sdk/src/metrics/mod.rs index 0b549f700f..021ee3d469 100644 --- a/opentelemetry-sdk/src/metrics/mod.rs +++ b/opentelemetry-sdk/src/metrics/mod.rs @@ -262,4 +262,170 @@ mod tests { "View rename of unit should be ignored and original unit retained." ); } + + // "multi_thread" tokio flavor must be used else flush won't + // be able to make progress! + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] + #[ignore = "Spatial aggregation is not yet implemented."] + async fn spatial_aggregation_when_view_drops_attributes_observable_counter() { + // cargo test spatial_aggregation_when_view_drops_attributes_observable_counter --features=metrics,testing + + // Arrange + let exporter = InMemoryMetricsExporter::default(); + let reader = PeriodicReader::builder(exporter.clone(), runtime::Tokio).build(); + let criteria = Instrument::new().name("my_observable_counter"); + // View drops all attributes. + let stream_invalid_aggregation = Stream::new().allowed_attribute_keys(vec![]); + + let view = + new_view(criteria, stream_invalid_aggregation).expect("Expected to create a new view"); + let meter_provider = SdkMeterProvider::builder() + .with_reader(reader) + .with_view(view) + .build(); + + // Act + let meter = meter_provider.meter("test"); + let observable_counter = meter.u64_observable_counter("my_observable_counter").init(); + + // Normally, these callbacks would generate 3 time-series, but since the view + // drops all attributes, we expect only 1 time-series. 
+ meter + .register_callback(&[observable_counter.as_any()], move |observer| { + observer.observe_u64( + &observable_counter, + 100, + [ + KeyValue::new("statusCode", "200"), + KeyValue::new("verb", "get"), + ] + .as_ref(), + ); + + observer.observe_u64( + &observable_counter, + 100, + [ + KeyValue::new("statusCode", "200"), + KeyValue::new("verb", "post"), + ] + .as_ref(), + ); + + observer.observe_u64( + &observable_counter, + 100, + [ + KeyValue::new("statusCode", "500"), + KeyValue::new("verb", "get"), + ] + .as_ref(), + ); + }) + .expect("Expected to register callback"); + + meter_provider.force_flush().unwrap(); + + // Assert + let resource_metrics = exporter + .get_finished_metrics() + .expect("metrics are expected to be exported."); + assert!(!resource_metrics.is_empty()); + let metric = &resource_metrics[0].scope_metrics[0].metrics[0]; + assert_eq!(metric.name, "my_observable_counter",); + + let sum = metric + .data + .as_any() + .downcast_ref::>() + .expect("Sum aggregation expected for ObservableCounter instruments by default"); + + // Expecting 1 time-series only, as the view drops all attributes resulting + // in a single time-series. + // This is failing today, due to lack of support for spatial aggregation. + assert_eq!(sum.data_points.len(), 1); + + // find and validate the single datapoint + let data_point = &sum.data_points[0]; + assert_eq!(data_point.value, 300); + } + + // "multi_thread" tokio flavor must be used else flush won't + // be able to make progress! + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] + #[ignore = "Spatial aggregation is not yet implemented."] + async fn spatial_aggregation_when_view_drops_attributes_counter() { + // cargo test spatial_aggregation_when_view_drops_attributes_counter --features=metrics,testing + + // Arrange + let exporter = InMemoryMetricsExporter::default(); + let reader = PeriodicReader::builder(exporter.clone(), runtime::Tokio).build(); + let criteria = Instrument::new().name("my_counter"); + // View drops all attributes. + let stream_invalid_aggregation = Stream::new().allowed_attribute_keys(vec![]); + + let view = + new_view(criteria, stream_invalid_aggregation).expect("Expected to create a new view"); + let meter_provider = SdkMeterProvider::builder() + .with_reader(reader) + .with_view(view) + .build(); + + // Act + let meter = meter_provider.meter("test"); + let counter = meter.u64_counter("my_counter").init(); + + // Normally, this would generate 3 time-series, but since the view + // drops all attributes, we expect only 1 time-series. + counter.add( + 10, + [ + KeyValue::new("statusCode", "200"), + KeyValue::new("verb", "Get"), + ] + .as_ref(), + ); + + counter.add( + 10, + [ + KeyValue::new("statusCode", "500"), + KeyValue::new("verb", "Get"), + ] + .as_ref(), + ); + + counter.add( + 10, + [ + KeyValue::new("statusCode", "200"), + KeyValue::new("verb", "Post"), + ] + .as_ref(), + ); + + meter_provider.force_flush().unwrap(); + + // Assert + let resource_metrics = exporter + .get_finished_metrics() + .expect("metrics are expected to be exported."); + assert!(!resource_metrics.is_empty()); + let metric = &resource_metrics[0].scope_metrics[0].metrics[0]; + assert_eq!(metric.name, "my_counter",); + + let sum = metric + .data + .as_any() + .downcast_ref::>() + .expect("Sum aggregation expected for Counter instruments by default"); + + // Expecting 1 time-series only, as the view drops all attributes resulting + // in a single time-series. 
+ // This is failing today, due to lack of support for spatial aggregation. + assert_eq!(sum.data_points.len(), 1); + // find and validate the single datapoint + let data_point = &sum.data_points[0]; + assert_eq!(data_point.value, 30); + } } diff --git a/opentelemetry-sdk/src/metrics/view.rs b/opentelemetry-sdk/src/metrics/view.rs index b6f1a9bb94..58c583aba9 100644 --- a/opentelemetry-sdk/src/metrics/view.rs +++ b/opentelemetry-sdk/src/metrics/view.rs @@ -139,7 +139,7 @@ pub fn new_view(criteria: Instrument, mask: Stream) -> Result> { Ok(_) => agg = Some(ma.clone()), Err(err) => { global::handle_error(MetricsError::Other(format!( - "{}, Proceeding as if View did not exist. criteria: {:?}, mask: {:?}", + "{}, proceeding as if view did not exist. criteria: {:?}, mask: {:?}", err, err_msg_criteria, mask ))); return Ok(Box::new(empty_view)); From 10456d4684e32a532c9c2b0bef82a315e3ea5154 Mon Sep 17 00:00:00 2001 From: Ho Kim Date: Sat, 2 Dec 2023 02:49:12 +0900 Subject: [PATCH 52/68] Add schemars support for opentelemetry-proto (#1419) --- opentelemetry-proto/CHANGELOG.md | 4 ++++ opentelemetry-proto/Cargo.toml | 2 ++ .../opentelemetry.proto.collector.logs.v1.rs | 3 +++ ...pentelemetry.proto.collector.metrics.v1.rs | 3 +++ .../opentelemetry.proto.collector.trace.v1.rs | 3 +++ .../tonic/opentelemetry.proto.common.v1.rs | 6 ++++++ .../tonic/opentelemetry.proto.logs.v1.rs | 6 ++++++ .../tonic/opentelemetry.proto.metrics.v1.rs | 21 +++++++++++++++++++ .../tonic/opentelemetry.proto.resource.v1.rs | 1 + .../tonic/opentelemetry.proto.trace.v1.rs | 9 ++++++++ .../tonic/opentelemetry.proto.tracez.v1.rs | 4 ++++ opentelemetry-proto/tests/grpc_build.rs | 4 ++++ scripts/lint.sh | 3 +++ 13 files changed, 69 insertions(+) diff --git a/opentelemetry-proto/CHANGELOG.md b/opentelemetry-proto/CHANGELOG.md index 7d9002c179..b0f410a9cb 100644 --- a/opentelemetry-proto/CHANGELOG.md +++ b/opentelemetry-proto/CHANGELOG.md @@ -2,6 +2,10 @@ ## vNext +### Added + +- Add `schemars::JsonSchema` trait support with `with-schemars` feature (#1419) + ## v0.4.0 ### Added diff --git a/opentelemetry-proto/Cargo.toml b/opentelemetry-proto/Cargo.toml index d738cc41e5..cb87d6177e 100644 --- a/opentelemetry-proto/Cargo.toml +++ b/opentelemetry-proto/Cargo.toml @@ -41,6 +41,7 @@ logs = ["opentelemetry/logs", "opentelemetry_sdk/logs"] zpages = ["trace"] # add ons +with-schemars = ["schemars"] with-serde = ["serde"] [dependencies] @@ -49,6 +50,7 @@ tonic = { version = "0.9.0", default-features = false, optional = true, features prost = { version = "0.11.0", optional = true } opentelemetry = { version = "0.21", default-features = false, path = "../opentelemetry" } opentelemetry_sdk = { version = "0.21", default-features = false, path = "../opentelemetry-sdk" } +schemars = { version = "0.8", optional = true } serde = { version = "1.0", optional = true, features = ["serde_derive"] } [dev-dependencies] diff --git a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.logs.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.logs.v1.rs index 432e63a05c..bd8e8e0c42 100644 --- a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.logs.v1.rs +++ b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.logs.v1.rs @@ -1,3 +1,4 @@ +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -12,6 +13,7 
@@ pub struct ExportLogsServiceRequest { super::super::super::logs::v1::ResourceLogs, >, } +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -34,6 +36,7 @@ pub struct ExportLogsServiceResponse { #[prost(message, optional, tag = "1")] pub partial_success: ::core::option::Option, } +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] diff --git a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.metrics.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.metrics.v1.rs index 158cd0a3ba..c230e2fc96 100644 --- a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.metrics.v1.rs +++ b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.metrics.v1.rs @@ -1,3 +1,4 @@ +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -12,6 +13,7 @@ pub struct ExportMetricsServiceRequest { super::super::super::metrics::v1::ResourceMetrics, >, } +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -34,6 +36,7 @@ pub struct ExportMetricsServiceResponse { #[prost(message, optional, tag = "1")] pub partial_success: ::core::option::Option, } +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] diff --git a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.trace.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.trace.v1.rs index ebae6fe45e..118cf7d8c6 100644 --- a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.trace.v1.rs +++ b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.trace.v1.rs @@ -1,3 +1,4 @@ +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -12,6 +13,7 @@ pub struct ExportTraceServiceRequest { super::super::super::trace::v1::ResourceSpans, >, } +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -34,6 +36,7 @@ pub struct ExportTraceServiceResponse { #[prost(message, optional, tag = "1")] pub partial_success: ::core::option::Option, } +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] diff --git 
a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.common.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.common.v1.rs index 25e01d78b2..95ee152f13 100644 --- a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.common.v1.rs +++ b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.common.v1.rs @@ -1,6 +1,7 @@ /// AnyValue is used to represent any type of attribute value. AnyValue may contain a /// primitive value such as a string or integer or it may contain an arbitrary nested /// object containing arrays, key-value lists and primitives. +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -14,6 +15,7 @@ pub struct AnyValue { pub mod any_value { /// The value is one of the listed fields. It is valid for all values to be unspecified /// in which case this AnyValue is considered to be "empty". + #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] @@ -36,6 +38,7 @@ pub mod any_value { } /// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message /// since oneof in AnyValue does not allow repeated fields. +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -49,6 +52,7 @@ pub struct ArrayValue { /// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to /// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches /// are semantically equivalent. +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -62,6 +66,7 @@ pub struct KeyValueList { } /// KeyValue is a key-value pair that is used to store Span attributes, Link /// attributes, etc. +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -73,6 +78,7 @@ pub struct KeyValue { } /// InstrumentationScope is a message representing the instrumentation scope information /// such as the fully qualified name and version. +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] diff --git a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.logs.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.logs.v1.rs index 3a6ff925f3..8edf2be3f9 100644 --- a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.logs.v1.rs +++ b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.logs.v1.rs @@ -8,6 +8,7 @@ /// /// When new fields are added into this message, the OTLP request MUST be updated /// as well. 
+#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -21,6 +22,7 @@ pub struct LogsData { pub resource_logs: ::prost::alloc::vec::Vec, } /// A collection of ScopeLogs from a Resource. +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -38,6 +40,7 @@ pub struct ResourceLogs { pub schema_url: ::prost::alloc::string::String, } /// A collection of Logs produced by a Scope. +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -56,6 +59,7 @@ pub struct ScopeLogs { } /// A log record according to OpenTelemetry Log Data Model: /// +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -138,6 +142,7 @@ pub struct LogRecord { pub span_id: ::prost::alloc::vec::Vec, } /// Possible values for LogRecord.SeverityNumber. +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] @@ -241,6 +246,7 @@ impl SeverityNumber { /// /// (logRecord.flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK) /// +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] diff --git a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.metrics.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.metrics.v1.rs index e2ca887657..6322b594b0 100644 --- a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.metrics.v1.rs +++ b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.metrics.v1.rs @@ -8,6 +8,7 @@ /// /// When new fields are added into this message, the OTLP request MUST be updated /// as well. +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -21,6 +22,7 @@ pub struct MetricsData { pub resource_metrics: ::prost::alloc::vec::Vec, } /// A collection of ScopeMetrics from a Resource. +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -38,6 +40,7 @@ pub struct ResourceMetrics { pub schema_url: ::prost::alloc::string::String, } /// A collection of Metrics produced by an Scope. 
+#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -139,6 +142,7 @@ pub struct ScopeMetrics { /// to support correct rate calculation. Although it may be omitted /// when the start time is truly unknown, setting StartTimeUnixNano is /// strongly encouraged. +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -164,6 +168,7 @@ pub mod metric { /// Data determines the aggregation type (if any) of the metric, what is the /// reported value type for the data points, as well as the relatationship to /// the time interval over which they are reported. + #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] @@ -189,6 +194,7 @@ pub mod metric { /// aggregation, regardless of aggregation temporalities. Therefore, /// AggregationTemporality is not included. Consequently, this also means /// "StartTimeUnixNano" is ignored for all data points. +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -198,6 +204,7 @@ pub struct Gauge { } /// Sum represents the type of a scalar metric that is calculated as a sum of all /// reported measurements over a time interval. +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -214,6 +221,7 @@ pub struct Sum { } /// Histogram represents the type of a metric that is calculated by aggregating /// as a Histogram of all reported measurements over a time interval. +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -227,6 +235,7 @@ pub struct Histogram { } /// ExponentialHistogram represents the type of a metric that is calculated by aggregating /// as a ExponentialHistogram of all reported double measurements over a time interval. +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -244,6 +253,7 @@ pub struct ExponentialHistogram { /// data type. These data points cannot always be merged in a meaningful way. /// While they can be useful in some applications, histogram data points are /// recommended for new applications. 
+#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -253,6 +263,7 @@ pub struct Summary { } /// NumberDataPoint is a single data point in a timeseries that describes the /// time-varying scalar value of a metric. +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -293,6 +304,7 @@ pub struct NumberDataPoint { pub mod number_data_point { /// The value itself. A point is considered invalid when one of the recognized /// value fields is not present inside this oneof. + #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] @@ -313,6 +325,7 @@ pub mod number_data_point { /// If the histogram does not contain the distribution of values, then both /// "explicit_bounds" and "bucket_counts" must be omitted and only "count" and /// "sum" are known. +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -395,6 +408,7 @@ pub struct HistogramDataPoint { /// summary statistics for a population of values, it may optionally contain the /// distribution of those values across a set of buckets. /// +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -493,6 +507,7 @@ pub struct ExponentialHistogramDataPoint { pub mod exponential_histogram_data_point { /// Buckets are a set of bucket counts, encoded in a contiguous array /// of counts. + #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -517,6 +532,7 @@ pub mod exponential_histogram_data_point { } /// SummaryDataPoint is a single data point in a timeseries that describes the /// time-varying values of a Summary metric. +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -572,6 +588,7 @@ pub mod summary_data_point { /// /// See the following issue for more context: /// + #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -591,6 +608,7 @@ pub mod summary_data_point { /// Exemplars also hold information about the environment when the measurement /// was recorded, for example the span and trace ID of the active span when the /// exemplar was recorded. 
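// ---------------------------------------------------------------------------
// Why the attribute also shows up on nested types such as the oneof enums in
// `pub mod metric` and `pub mod exemplar`: the build script registers it once
// for the whole proto path tree -- the "." path matches every message and
// enum. Condensed (as a sketch) from the tests/grpc_build.rs hunk further
// below:
//
//     tonic_build::configure()
//         .type_attribute(
//             ".",
//             "#[cfg_attr(feature = \"with-schemars\", derive(schemars::JsonSchema))]",
//         )
//
// Re-running that generator is what produced the repetitive edits in this file.
// ---------------------------------------------------------------------------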
+#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -629,6 +647,7 @@ pub mod exemplar { /// The value of the measurement that was recorded. An exemplar is /// considered invalid when one of the recognized value fields is not present /// inside this oneof. + #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] @@ -642,6 +661,7 @@ pub mod exemplar { /// AggregationTemporality defines how a metric aggregator reports aggregated /// values. It describes how those values relate to the time interval over /// which they are aggregated. +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] @@ -739,6 +759,7 @@ impl AggregationTemporality { /// /// (point.flags & DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK) == DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK /// +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] diff --git a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.resource.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.resource.v1.rs index 22adcbb22d..6ce1b1ed25 100644 --- a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.resource.v1.rs +++ b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.resource.v1.rs @@ -1,4 +1,5 @@ /// Resource information. +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] diff --git a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.trace.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.trace.v1.rs index 288f536383..67314a0211 100644 --- a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.trace.v1.rs +++ b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.trace.v1.rs @@ -8,6 +8,7 @@ /// /// When new fields are added into this message, the OTLP request MUST be updated /// as well. +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -21,6 +22,7 @@ pub struct TracesData { pub resource_spans: ::prost::alloc::vec::Vec, } /// A collection of ScopeSpans from a Resource. +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -38,6 +40,7 @@ pub struct ResourceSpans { pub schema_url: ::prost::alloc::string::String, } /// A collection of Spans produced by an InstrumentationScope. 
+#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -57,6 +60,7 @@ pub struct ScopeSpans { /// A Span represents a single operation performed by a single component of the system. /// /// The next available field id is 17. +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -163,6 +167,7 @@ pub struct Span { pub mod span { /// Event is a time-stamped annotation of the span, consisting of user-supplied /// text description and key-value pairs. + #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -190,6 +195,7 @@ pub mod span { /// different trace. For example, this can be used in batching operations, /// where a single batch handler processes multiple requests from different /// traces or when the handler receives a request from a different project. + #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -218,6 +224,7 @@ pub mod span { } /// SpanKind is the type of span. Can be used to specify additional relationships between spans /// in addition to a parent/child relationship. + #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[derive( Clone, @@ -284,6 +291,7 @@ pub mod span { } /// The Status type defines a logical error model that is suitable for different /// programming environments, including REST APIs and RPC APIs. 
+#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -299,6 +307,7 @@ pub struct Status { pub mod status { /// For the semantics of status codes see /// + #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[derive( Clone, diff --git a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.tracez.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.tracez.v1.rs index 611a523665..88989fcde2 100644 --- a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.tracez.v1.rs +++ b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.tracez.v1.rs @@ -1,3 +1,4 @@ +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -12,6 +13,7 @@ pub struct TracezCounts { #[prost(uint32, tag = "4")] pub error: u32, } +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -36,6 +38,7 @@ pub struct LatencyData { #[prost(message, repeated, tag = "8")] pub links: ::prost::alloc::vec::Vec, } +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -58,6 +61,7 @@ pub struct RunningData { #[prost(message, repeated, tag = "7")] pub links: ::prost::alloc::vec::Vec, } +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] diff --git a/opentelemetry-proto/tests/grpc_build.rs b/opentelemetry-proto/tests/grpc_build.rs index 822cebc2ec..4ba0a3c64d 100644 --- a/opentelemetry-proto/tests/grpc_build.rs +++ b/opentelemetry-proto/tests/grpc_build.rs @@ -59,6 +59,10 @@ fn build_tonic() { .build_client(true) .server_mod_attribute(".", "#[cfg(feature = \"gen-tonic\")]") .client_mod_attribute(".", "#[cfg(feature = \"gen-tonic\")]") + .type_attribute( + ".", + "#[cfg_attr(feature = \"with-schemars\", derive(schemars::JsonSchema))]", + ) .type_attribute( ".", "#[cfg_attr(feature = \"with-serde\", derive(serde::Serialize, serde::Deserialize))]", diff --git a/scripts/lint.sh b/scripts/lint.sh index f6b697011b..1a01f2a0ca 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -44,13 +44,16 @@ if rustup component add clippy; then cargo_feature opentelemetry-proto "full" cargo_feature opentelemetry-proto "gen-tonic,trace" cargo_feature opentelemetry-proto "gen-tonic,trace,with-serde" + cargo_feature opentelemetry-proto "gen-tonic,trace,with-schemars,with-serde" cargo_feature opentelemetry-proto "gen-tonic,metrics" cargo_feature opentelemetry-proto "gen-tonic,logs" cargo_feature opentelemetry-proto "gen-grpcio,trace" cargo_feature opentelemetry-proto "gen-grpcio,trace,with-serde" + cargo_feature opentelemetry-proto "gen-grpcio,trace,with-schemars,with-serde" cargo_feature 
opentelemetry-proto "gen-grpcio,metrics" cargo_feature opentelemetry-proto "gen-grpcio,logs" cargo_feature opentelemetry-proto "gen-grpcio,zpages" cargo_feature opentelemetry-proto "gen-grpcio,zpages,with-serde" + cargo_feature opentelemetry-proto "gen-grpcio,zpages,with-schemars,with-serde" fi From ec38660f69e398af1533e4a03cfe7a891921e87b Mon Sep 17 00:00:00 2001 From: Olivier Soucy Date: Mon, 4 Dec 2023 19:59:54 -0500 Subject: [PATCH 53/68] Fix metric_counter bench (#1424) --- opentelemetry-sdk/Cargo.toml | 1 + opentelemetry-sdk/benches/metric_counter.rs | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/opentelemetry-sdk/Cargo.toml b/opentelemetry-sdk/Cargo.toml index f8a7bf5530..4de782d350 100644 --- a/opentelemetry-sdk/Cargo.toml +++ b/opentelemetry-sdk/Cargo.toml @@ -66,6 +66,7 @@ harness = false [[bench]] name = "metric_counter" harness = false +required-features = ["metrics"] [[bench]] name = "attribute_set" diff --git a/opentelemetry-sdk/benches/metric_counter.rs b/opentelemetry-sdk/benches/metric_counter.rs index ba21da07c8..4bb4c84e6a 100644 --- a/opentelemetry-sdk/benches/metric_counter.rs +++ b/opentelemetry-sdk/benches/metric_counter.rs @@ -7,7 +7,7 @@ use opentelemetry_sdk::metrics::{ManualReader, SdkMeterProvider}; use rand::{rngs::SmallRng, Rng, SeedableRng}; // Run this benchmark with: -// cargo bench --bench metric_counter --features=metrics,testing +// cargo bench --bench metric_counter --features=metrics fn create_counter() -> Counter { let meter_provider: SdkMeterProvider = SdkMeterProvider::builder() .with_reader(ManualReader::builder().build()) From 1a3f368347206eedf8dddbb94fea7b8ba60c953e Mon Sep 17 00:00:00 2001 From: Jasper Zeng Date: Wed, 6 Dec 2023 16:43:09 +0800 Subject: [PATCH 54/68] feat: omit empty otel_scope_info and otel_target_info metrics (#1428) Fixes #1282 * omit otel_scope_info and otel_target_info metrics when scope attributes and resource attributes are empty. 
* Update `CHANGELOG.md` --- opentelemetry-prometheus/CHANGELOG.md | 4 +++ opentelemetry-prometheus/src/lib.rs | 14 ++++---- .../tests/data/empty_resource.txt | 3 -- .../tests/integration_test.rs | 36 +++++++++++++++---- 4 files changed, 42 insertions(+), 15 deletions(-) diff --git a/opentelemetry-prometheus/CHANGELOG.md b/opentelemetry-prometheus/CHANGELOG.md index 9436957992..a6093b5a27 100644 --- a/opentelemetry-prometheus/CHANGELOG.md +++ b/opentelemetry-prometheus/CHANGELOG.md @@ -2,6 +2,10 @@ ## vNext +### Changed + +- Omit empty `otel_scope_info` and `otel_target_info` metrics [#1428](https://github.com/open-telemetry/opentelemetry-rust/pull/1428) + ## v0.14.1 ### Fixed diff --git a/opentelemetry-prometheus/src/lib.rs b/opentelemetry-prometheus/src/lib.rs index bb637f52b2..6ff87d3ed0 100644 --- a/opentelemetry-prometheus/src/lib.rs +++ b/opentelemetry-prometheus/src/lib.rs @@ -299,17 +299,19 @@ impl prometheus::core::Collector for Collector { // Resource should be immutable, we don't need to compute again create_info_metric(TARGET_INFO_NAME, TARGET_INFO_DESCRIPTION, &metrics.resource) }); - if !self.disable_target_info { + if !self.disable_target_info && !metrics.resource.is_empty() { res.push(target_info.clone()) } for scope_metrics in metrics.scope_metrics { let scope_labels = if !self.disable_scope_info { - let scope_info = inner - .scope_infos - .entry(scope_metrics.scope.clone()) - .or_insert_with_key(create_scope_info_metric); - res.push(scope_info.clone()); + if !scope_metrics.scope.attributes.is_empty() { + let scope_info = inner + .scope_infos + .entry(scope_metrics.scope.clone()) + .or_insert_with_key(create_scope_info_metric); + res.push(scope_info.clone()); + } let mut labels = Vec::with_capacity(1 + scope_metrics.scope.version.is_some() as usize); diff --git a/opentelemetry-prometheus/tests/data/empty_resource.txt b/opentelemetry-prometheus/tests/data/empty_resource.txt index e313006e34..69f0e83668 100644 --- a/opentelemetry-prometheus/tests/data/empty_resource.txt +++ b/opentelemetry-prometheus/tests/data/empty_resource.txt @@ -4,6 +4,3 @@ foo_total{A="B",C="D",E="true",F="42",otel_scope_name="testmeter",otel_scope_ver # HELP otel_scope_info Instrumentation Scope metadata # TYPE otel_scope_info gauge otel_scope_info{otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 1 -# HELP target_info Target metadata -# TYPE target_info gauge -target_info 1 diff --git a/opentelemetry-prometheus/tests/integration_test.rs b/opentelemetry-prometheus/tests/integration_test.rs index aff4c43a99..0786b1a4c0 100644 --- a/opentelemetry-prometheus/tests/integration_test.rs +++ b/opentelemetry-prometheus/tests/integration_test.rs @@ -352,8 +352,12 @@ fn prometheus_exporter_integration() { .unwrap(), ) .build(); - let meter = - provider.versioned_meter("testmeter", Some("v0.1.0"), None::<&'static str>, None); + let meter = provider.versioned_meter( + "testmeter", + Some("v0.1.0"), + None::<&'static str>, + Some(vec![KeyValue::new("k", "v")]), + ); (tc.record_metrics)(meter); let content = fs::read_to_string(Path::new("./tests/data").join(tc.expected_file)) @@ -401,7 +405,12 @@ fn multiple_scopes() { .build(); let foo_counter = provider - .versioned_meter("meterfoo", Some("v0.1.0"), None::<&'static str>, None) + .versioned_meter( + "meterfoo", + Some("v0.1.0"), + None::<&'static str>, + Some(vec![KeyValue::new("k", "v")]), + ) .u64_counter("foo") .with_unit(Unit::new("ms")) .with_description("meter foo counter") @@ -409,7 +418,12 @@ fn multiple_scopes() { foo_counter.add(100, 
&[KeyValue::new("type", "foo")]); let bar_counter = provider - .versioned_meter("meterbar", Some("v0.1.0"), None::<&'static str>, None) + .versioned_meter( + "meterbar", + Some("v0.1.0"), + None::<&'static str>, + Some(vec![KeyValue::new("k", "v")]), + ) .u64_counter("bar") .with_unit(Unit::new("ms")) .with_description("meter bar counter") @@ -735,8 +749,18 @@ fn duplicate_metrics() { .with_reader(exporter) .build(); - let meter_a = provider.versioned_meter("ma", Some("v0.1.0"), None::<&'static str>, None); - let meter_b = provider.versioned_meter("mb", Some("v0.1.0"), None::<&'static str>, None); + let meter_a = provider.versioned_meter( + "ma", + Some("v0.1.0"), + None::<&'static str>, + Some(vec![KeyValue::new("k", "v")]), + ); + let meter_b = provider.versioned_meter( + "mb", + Some("v0.1.0"), + None::<&'static str>, + Some(vec![KeyValue::new("k", "v")]), + ); (tc.record_metrics)(meter_a, meter_b); From 670d34a460fd4fb856f9af1f8044a729f2e7cf6c Mon Sep 17 00:00:00 2001 From: Lalit Kumar Bhasin Date: Thu, 7 Dec 2023 10:12:49 -0800 Subject: [PATCH 55/68] Remove `tracing-opentelemetry` dependency from `tracing-grpc` example (#1426) --- examples/tracing-grpc/Cargo.toml | 10 ++--- examples/tracing-grpc/src/client.rs | 70 ++++++++++++++++++----------- examples/tracing-grpc/src/server.rs | 46 +++++++++++-------- 3 files changed, 74 insertions(+), 52 deletions(-) diff --git a/examples/tracing-grpc/Cargo.toml b/examples/tracing-grpc/Cargo.toml index 625b793955..6875b64db6 100644 --- a/examples/tracing-grpc/Cargo.toml +++ b/examples/tracing-grpc/Cargo.toml @@ -14,16 +14,12 @@ name = "grpc-client" path = "src/client.rs" [dependencies] -opentelemetry = { version = "0.20" } -opentelemetry_sdk = { version = "0.20", features = ["rt-tokio"] } -opentelemetry-jaeger = { version = "0.19", features = ["rt-tokio"] } +opentelemetry = { path = "../../opentelemetry" } +opentelemetry_sdk = { path = "../../opentelemetry-sdk", features = ["rt-tokio"]} +opentelemetry-stdout = { path = "../../opentelemetry-stdout", features = ["trace"] } prost = "0.11" tokio = { version = "1.28", features = ["full"] } tonic = "0.9.2" -tracing = "0.1" -tracing-futures = "0.2" -tracing-opentelemetry = "0.20" -tracing-subscriber = { version = "0.3", features = ["env-filter"] } [build-dependencies] tonic-build = "0.9.2" diff --git a/examples/tracing-grpc/src/client.rs b/examples/tracing-grpc/src/client.rs index aacbd71312..02e15dea6f 100644 --- a/examples/tracing-grpc/src/client.rs +++ b/examples/tracing-grpc/src/client.rs @@ -1,10 +1,25 @@ use hello_world::greeter_client::GreeterClient; use hello_world::HelloRequest; use opentelemetry::{global, propagation::Injector}; -use tracing::*; -use tracing_futures::Instrument; -use tracing_opentelemetry::OpenTelemetrySpanExt; -use tracing_subscriber::prelude::*; +use opentelemetry_sdk::{ + propagation::TraceContextPropagator, runtime::Tokio, trace::TracerProvider, +}; +use opentelemetry_stdout::SpanExporter; + +use opentelemetry::{ + trace::{SpanKind, TraceContextExt, Tracer}, + Context, KeyValue, +}; + +fn init_tracer() { + global::set_text_map_propagator(TraceContextPropagator::new()); + // Install stdout exporter pipeline to be able to retrieve the collected spans. 
+ let provider = TracerProvider::builder() + .with_batch_exporter(SpanExporter::default(), Tokio) + .build(); + + global::set_tracer_provider(provider); +} struct MetadataMap<'a>(&'a mut tonic::metadata::MetadataMap); @@ -24,45 +39,46 @@ pub mod hello_world { tonic::include_proto!("helloworld"); } -#[instrument] async fn greet() -> Result<(), Box> { - let mut client = GreeterClient::connect("http://[::1]:50051") - .instrument(info_span!("client connect")) - .await?; + let tracer = global::tracer("example/client"); + let span = tracer + .span_builder(String::from("Greeter/client")) + .with_kind(SpanKind::Client) + .with_attributes(vec![KeyValue::new("component", "grpc")]) + .start(&tracer); + let cx = Context::current_with_span(span); + let mut client = GreeterClient::connect("http://[::1]:50051").await?; let mut request = tonic::Request::new(HelloRequest { name: "Tonic".into(), }); global::get_text_map_propagator(|propagator| { - propagator.inject_context( - &tracing::Span::current().context(), - &mut MetadataMap(request.metadata_mut()), - ) + propagator.inject_context(&cx, &mut MetadataMap(request.metadata_mut())) }); - let response = client - .say_hello(request) - .instrument(info_span!("say_hello")) - .await?; + let response = client.say_hello(request).await; + + let status = match response { + Ok(_res) => "OK".to_string(), + Err(status) => { + // Access the status code + let status_code = status.code(); + status_code.to_string() + } + }; + cx.span().add_event( + "Got response!".to_string(), + vec![KeyValue::new("status", status)], + ); - info!("Response received: {:?}", response); Ok(()) } #[tokio::main] async fn main() -> Result<(), Box> { - global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new()); - let tracer = opentelemetry_jaeger::new_agent_pipeline() - .with_service_name("grpc-client") - .install_simple()?; - tracing_subscriber::registry() - .with(tracing_subscriber::EnvFilter::new("INFO")) - .with(tracing_opentelemetry::layer().with_tracer(tracer)) - .try_init()?; - + init_tracer(); greet().await?; - opentelemetry::global::shutdown_tracer_provider(); Ok(()) diff --git a/examples/tracing-grpc/src/server.rs b/examples/tracing-grpc/src/server.rs index 7e05cbd602..3831907cf0 100644 --- a/examples/tracing-grpc/src/server.rs +++ b/examples/tracing-grpc/src/server.rs @@ -1,10 +1,25 @@ use hello_world::greeter_server::{Greeter, GreeterServer}; use hello_world::{HelloReply, HelloRequest}; -use opentelemetry::{global, propagation::Extractor}; +use opentelemetry::{ + global, + propagation::Extractor, + trace::{Span, SpanKind, Tracer}, +}; +use opentelemetry_sdk::{ + propagation::TraceContextPropagator, runtime::Tokio, trace::TracerProvider, +}; +use opentelemetry_stdout::SpanExporter; use tonic::{transport::Server, Request, Response, Status}; -use tracing::*; -use tracing_opentelemetry::OpenTelemetrySpanExt; -use tracing_subscriber::prelude::*; + +fn init_tracer() { + global::set_text_map_propagator(TraceContextPropagator::new()); + // Install stdout exporter pipeline to be able to retrieve the collected spans. + let provider = TracerProvider::builder() + .with_batch_exporter(SpanExporter::default(), Tokio) + .build(); + + global::set_tracer_provider(provider); +} #[allow(clippy::derive_partial_eq_without_eq)] // tonic don't derive Eq for generated types. We shouldn't manually change it. 
pub mod hello_world { @@ -31,10 +46,9 @@ impl<'a> Extractor for MetadataMap<'a> { } } -#[instrument] -fn expensive_fn(to_print: String) { +fn expensive_fn(to_print: String, span: &mut S) { std::thread::sleep(std::time::Duration::from_millis(20)); - info!("{}", to_print); + span.add_event(to_print, vec![]); } #[derive(Debug, Default)] @@ -42,17 +56,20 @@ pub struct MyGreeter {} #[tonic::async_trait] impl Greeter for MyGreeter { - #[instrument] async fn say_hello( &self, request: Request, // Accept request of type HelloRequest ) -> Result, Status> { let parent_cx = global::get_text_map_propagator(|prop| prop.extract(&MetadataMap(request.metadata()))); - tracing::Span::current().set_parent(parent_cx); + let tracer = global::tracer("example/server"); + let mut span = tracer + .span_builder("Greeter/server") + .with_kind(SpanKind::Server) + .start_with_context(&tracer, &parent_cx); let name = request.into_inner().name; - expensive_fn(format!("Got name: {name:?}")); + expensive_fn(format!("Got name: {name:?}"), &mut span); // Return an instance of type HelloReply let reply = hello_world::HelloReply { @@ -65,14 +82,7 @@ impl Greeter for MyGreeter { #[tokio::main] async fn main() -> Result<(), Box> { - global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new()); - let tracer = opentelemetry_jaeger::new_agent_pipeline() - .with_service_name("grpc-server") - .install_batch(opentelemetry_sdk::runtime::Tokio)?; - tracing_subscriber::registry() - .with(tracing_subscriber::EnvFilter::new("INFO")) - .with(tracing_opentelemetry::layer().with_tracer(tracer)) - .try_init()?; + init_tracer(); let addr = "[::1]:50051".parse()?; let greeter = MyGreeter::default(); From 66c1d5f7343ea7c75ab2750e7eb14c75ec58aed5 Mon Sep 17 00:00:00 2001 From: Olivier Soucy Date: Thu, 7 Dec 2023 13:53:30 -0500 Subject: [PATCH 56/68] Normalized metadata in opentelemetry-appender-tracing. 
(#1380) --- opentelemetry-appender-tracing/CHANGELOG.md | 5 + opentelemetry-appender-tracing/Cargo.toml | 9 +- opentelemetry-appender-tracing/src/layer.rs | 358 +++++++++++++++++--- 3 files changed, 328 insertions(+), 44 deletions(-) diff --git a/opentelemetry-appender-tracing/CHANGELOG.md b/opentelemetry-appender-tracing/CHANGELOG.md index 5eedb4d1ef..587e279113 100644 --- a/opentelemetry-appender-tracing/CHANGELOG.md +++ b/opentelemetry-appender-tracing/CHANGELOG.md @@ -11,6 +11,11 @@ ### Added - Add log appender versions to loggers (#1182) +- New experimental metadata attributes feature (experimental\_metadata\_attributes) [#1380](https://github.com/open-telemetry/opentelemetry-rust/pull/1380) + - Experimental new attributes for tracing metadata + - Fixes the following for events emitted using log crate + - Normalized metadata fields + - Remove redundant metadata ## v0.1.0 diff --git a/opentelemetry-appender-tracing/Cargo.toml b/opentelemetry-appender-tracing/Cargo.toml index c0775d2f14..d38a6f9325 100644 --- a/opentelemetry-appender-tracing/Cargo.toml +++ b/opentelemetry-appender-tracing/Cargo.toml @@ -11,17 +11,20 @@ license = "Apache-2.0" rust-version = "1.65" [dependencies] +log = { version = "0.4", optional = true } opentelemetry = { version = "0.21", path = "../opentelemetry", features = ["logs"] } opentelemetry_sdk = { version = "0.21", path = "../opentelemetry-sdk", features = ["logs"] } tracing = {version = "0.1", default-features = false, features = ["std"]} -tracing-core = "0.1.31" -tracing-subscriber = { version = "0.3.0", default-features = false, features = ["registry", "std"] } +tracing-core = "0.1" +tracing-log = { version = "0.2", optional = true } +tracing-subscriber = { version = "0.3", default-features = false, features = ["registry", "std"] } once_cell = "1.13.0" [dev-dependencies] opentelemetry-stdout = { path = "../opentelemetry-stdout", features = ["logs"] } [features] +experimental_metadata_attributes = ["dep:tracing-log"] logs_level_enabled = ["opentelemetry/logs_level_enabled", "opentelemetry_sdk/logs_level_enabled"] default = ["logs_level_enabled"] -testing = ["opentelemetry_sdk/testing"] \ No newline at end of file +testing = ["opentelemetry_sdk/testing", "dep:tracing-log", "dep:log"] diff --git a/opentelemetry-appender-tracing/src/layer.rs b/opentelemetry-appender-tracing/src/layer.rs index 6c9232f043..217f62e0e5 100644 --- a/opentelemetry-appender-tracing/src/layer.rs +++ b/opentelemetry-appender-tracing/src/layer.rs @@ -1,61 +1,134 @@ -use opentelemetry::logs::{LogRecord, Logger, LoggerProvider, Severity}; +use opentelemetry::{ + logs::{AnyValue, LogRecord, Logger, LoggerProvider, Severity}, + Key, +}; use std::borrow::Cow; -use tracing_core::Level; +use tracing_core::{Level, Metadata}; +#[cfg(feature = "experimental_metadata_attributes")] +use tracing_log::NormalizeEvent; use tracing_subscriber::Layer; const INSTRUMENTATION_LIBRARY_NAME: &str = "opentelemetry-appender-tracing"; /// Visitor to record the fields from the event record. 
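// ---------------------------------------------------------------------------
// A hedged aside on the path helper introduced below: `get_filename` trims a
// file path down to its final component for the `log.source.file.name`
// attribute, handling both separator styles:
fn get_filename_examples() {
    assert_eq!(get_filename("src/layer.rs"), "layer.rs"); // unix-style path
    assert_eq!(get_filename(r"src\layer.rs"), "layer.rs"); // windows-style path
    assert_eq!(get_filename("layer.rs"), "layer.rs"); // bare file name passes through
}
// ---------------------------------------------------------------------------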
-struct EventVisitor<'a> { - log_record: &'a mut LogRecord, +#[derive(Default)] +struct EventVisitor { + log_record_attributes: Vec<(Key, AnyValue)>, + log_record_body: Option, } -impl<'a> tracing::field::Visit for EventVisitor<'a> { +#[cfg(feature = "experimental_metadata_attributes")] +fn is_metadata(field: &'static str) -> bool { + field + .strip_prefix("log.") + .map(|remainder| { + matches!( + remainder, + "file" + | "line" + | "module_path" + | "module.path" + | "name" + | "source.file.line" + | "source.file.path" + | "source.file.name" + | "target" + ) + }) + .unwrap_or(false) +} + +#[cfg(feature = "experimental_metadata_attributes")] +fn get_filename(filepath: &str) -> &str { + if let Some((_, filename)) = filepath.rsplit_once('/') { + return filename; + } + if let Some((_, filename)) = filepath.rsplit_once('\\') { + return filename; + } + filepath +} + +impl EventVisitor { + fn visit_metadata(&mut self, meta: &Metadata) { + self.log_record_attributes + .push(("name".into(), meta.name().into())); + + #[cfg(feature = "experimental_metadata_attributes")] + self.visit_experimental_metadata(meta); + } + + #[cfg(feature = "experimental_metadata_attributes")] + fn visit_experimental_metadata(&mut self, meta: &Metadata) { + self.log_record_attributes + .push(("log.target".into(), meta.target().to_owned().into())); + + if let Some(module_path) = meta.module_path() { + self.log_record_attributes + .push(("log.module.path".into(), module_path.to_owned().into())); + } + + if let Some(filepath) = meta.file() { + self.log_record_attributes + .push(("log.source.file.path".into(), filepath.to_owned().into())); + self.log_record_attributes.push(( + "log.source.file.name".into(), + get_filename(filepath).to_owned().into(), + )); + } + + if let Some(line) = meta.line() { + self.log_record_attributes + .push(("log.source.file.line".into(), line.into())); + } + } + + fn push_to_otel_log_record(self, log_record: &mut LogRecord) { + log_record.body = self.log_record_body; + log_record.attributes = Some(self.log_record_attributes); + } +} + +impl tracing::field::Visit for EventVisitor { fn record_debug(&mut self, field: &tracing::field::Field, value: &dyn std::fmt::Debug) { + #[cfg(feature = "experimental_metadata_attributes")] + if is_metadata(field.name()) { + return; + } if field.name() == "message" { - self.log_record.body = Some(format!("{value:?}").into()); - } else if let Some(ref mut vec) = self.log_record.attributes { - vec.push((field.name().into(), format!("{value:?}").into())); + self.log_record_body = Some(format!("{value:?}").into()); } else { - let vec = vec![(field.name().into(), format!("{value:?}").into())]; - self.log_record.attributes = Some(vec); + self.log_record_attributes + .push((field.name().into(), format!("{value:?}").into())); } } fn record_str(&mut self, field: &tracing_core::Field, value: &str) { - if let Some(ref mut vec) = self.log_record.attributes { - vec.push((field.name().into(), value.to_owned().into())); - } else { - let vec = vec![(field.name().into(), value.to_owned().into())]; - self.log_record.attributes = Some(vec); + #[cfg(feature = "experimental_metadata_attributes")] + if is_metadata(field.name()) { + return; } + self.log_record_attributes + .push((field.name().into(), value.to_owned().into())); } fn record_bool(&mut self, field: &tracing_core::Field, value: bool) { - if let Some(ref mut vec) = self.log_record.attributes { - vec.push((field.name().into(), value.into())); - } else { - let vec = vec![(field.name().into(), value.into())]; - 
self.log_record.attributes = Some(vec); - } + self.log_record_attributes + .push((field.name().into(), value.into())); } fn record_f64(&mut self, field: &tracing::field::Field, value: f64) { - if let Some(ref mut vec) = self.log_record.attributes { - vec.push((field.name().into(), value.into())); - } else { - let vec = vec![(field.name().into(), value.into())]; - self.log_record.attributes = Some(vec); - } + self.log_record_attributes + .push((field.name().into(), value.into())); } fn record_i64(&mut self, field: &tracing::field::Field, value: i64) { - if let Some(ref mut vec) = self.log_record.attributes { - vec.push((field.name().into(), value.into())); - } else { - let vec = vec![(field.name().into(), value.into())]; - self.log_record.attributes = Some(vec); + #[cfg(feature = "experimental_metadata_attributes")] + if is_metadata(field.name()) { + return; } + self.log_record_attributes + .push((field.name().into(), value.into())); } // TODO: Remaining field types from AnyValue : Bytes, ListAny, Boolean @@ -99,23 +172,27 @@ where event: &tracing::Event<'_>, _ctx: tracing_subscriber::layer::Context<'_, S>, ) { + #[cfg(feature = "experimental_metadata_attributes")] + let normalized_meta = event.normalized_metadata(); + #[cfg(feature = "experimental_metadata_attributes")] + let meta = normalized_meta.as_ref().unwrap_or_else(|| event.metadata()); + + #[cfg(not(feature = "experimental_metadata_attributes"))] let meta = event.metadata(); + let mut log_record: LogRecord = LogRecord::default(); log_record.severity_number = Some(severity_of_level(meta.level())); log_record.severity_text = Some(meta.level().to_string().into()); - // add the `name` metadata to attributes - // TBD - Propose this to be part of log_record metadata. - let vec = vec![("name", meta.name())]; - log_record.attributes = Some(vec.into_iter().map(|(k, v)| (k.into(), v.into())).collect()); - // Not populating ObservedTimestamp, instead relying on OpenTelemetry // API to populate it with current time. - let mut visitor = EventVisitor { - log_record: &mut log_record, - }; + let mut visitor = EventVisitor::default(); + visitor.visit_metadata(meta); + // Visit fields. event.record(&mut visitor); + visitor.push_to_otel_log_record(&mut log_record); + self.logger.emit(log_record); } @@ -196,11 +273,29 @@ mod tests { .attributes .clone() .expect("Attributes are expected"); + #[cfg(not(feature = "experimental_metadata_attributes"))] assert_eq!(attributes.len(), 4); + #[cfg(feature = "experimental_metadata_attributes")] + assert_eq!(attributes.len(), 9); assert!(attributes.contains(&(Key::new("name"), "my-event-name".into()))); assert!(attributes.contains(&(Key::new("event_id"), 20.into()))); assert!(attributes.contains(&(Key::new("user_name"), "otel".into()))); assert!(attributes.contains(&(Key::new("user_email"), "otel@opentelemetry.io".into()))); + #[cfg(feature = "experimental_metadata_attributes")] + { + assert!(attributes.contains(&(Key::new("log.source.file.name"), "layer.rs".into()))); + assert!(attributes.contains(&( + Key::new("log.module.path"), + "opentelemetry_appender_tracing::layer::tests".into() + ))); + // The other 3 experimental_metadata_attributes are too unstable to check their value. + // Ex.: The path will be different on a Windows and Linux machine. + // Ex.: The line can change easily if someone makes changes in this source file. 
+ let attributes_key: Vec = attributes.iter().map(|(key, _)| key.clone()).collect(); + assert!(attributes_key.contains(&Key::new("log.source.file.path"))); + assert!(attributes_key.contains(&Key::new("log.source.file.line"))); + assert!(attributes_key.contains(&Key::new("log.target"))); + } } #[test] @@ -231,7 +326,7 @@ mod tests { // logging is done inside span context. error!(name: "my-event-name", target: "my-system", event_id = 20, user_name = "otel", user_email = "otel@opentelemetry.io"); - (trace_id, span_id) + (trace_id, span_id) }); logger_provider.force_flush(); @@ -275,10 +370,191 @@ mod tests { .attributes .clone() .expect("Attributes are expected"); + #[cfg(not(feature = "experimental_metadata_attributes"))] assert_eq!(attributes.len(), 4); + #[cfg(feature = "experimental_metadata_attributes")] + assert_eq!(attributes.len(), 9); assert!(attributes.contains(&(Key::new("name"), "my-event-name".into()))); assert!(attributes.contains(&(Key::new("event_id"), 20.into()))); assert!(attributes.contains(&(Key::new("user_name"), "otel".into()))); assert!(attributes.contains(&(Key::new("user_email"), "otel@opentelemetry.io".into()))); + #[cfg(feature = "experimental_metadata_attributes")] + { + assert!(attributes.contains(&(Key::new("log.source.file.name"), "layer.rs".into()))); + assert!(attributes.contains(&( + Key::new("log.module.path"), + "opentelemetry_appender_tracing::layer::tests".into() + ))); + // The other 3 experimental_metadata_attributes are too unstable to check their value. + // Ex.: The path will be different on a Windows and Linux machine. + // Ex.: The line can change easily if someone makes changes in this source file. + let attributes_key: Vec = attributes.iter().map(|(key, _)| key.clone()).collect(); + assert!(attributes_key.contains(&Key::new("log.source.file.path"))); + assert!(attributes_key.contains(&Key::new("log.source.file.line"))); + assert!(attributes_key.contains(&Key::new("log.target"))); + } + } + + #[test] + fn tracing_appender_standalone_with_tracing_log() { + // Arrange + let exporter: InMemoryLogsExporter = InMemoryLogsExporter::default(); + let logger_provider = LoggerProvider::builder() + .with_simple_exporter(exporter.clone()) + .build(); + + let layer = layer::OpenTelemetryTracingBridge::new(&logger_provider); + let subscriber = tracing_subscriber::registry().with(layer); + + // avoiding setting tracing subscriber as global as that does not + // play well with unit tests. + let _guard = tracing::subscriber::set_default(subscriber); + drop(tracing_log::LogTracer::init()); + + // Act + log::error!("log from log crate"); + logger_provider.force_flush(); + + // Assert TODO: move to helper methods + let exported_logs = exporter + .get_emitted_logs() + .expect("Logs are expected to be exported."); + assert_eq!(exported_logs.len(), 1); + let log = exported_logs + .get(0) + .expect("Atleast one log is expected to be present."); + + // Validate common fields + assert_eq!(log.instrumentation.name, "opentelemetry-appender-tracing"); + assert_eq!(log.record.severity_number, Some(Severity::Error)); + + // Validate trace context is none. + assert!(log.record.trace_context.is_none()); + + // Validate attributes + let attributes: Vec<(Key, AnyValue)> = log + .record + .attributes + .clone() + .expect("Attributes are expected"); + + // Attributes can be polluted when we don't use this feature. 
+ #[cfg(feature = "experimental_metadata_attributes")] + assert_eq!(attributes.len(), 6); + + assert!(attributes.contains(&(Key::new("name"), "log event".into()))); + + #[cfg(feature = "experimental_metadata_attributes")] + { + assert!(attributes.contains(&(Key::new("log.source.file.name"), "layer.rs".into()))); + assert!(attributes.contains(&( + Key::new("log.module.path"), + "opentelemetry_appender_tracing::layer::tests".into() + ))); + // The other 3 experimental_metadata_attributes are too unstable to check their value. + // Ex.: The path will be different on a Windows and Linux machine. + // Ex.: The line can change easily if someone makes changes in this source file. + let attributes_key: Vec = attributes.iter().map(|(key, _)| key.clone()).collect(); + assert!(attributes_key.contains(&Key::new("log.source.file.path"))); + assert!(attributes_key.contains(&Key::new("log.source.file.line"))); + assert!(attributes_key.contains(&Key::new("log.target"))); + } + } + + #[test] + fn tracing_appender_inside_tracing_context_with_tracing_log() { + // Arrange + let exporter: InMemoryLogsExporter = InMemoryLogsExporter::default(); + let logger_provider = LoggerProvider::builder() + .with_simple_exporter(exporter.clone()) + .build(); + + let layer = layer::OpenTelemetryTracingBridge::new(&logger_provider); + let subscriber = tracing_subscriber::registry().with(layer); + + // avoiding setting tracing subscriber as global as that does not + // play well with unit tests. + let _guard = tracing::subscriber::set_default(subscriber); + drop(tracing_log::LogTracer::init()); + + // setup tracing as well. + let tracer_provider = TracerProvider::builder() + .with_config(config().with_sampler(Sampler::AlwaysOn)) + .build(); + let tracer = tracer_provider.tracer("test-tracer"); + + // Act + let (trace_id_expected, span_id_expected) = tracer.in_span("test-span", |cx| { + let trace_id = cx.span().span_context().trace_id(); + let span_id = cx.span().span_context().span_id(); + + // logging is done inside span context. + log::error!("log from log crate"); + (trace_id, span_id) + }); + + logger_provider.force_flush(); + + // Assert TODO: move to helper methods + let exported_logs = exporter + .get_emitted_logs() + .expect("Logs are expected to be exported."); + assert_eq!(exported_logs.len(), 1); + let log = exported_logs + .get(0) + .expect("Atleast one log is expected to be present."); + + // validate common fields. + assert_eq!(log.instrumentation.name, "opentelemetry-appender-tracing"); + assert_eq!(log.record.severity_number, Some(Severity::Error)); + + // validate trace context. + assert!(log.record.trace_context.is_some()); + assert_eq!( + log.record.trace_context.as_ref().unwrap().trace_id, + trace_id_expected + ); + assert_eq!( + log.record.trace_context.as_ref().unwrap().span_id, + span_id_expected + ); + assert_eq!( + log.record + .trace_context + .as_ref() + .unwrap() + .trace_flags + .unwrap(), + TraceFlags::SAMPLED + ); + + // validate attributes. + let attributes: Vec<(Key, AnyValue)> = log + .record + .attributes + .clone() + .expect("Attributes are expected"); + + // Attributes can be polluted when we don't use this feature. 
+ #[cfg(feature = "experimental_metadata_attributes")] + assert_eq!(attributes.len(), 6); + + assert!(attributes.contains(&(Key::new("name"), "log event".into()))); + + #[cfg(feature = "experimental_metadata_attributes")] + { + assert!(attributes.contains(&(Key::new("log.source.file.name"), "layer.rs".into()))); + assert!(attributes.contains(&( + Key::new("log.module.path"), + "opentelemetry_appender_tracing::layer::tests".into() + ))); + // The other 3 experimental_metadata_attributes are too unstable to check their value. + // Ex.: The path will be different on a Windows and Linux machine. + // Ex.: The line can change easily if someone makes changes in this source file. + let attributes_key: Vec = attributes.iter().map(|(key, _)| key.clone()).collect(); + assert!(attributes_key.contains(&Key::new("log.source.file.path"))); + assert!(attributes_key.contains(&Key::new("log.source.file.line"))); + assert!(attributes_key.contains(&Key::new("log.target"))); + } } } From 443f68b1b8cd9016aec7f21ce913884f473c848c Mon Sep 17 00:00:00 2001 From: Jacob Rothstein Date: Thu, 7 Dec 2023 19:54:25 -0800 Subject: [PATCH 57/68] Add trillium-opentelemetry to readme (#1430) --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index 6db1293150..e6cd381a2e 100644 --- a/README.md +++ b/README.md @@ -130,6 +130,7 @@ maintained by the `opentelemetry` project. These include: Application Insights] exporter. * [`opentelemetry-tide`] provides integration for the [`Tide`] web server and ecosystem. +* [`trillium-opentelemetry`] provides metrics instrumentation for [`trillium`] http servers following [semantic-conventions](https://github.com/open-telemetry/semantic-conventions/blob/main/docs/http/http-metrics.md). If you're the maintainer of an `opentelemetry` ecosystem crate not listed above, please let us know! We'd love to add your project to the list! @@ -161,6 +162,8 @@ above, please let us know! We'd love to add your project to the list! [`Tide`]: https://crates.io/crates/tide [`opentelemetry-stackdriver`]: https://crates.io/crates/opentelemetry-stackdriver [Cloud Trace]: https://cloud.google.com/trace/ +[`trillium-opentelemetry`]: https://github.com/trillium-rs/trillium-opentelemetry +[`trillium`]: https://github.com/trillium-rs/trillium ## Supported Rust Versions From 8f237f9d991a089b9f802e27763b9363c99af9ac Mon Sep 17 00:00:00 2001 From: Thomas Knickman Date: Mon, 11 Dec 2023 18:21:34 -0500 Subject: [PATCH 58/68] fix(docs): dead examples link in README.md (#1436) --- opentelemetry/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/opentelemetry/README.md b/opentelemetry/README.md index 70ca36aa9c..f83a6a1ef9 100644 --- a/opentelemetry/README.md +++ b/opentelemetry/README.md @@ -52,7 +52,7 @@ fn main() { } ``` -See the [examples](./examples) directory for different integration patterns. +See the [examples](../examples) directory for different integration patterns. ## Ecosystem From 1dd269af453ad335fd620e1ea77acd868d673c1d Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Mon, 11 Dec 2023 18:15:23 -0800 Subject: [PATCH 59/68] Update meeting time to 9 AM PT (#1438) --- CONTRIBUTING.md | 4 ++-- README.md | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e831d0df42..12dd9c12a2 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,7 +1,7 @@ # Contributing to opentelemetry-rust -The Rust special interest group (SIG) meets weekly on Tuesdays at 8 AM Pacific -Time (16:00 UTC). 
The meeting is subject to change depending on contributors'
 availability. Check the [OpenTelemetry community
 calendar](https://calendar.google.com/calendar/embed?src=google.com_b79e3e90j7bbsa2n2p5an5lf60%40group.calendar.google.com)
 for specific dates and for Zoom meeting links. "OTel Rust SIG" is the name of
diff --git a/README.md b/README.md
index e6cd381a2e..95492a2e3f 100644
--- a/README.md
+++ b/README.md
@@ -182,8 +182,8 @@ this policy.
 
 See the [contributing file](CONTRIBUTING.md).
 
-The Rust special interest group (SIG) meets weekly on Tuesdays at 8 AM Pacific
-Time (16:00 UTC). The meeting is subject to change depending on contributors'
+The Rust special interest group (SIG) meets weekly on Tuesdays at 9 AM Pacific
+Time. The meeting is subject to change depending on contributors'
 availability. Check the [OpenTelemetry community
 calendar](https://calendar.google.com/calendar/embed?src=google.com_b79e3e90j7bbsa2n2p5an5lf60%40group.calendar.google.com)
 for specific dates and for Zoom meeting links. "OTel Rust SIG" is the name of

From 038b91b651a56a9cce76cc57cba62ba8be0eefc2 Mon Sep 17 00:00:00 2001
From: Lalit Kumar Bhasin
Date: Tue, 12 Dec 2023 12:23:42 -0800
Subject: [PATCH 60/68] Add experimental synchronous gauge (#1410)

---
 CONTRIBUTING.md                             | 21 ++++
 examples/metrics-basic/Cargo.toml           |  8 ++-
 examples/metrics-basic/src/main.rs          | 32 +++++++--
 opentelemetry-sdk/CHANGELOG.md              |  4 ++
 opentelemetry-sdk/src/metrics/instrument.rs | 16 ++++-
 opentelemetry-sdk/src/metrics/meter.rs      | 60 +++++++++++++++-
 opentelemetry-sdk/src/metrics/pipeline.rs   | 14 ++--
 opentelemetry-sdk/src/metrics/reader.rs     |  2 +
 opentelemetry/CHANGELOG.md                  |  6 ++
 opentelemetry/Cargo.toml                    |  1 +
 .../src/metrics/instruments/gauge.rs        | 68 ++++++++++++++++++-
 opentelemetry/src/metrics/meter.rs          | 35 ++++++++++
 opentelemetry/src/metrics/mod.rs            | 32 ++++++++-
 opentelemetry/src/metrics/noop.rs           |  8 ++-
 14 files changed, 285 insertions(+), 22 deletions(-)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 12dd9c12a2..ec88c2f998 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -137,6 +137,27 @@ OpenTelemetry supports multiple ways to configure the API, SDK and other components
 - Environment variables
 - Compiling time configurations provided in the source code
 
+### Experimental/Unstable features:
+
+Use the `otel_unstable` feature flag for implementations of specification features that still have [experimental](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.27.0/specification/document-status.md) status. This approach ensures clear demarcation and safe integration of new or evolving features. Utilize the following structure:
+
+```rust
+#[cfg(feature = "otel_unstable")]
+{
+    // Your feature implementation
+}
+```
+It's important to regularly review and remove the `otel_unstable` flag from the code once the feature becomes stable. This cleanup process is crucial to maintain the overall code quality and to ensure that stable features are accurately reflected in the main build.
+
+### Optional features:
+
+The potential features include:
+
+- Stable and non-experimental features that are compliant with the specification and have a feature flag to minimize compilation size. Example: feature flags for signals (like `logs`, `traces`, `metrics`) and runtimes (`rt-tokio`, `rt-tokio-current-thread`, `rt-async-std`).
+- Stable and non-experimental features that are not part of the specification but are crucial for enhancing the tracing/log crate's functionality or boosting performance. These features are also subject to discussion and approval by the OpenTelemetry Rust Maintainers. An example of such a feature is `logs_level_enabled`.
+
+All such features should adhere to the naming convention `<signal>_<feature_name>`.
+
 ## Style Guide
 
 - Run `cargo clippy --all` - this will catch common mistakes and improve
diff --git a/examples/metrics-basic/Cargo.toml b/examples/metrics-basic/Cargo.toml
index 1e33cdfd7e..d505b8cbd5 100644
--- a/examples/metrics-basic/Cargo.toml
+++ b/examples/metrics-basic/Cargo.toml
@@ -6,8 +6,12 @@ license = "Apache-2.0"
 publish = false
 
 [dependencies]
-opentelemetry = { path = "../../opentelemetry", features = ["metrics"] }
+opentelemetry = { path = "../../opentelemetry", features = ["metrics", "otel_unstable"] }
 opentelemetry_sdk = { path = "../../opentelemetry-sdk", features = ["metrics", "rt-tokio"] }
 opentelemetry-stdout = { path = "../../opentelemetry-stdout", features = ["metrics"]}
 tokio = { version = "1.0", features = ["full"] }
-serde_json = {version = "1.0"}
\ No newline at end of file
+serde_json = {version = "1.0"}
+
+[features]
+default = ["otel_unstable"]
+otel_unstable = ["opentelemetry/otel_unstable"]
diff --git a/examples/metrics-basic/src/main.rs b/examples/metrics-basic/src/main.rs
index c3307d1f3a..78dda47cad 100644
--- a/examples/metrics-basic/src/main.rs
+++ b/examples/metrics-basic/src/main.rs
@@ -110,17 +110,37 @@ async fn main() -> Result<(), Box<dyn Error + Send + Sync + 'static>> {
 
     // Note that there is no ObservableHistogram instrument.
 
+    // Create a Gauge instrument.
+    // Note that the Gauge instrument is experimental and can be changed/removed in future releases.
+    #[cfg(feature = "otel_unstable")]
+    {
+        let gauge = meter
+            .f64_gauge("my_gauge")
+            .with_description("A gauge set to 1.0")
+            .with_unit(Unit::new("myunit"))
+            .init();
+
+        gauge.record(
+            1.0,
+            [
+                KeyValue::new("mykey1", "myvalue1"),
+                KeyValue::new("mykey2", "myvalue2"),
+            ]
+            .as_ref(),
+        );
+    }
+
     // Create a ObservableGauge instrument and register a callback that reports the measurement.
-    let gauge = meter
-        .f64_observable_gauge("my_gauge")
-        .with_description("A gauge set to 1.0")
+    let observable_gauge = meter
+        .f64_observable_gauge("my_observable_gauge")
+        .with_description("An observable gauge set to 1.0")
         .with_unit(Unit::new("myunit"))
         .init();
 
     // Register a callback that reports the measurement.
-    meter.register_callback(&[gauge.as_any()], move |observer| {
+    meter.register_callback(&[observable_gauge.as_any()], move |observer| {
         observer.observe_f64(
-            &gauge,
+            &observable_gauge,
             1.0,
             [
                 KeyValue::new("mykey1", "myvalue1"),
                 KeyValue::new("mykey2", "myvalue2"),
             ]
             .as_ref(),
         )
     })?;
 
-    // Note that Gauge only has a Observable version.
-
     // Metrics are exported by default every 30 seconds when using stdout exporter,
     // however shutting down the MeterProvider here instantly flushes
     // the metrics, instead of waiting for the 30 sec interval.
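An aside on the feature wiring above: the example forwards its own `otel_unstable`
feature to `opentelemetry/otel_unstable`, so the gauge block compiles out cleanly
when the flag is off. A minimal sketch of the same pattern in an application
crate -- names here are illustrative, and in real code the instrument would be
created once and reused rather than rebuilt per call:

    #[cfg(feature = "otel_unstable")]
    fn record_queue_depth(meter: &opentelemetry::metrics::Meter, depth: u64) {
        let gauge = meter.u64_gauge("queue_depth").init();
        gauge.record(depth, &[opentelemetry::KeyValue::new("queue", "ingest")]);
    }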
diff --git a/opentelemetry-sdk/CHANGELOG.md b/opentelemetry-sdk/CHANGELOG.md index 1b5e0b76f9..ddb2315b70 100644 --- a/opentelemetry-sdk/CHANGELOG.md +++ b/opentelemetry-sdk/CHANGELOG.md @@ -2,6 +2,10 @@ ## vNext +### Added + +- [#1410](https://github.com/open-telemetry/opentelemetry-rust/pull/1410) Add experimental synchronous gauge + ### Changed - **Breaking** diff --git a/opentelemetry-sdk/src/metrics/instrument.rs b/opentelemetry-sdk/src/metrics/instrument.rs index 7971d47d3c..3ecae355b5 100644 --- a/opentelemetry-sdk/src/metrics/instrument.rs +++ b/opentelemetry-sdk/src/metrics/instrument.rs @@ -2,7 +2,8 @@ use std::{any::Any, borrow::Cow, collections::HashSet, hash::Hash, marker, sync: use opentelemetry::{ metrics::{ - AsyncInstrument, MetricsError, Result, SyncCounter, SyncHistogram, SyncUpDownCounter, Unit, + AsyncInstrument, MetricsError, Result, SyncCounter, SyncGauge, SyncHistogram, + SyncUpDownCounter, Unit, }, Key, KeyValue, }; @@ -33,6 +34,11 @@ pub enum InstrumentKind { /// A group of instruments that record increasing and decreasing values in an /// asynchronous callback. ObservableUpDownCounter, + + /// a group of instruments that record current value synchronously with + /// the code path they are measuring. + Gauge, + /// /// a group of instruments that record current values in an asynchronous callback. ObservableGauge, } @@ -268,6 +274,14 @@ impl SyncUpDownCounter for ResolvedMeasures { } } +impl SyncGauge for ResolvedMeasures { + fn record(&self, val: T, attrs: &[KeyValue]) { + for measure in &self.measures { + measure.call(val, AttributeSet::from(attrs)) + } + } +} + impl SyncHistogram for ResolvedMeasures { fn record(&self, val: T, attrs: &[KeyValue]) { for measure in &self.measures { diff --git a/opentelemetry-sdk/src/metrics/meter.rs b/opentelemetry-sdk/src/metrics/meter.rs index 5eaeba7745..c801adcb0a 100644 --- a/opentelemetry-sdk/src/metrics/meter.rs +++ b/opentelemetry-sdk/src/metrics/meter.rs @@ -5,9 +5,9 @@ use opentelemetry::{ global, metrics::{ noop::{NoopAsyncInstrument, NoopRegistration}, - AsyncInstrument, Callback, CallbackRegistration, Counter, Histogram, InstrumentProvider, - MetricsError, ObservableCounter, ObservableGauge, ObservableUpDownCounter, - Observer as ApiObserver, Result, Unit, UpDownCounter, + AsyncInstrument, Callback, CallbackRegistration, Counter, Gauge, Histogram, + InstrumentProvider, MetricsError, ObservableCounter, ObservableGauge, + ObservableUpDownCounter, Observer as ApiObserver, Result, Unit, UpDownCounter, }, KeyValue, }; @@ -299,6 +299,57 @@ impl InstrumentProvider for SdkMeter { Ok(ObservableUpDownCounter::new(observable)) } + fn u64_gauge( + &self, + name: Cow<'static, str>, + description: Option>, + unit: Option, + ) -> Result> { + validate_instrument_config(name.as_ref(), unit.as_ref(), self.validation_policy)?; + let p = InstrumentResolver::new(self, &self.u64_resolver); + p.lookup( + InstrumentKind::Gauge, + name, + description, + unit.unwrap_or_default(), + ) + .map(|i| Gauge::new(Arc::new(i))) + } + + fn f64_gauge( + &self, + name: Cow<'static, str>, + description: Option>, + unit: Option, + ) -> Result> { + validate_instrument_config(name.as_ref(), unit.as_ref(), self.validation_policy)?; + let p = InstrumentResolver::new(self, &self.f64_resolver); + p.lookup( + InstrumentKind::Gauge, + name, + description, + unit.unwrap_or_default(), + ) + .map(|i| Gauge::new(Arc::new(i))) + } + + fn i64_gauge( + &self, + name: Cow<'static, str>, + description: Option>, + unit: Option, + ) -> Result> { + 
validate_instrument_config(name.as_ref(), unit.as_ref(), self.validation_policy)?; + let p = InstrumentResolver::new(self, &self.i64_resolver); + p.lookup( + InstrumentKind::Gauge, + name, + description, + unit.unwrap_or_default(), + ) + .map(|i| Gauge::new(Arc::new(i))) + } + fn u64_observable_gauge( &self, name: Cow<'static, str>, @@ -784,6 +835,9 @@ mod tests { .f64_observable_up_down_counter(name.into(), None, None, Vec::new()) .map(|_| ()), ); + assert(meter.u64_gauge(name.into(), None, None).map(|_| ())); + assert(meter.f64_gauge(name.into(), None, None).map(|_| ())); + assert(meter.i64_gauge(name.into(), None, None).map(|_| ())); assert( meter .u64_observable_gauge(name.into(), None, None, Vec::new()) diff --git a/opentelemetry-sdk/src/metrics/pipeline.rs b/opentelemetry-sdk/src/metrics/pipeline.rs index 6ebd7c9e05..be0dde9736 100644 --- a/opentelemetry-sdk/src/metrics/pipeline.rs +++ b/opentelemetry-sdk/src/metrics/pipeline.rs @@ -536,6 +536,7 @@ fn aggregate_fn>( /// | Histogram | ✓ | | ✓ | ✓ | ✓ | /// | Observable Counter | ✓ | | ✓ | ✓ | ✓ | /// | Observable UpDownCounter | ✓ | | ✓ | ✓ | ✓ | +/// | Gauge | ✓ | ✓ | | ✓ | ✓ | /// | Observable Gauge | ✓ | ✓ | | ✓ | ✓ | fn is_aggregator_compatible(kind: &InstrumentKind, agg: &aggregation::Aggregation) -> Result<()> { use aggregation::Aggregation; @@ -547,6 +548,7 @@ fn is_aggregator_compatible(kind: &InstrumentKind, agg: &aggregation::Aggregatio kind, InstrumentKind::Counter | InstrumentKind::UpDownCounter + | InstrumentKind::Gauge | InstrumentKind::Histogram | InstrumentKind::ObservableCounter | InstrumentKind::ObservableUpDownCounter @@ -571,12 +573,14 @@ fn is_aggregator_compatible(kind: &InstrumentKind, agg: &aggregation::Aggregatio } } Aggregation::LastValue => { - if kind == &InstrumentKind::ObservableGauge { - return Ok(()); + match kind { + InstrumentKind::Gauge | InstrumentKind::ObservableGauge => Ok(()), + _ => { + // TODO: review need for aggregation check after + // https://github.com/open-telemetry/opentelemetry-specification/issues/2710 + Err(MetricsError::Other("incompatible aggregation".into())) + } } - // TODO: review need for aggregation check after - // https://github.com/open-telemetry/opentelemetry-specification/issues/2710 - Err(MetricsError::Other("incompatible aggregation".into())) } Aggregation::Drop => Ok(()), } diff --git a/opentelemetry-sdk/src/metrics/reader.rs b/opentelemetry-sdk/src/metrics/reader.rs index 11cd9ae060..f53e507dc1 100644 --- a/opentelemetry-sdk/src/metrics/reader.rs +++ b/opentelemetry-sdk/src/metrics/reader.rs @@ -121,6 +121,7 @@ where /// * Observable Counter ⇨ Sum /// * UpDownCounter ⇨ Sum /// * Observable UpDownCounter ⇨ Sum +/// * Gauge ⇨ LastValue /// * Observable Gauge ⇨ LastValue /// * Histogram ⇨ ExplicitBucketHistogram /// @@ -144,6 +145,7 @@ impl AggregationSelector for DefaultAggregationSelector { | InstrumentKind::UpDownCounter | InstrumentKind::ObservableCounter | InstrumentKind::ObservableUpDownCounter => Aggregation::Sum, + InstrumentKind::Gauge => Aggregation::LastValue, InstrumentKind::ObservableGauge => Aggregation::LastValue, InstrumentKind::Histogram => Aggregation::ExplicitBucketHistogram { boundaries: vec![ diff --git a/opentelemetry/CHANGELOG.md b/opentelemetry/CHANGELOG.md index 77c5b52a93..b57647e5ab 100644 --- a/opentelemetry/CHANGELOG.md +++ b/opentelemetry/CHANGELOG.md @@ -2,6 +2,12 @@ ## vNext +### Added + +- [#1410](https://github.com/open-telemetry/opentelemetry-rust/pull/1410) Add experimental synchronous gauge. 
  This is gated behind the `otel_unstable` feature flag of the `opentelemetry` crate.
+
+- [#1410](https://github.com/open-telemetry/opentelemetry-rust/pull/1410) Guidelines to add new unstable/experimental features.
+
 ### Changed
 
 - Modified `AnyValue.Map` to be backed by `HashMap` instead of custom `OrderMap`,
diff --git a/opentelemetry/Cargo.toml b/opentelemetry/Cargo.toml
index 957d2d6a49..2c6726db4c 100644
--- a/opentelemetry/Cargo.toml
+++ b/opentelemetry/Cargo.toml
@@ -38,6 +38,7 @@ metrics = []
 testing = ["trace", "metrics"]
 logs = []
 logs_level_enabled = ["logs"]
+otel_unstable = []
 
 [dev-dependencies]
 opentelemetry_sdk = { path = "../opentelemetry-sdk" } # for documentation tests
diff --git a/opentelemetry/src/metrics/instruments/gauge.rs b/opentelemetry/src/metrics/instruments/gauge.rs
index b9f082d83a..ab9fb2e05d 100644
--- a/opentelemetry/src/metrics/instruments/gauge.rs
+++ b/opentelemetry/src/metrics/instruments/gauge.rs
@@ -1,12 +1,76 @@
 use crate::{
-    metrics::{AsyncInstrument, AsyncInstrumentBuilder, MetricsError},
+    metrics::{AsyncInstrument, AsyncInstrumentBuilder, InstrumentBuilder, MetricsError},
     KeyValue,
 };
 use core::fmt;
 use std::sync::Arc;
 use std::{any::Any, convert::TryFrom};
 
-/// An instrument that records independent readings.
+/// An SDK implemented instrument that records independent values
+pub trait SyncGauge<T> {
+    /// Records an independent value.
+    fn record(&self, value: T, attributes: &[KeyValue]);
+}
+
+/// An instrument that records independent values
+#[derive(Clone)]
+pub struct Gauge<T>(Arc<dyn SyncGauge<T> + Send + Sync>);
+
+impl<T> fmt::Debug for Gauge<T>
+where
+    T: fmt::Debug,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_fmt(format_args!("Gauge<{}>", std::any::type_name::<T>()))
+    }
+}
+
+impl<T> Gauge<T> {
+    /// Create a new gauge.
+    pub fn new(inner: Arc<dyn SyncGauge<T> + Send + Sync>) -> Self {
+        Gauge(inner)
+    }
+
+    /// Records an independent value.
+    pub fn record(&self, value: T, attributes: &[KeyValue]) {
+        self.0.record(value, attributes)
+    }
+}
+
+impl TryFrom<InstrumentBuilder<'_, Gauge<u64>>> for Gauge<u64> {
+    type Error = MetricsError;
+
+    fn try_from(builder: InstrumentBuilder<'_, Gauge<u64>>) -> Result<Self, Self::Error> {
+        builder
+            .meter
+            .instrument_provider
+            .u64_gauge(builder.name, builder.description, builder.unit)
+    }
+}
+
+impl TryFrom<InstrumentBuilder<'_, Gauge<f64>>> for Gauge<f64> {
+    type Error = MetricsError;
+
+    fn try_from(builder: InstrumentBuilder<'_, Gauge<f64>>) -> Result<Self, Self::Error> {
+        builder
+            .meter
+            .instrument_provider
+            .f64_gauge(builder.name, builder.description, builder.unit)
+    }
+}
+
+impl TryFrom<InstrumentBuilder<'_, Gauge<i64>>> for Gauge<i64> {
+    type Error = MetricsError;
+
+    fn try_from(builder: InstrumentBuilder<'_, Gauge<i64>>) -> Result<Self, Self::Error> {
+        builder
+            .meter
+            .instrument_provider
+            .i64_gauge(builder.name, builder.description, builder.unit)
+    }
+}
+
+/// An async instrument that records independent readings.
 #[derive(Clone)]
 pub struct ObservableGauge<T>(Arc<dyn AsyncInstrument<T>>);
 
diff --git a/opentelemetry/src/metrics/meter.rs b/opentelemetry/src/metrics/meter.rs
index a66a77e55c..f64fae6976 100644
--- a/opentelemetry/src/metrics/meter.rs
+++ b/opentelemetry/src/metrics/meter.rs
@@ -3,6 +3,8 @@ use std::any::Any;
 use std::borrow::Cow;
 use std::sync::Arc;
 
+#[cfg(feature = "otel_unstable")]
+use crate::metrics::Gauge;
 use crate::metrics::{
     AsyncInstrumentBuilder, Counter, Histogram, InstrumentBuilder, InstrumentProvider,
     ObservableCounter, ObservableGauge, ObservableUpDownCounter, Result, UpDownCounter,
@@ -333,6 +335,39 @@ impl Meter {
         AsyncInstrumentBuilder::new(self, name.into())
     }
 
+    /// # Experimental
+    /// This method is experimental and can be changed/removed in future releases.
+    /// creates an instrument builder for recording independent values.
+    #[cfg(feature = "otel_unstable")]
+    pub fn u64_gauge(
+        &self,
+        name: impl Into<Cow<'static, str>>,
+    ) -> InstrumentBuilder<'_, Gauge<u64>> {
+        InstrumentBuilder::new(self, name.into())
+    }
+
+    /// # Experimental
+    /// This method is experimental and can be changed/removed in future releases.
+    /// creates an instrument builder for recording independent values.
+    #[cfg(feature = "otel_unstable")]
+    pub fn f64_gauge(
+        &self,
+        name: impl Into<Cow<'static, str>>,
+    ) -> InstrumentBuilder<'_, Gauge<f64>> {
+        InstrumentBuilder::new(self, name.into())
+    }
+
+    /// # Experimental
+    /// This method is experimental and can be changed/removed in future releases.
+    /// creates an instrument builder for recording independent values.
+    #[cfg(feature = "otel_unstable")]
+    pub fn i64_gauge(
+        &self,
+        name: impl Into<Cow<'static, str>>,
+    ) -> InstrumentBuilder<'_, Gauge<i64>> {
+        InstrumentBuilder::new(self, name.into())
+    }
+
     /// creates an instrument builder for recording the current value via callback.
     pub fn u64_observable_gauge(
         &self,
diff --git a/opentelemetry/src/metrics/mod.rs b/opentelemetry/src/metrics/mod.rs
index 4241363411..34ed16fa39 100644
--- a/opentelemetry/src/metrics/mod.rs
+++ b/opentelemetry/src/metrics/mod.rs
@@ -13,7 +13,7 @@ pub mod noop;
 use crate::ExportError;
 pub use instruments::{
     counter::{Counter, ObservableCounter, SyncCounter},
-    gauge::ObservableGauge,
+    gauge::{Gauge, ObservableGauge, SyncGauge},
     histogram::{Histogram, SyncHistogram},
     up_down_counter::{ObservableUpDownCounter, SyncUpDownCounter, UpDownCounter},
     AsyncInstrument, AsyncInstrumentBuilder, Callback, InstrumentBuilder,
@@ -179,6 +179,36 @@ pub trait InstrumentProvider {
         )))
     }
 
+    /// creates an instrument for recording independent values.
+    fn u64_gauge(
+        &self,
+        _name: Cow<'static, str>,
+        _description: Option<Cow<'static, str>>,
+        _unit: Option<Unit>,
+    ) -> Result<Gauge<u64>> {
+        Ok(Gauge::new(Arc::new(noop::NoopSyncInstrument::new())))
+    }
+
+    /// creates an instrument for recording independent values.
+    fn f64_gauge(
+        &self,
+        _name: Cow<'static, str>,
+        _description: Option<Cow<'static, str>>,
+        _unit: Option<Unit>,
+    ) -> Result<Gauge<f64>> {
+        Ok(Gauge::new(Arc::new(noop::NoopSyncInstrument::new())))
+    }
+
+    /// creates an instrument for recording independent values.
+    fn i64_gauge(
+        &self,
+        _name: Cow<'static, str>,
+        _description: Option<Cow<'static, str>>,
+        _unit: Option<Unit>,
+    ) -> Result<Gauge<i64>> {
+        Ok(Gauge::new(Arc::new(noop::NoopSyncInstrument::new())))
+    }
+
     /// creates an instrument for recording the current value via callback.
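// NOTE: illustration only, not part of the patch — a minimal sketch of using
// the synchronous gauge API added above. It assumes the `otel_unstable`
// feature is enabled on the `opentelemetry` crate and that a meter provider
// is already installed globally; the instrument name, description, and
// attribute below are placeholders.
#[cfg(feature = "otel_unstable")]
fn report_queue_depth(depth: u64) {
    use opentelemetry::{global, KeyValue};

    // `u64_gauge` returns an `InstrumentBuilder<'_, Gauge<u64>>`; `init()`
    // resolves it through the `TryFrom` impls shown earlier.
    let gauge = global::meter("my-component")
        .u64_gauge("queue.depth")
        .with_description("Current depth of the work queue")
        .init();

    // Record the latest value together with identifying attributes.
    gauge.record(depth, &[KeyValue::new("queue.name", "default")]);
}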
     fn u64_observable_gauge(
         &self,
diff --git a/opentelemetry/src/metrics/noop.rs b/opentelemetry/src/metrics/noop.rs
index 361bed29e7..adf4b03da3 100644
--- a/opentelemetry/src/metrics/noop.rs
+++ b/opentelemetry/src/metrics/noop.rs
@@ -6,7 +6,7 @@ use crate::{
     metrics::{
         AsyncInstrument, CallbackRegistration, InstrumentProvider, Meter, MeterProvider, Observer,
-        Result, SyncCounter, SyncHistogram, SyncUpDownCounter,
+        Result, SyncCounter, SyncGauge, SyncHistogram, SyncUpDownCounter,
     },
     KeyValue,
 };
@@ -110,6 +110,12 @@ impl<T> SyncHistogram<T> for NoopSyncInstrument {
     }
 }
 
+impl<T> SyncGauge<T> for NoopSyncInstrument {
+    fn record(&self, _value: T, _attributes: &[KeyValue]) {
+        // Ignored
+    }
+}
+
 /// A no-op async instrument.
 #[derive(Debug, Default)]
 pub struct NoopAsyncInstrument {

From bd3ba65546217ef654b9e5edb32b783a2f1af95c Mon Sep 17 00:00:00 2001
From: Jacob Rothstein
Date: Tue, 12 Dec 2023 15:22:26 -0800
Subject: [PATCH 61/68] Add await to async trace documentation (#1440)

---
 opentelemetry/src/trace/mod.rs | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/opentelemetry/src/trace/mod.rs b/opentelemetry/src/trace/mod.rs
index 038064ca49..3b959d3e0f 100644
--- a/opentelemetry/src/trace/mod.rs
+++ b/opentelemetry/src/trace/mod.rs
@@ -151,6 +151,7 @@
 //! use opentelemetry::{Context, global, trace::{FutureExt, TraceContextExt, Tracer}};
 //!
 //! async fn some_work() { }
+//! # async fn in_an_async_context() {
 //!
 //! // Get a tracer
 //! let tracer = global::tracer("my_tracer");
@@ -159,7 +160,8 @@
 //! let span = tracer.start("my_span");
 //!
 //! // Perform some async work with this span as the currently active parent.
-//! some_work().with_context(Context::current_with_span(span));
+//! some_work().with_context(Context::current_with_span(span)).await;
+//! # }
 //!
``` use std::borrow::Cow; From fdacd58e70561ba9d8d1c8c46f54615609b7fb41 Mon Sep 17 00:00:00 2001 From: Olivier Soucy Date: Tue, 12 Dec 2023 20:01:39 -0500 Subject: [PATCH 62/68] Bench for logs (#1431) --- opentelemetry-sdk/Cargo.toml | 5 ++ opentelemetry-sdk/benches/log.rs | 119 +++++++++++++++++++++++++++++++ 2 files changed, 124 insertions(+) create mode 100644 opentelemetry-sdk/benches/log.rs diff --git a/opentelemetry-sdk/Cargo.toml b/opentelemetry-sdk/Cargo.toml index 4de782d350..ddc2baf851 100644 --- a/opentelemetry-sdk/Cargo.toml +++ b/opentelemetry-sdk/Cargo.toml @@ -86,3 +86,8 @@ required-features = ["rt-tokio", "testing"] name = "metric" harness = false required-features = ["metrics"] + +[[bench]] +name = "log" +harness = false +required-features = ["logs"] diff --git a/opentelemetry-sdk/benches/log.rs b/opentelemetry-sdk/benches/log.rs new file mode 100644 index 0000000000..ef050b602a --- /dev/null +++ b/opentelemetry-sdk/benches/log.rs @@ -0,0 +1,119 @@ +use std::time::SystemTime; + +use async_trait::async_trait; +use criterion::{criterion_group, criterion_main, Criterion}; + +use opentelemetry::logs::{LogRecord, LogResult, Logger, LoggerProvider as _, Severity}; +use opentelemetry::trace::Tracer; +use opentelemetry::trace::TracerProvider as _; +use opentelemetry_sdk::export::logs::{LogData, LogExporter}; +use opentelemetry_sdk::logs::LoggerProvider; +use opentelemetry_sdk::trace::{config, Sampler, TracerProvider}; + +#[derive(Debug)] +struct VoidExporter; + +#[async_trait] +impl LogExporter for VoidExporter { + async fn export(&mut self, _batch: Vec) -> LogResult<()> { + LogResult::Ok(()) + } +} + +fn log_benchmark_group(c: &mut Criterion, name: &str, f: F) { + let mut group = c.benchmark_group(name); + + group.bench_function("no-context", |b| { + let provider = LoggerProvider::builder() + .with_simple_exporter(VoidExporter) + .build(); + + let logger = provider.logger("no-context"); + + b.iter(|| f(&logger)); + }); + + group.bench_function("with-context", |b| { + let provider = LoggerProvider::builder() + .with_simple_exporter(VoidExporter) + .build(); + + let logger = provider.logger("with-context"); + + // setup tracing as well. + let tracer_provider = TracerProvider::builder() + .with_config(config().with_sampler(Sampler::AlwaysOn)) + .build(); + let tracer = tracer_provider.tracer("bench-tracer"); + + // Act + tracer.in_span("bench-span", |_cx| { + b.iter(|| f(&logger)); + }); + }); + + group.finish(); +} + +fn criterion_benchmark(c: &mut Criterion) { + log_benchmark_group(c, "simple-log", |logger| { + logger.emit(LogRecord::builder().with_body("simple log".into()).build()) + }); + + log_benchmark_group(c, "long-log", |logger| { + logger.emit(LogRecord::builder().with_body("Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Gravida in fermentum et sollicitudin ac orci phasellus. Ullamcorper dignissim cras tincidunt lobortis feugiat vivamus at augue. Magna etiam tempor orci eu. 
Sed tempus urna et pharetra pharetra massa.".into()).build()) + }); + + let now = SystemTime::now(); + log_benchmark_group(c, "full-log", |logger| { + logger.emit( + LogRecord::builder() + .with_body("full log".into()) + .with_timestamp(now) + .with_observed_timestamp(now) + .with_severity_number(Severity::Warn) + .with_severity_text(Severity::Warn.name()) + .build(), + ) + }); + + log_benchmark_group(c, "full-log-with-4-attributes", |logger| { + logger.emit( + LogRecord::builder() + .with_body("full log".into()) + .with_timestamp(now) + .with_observed_timestamp(now) + .with_severity_number(Severity::Warn) + .with_severity_text(Severity::Warn.name()) + .with_attribute("name", "my-event-name") + .with_attribute("event.id", 20) + .with_attribute("user.name", "otel") + .with_attribute("user.email", "otel@opentelemetry.io") + .build(), + ) + }); + + log_benchmark_group(c, "full-log-with-9-attributes", |logger| { + logger.emit( + LogRecord::builder() + .with_body("full log".into()) + .with_timestamp(now) + .with_observed_timestamp(now) + .with_severity_number(Severity::Warn) + .with_severity_text(Severity::Warn.name()) + .with_attribute("name", "my-event-name") + .with_attribute("event.id", 20) + .with_attribute("user.name", "otel") + .with_attribute("user.email", "otel@opentelemetry.io") + .with_attribute("log.source.file.name", "log.rs") + .with_attribute("log.source.file.path", "opentelemetry_sdk/benches/log.rs") + .with_attribute("log.source.file.line", 96) + .with_attribute("log.module.path", "opentelemetry_sdk::benches::log") + .with_attribute("log.target", "opentelemetry_sdk::benches::log") + .build(), + ) + }); +} + +criterion_group!(benches, criterion_benchmark); +criterion_main!(benches); From 7d0b80ea852eb3218504b722476484063802a9a4 Mon Sep 17 00:00:00 2001 From: Julian Tescher Date: Tue, 12 Dec 2023 21:29:07 -0500 Subject: [PATCH 63/68] Fix delta aggregation metric reuse (#1434) --- opentelemetry-sdk/CHANGELOG.md | 4 + .../src/metrics/internal/aggregate.rs | 236 ++++++++++++++++++ .../metrics/internal/exponential_histogram.rs | 181 +++++--------- .../src/metrics/internal/histogram.rs | 122 +++------ .../src/metrics/internal/last_value.rs | 34 +-- opentelemetry-sdk/src/metrics/internal/sum.rs | 113 ++++----- 6 files changed, 396 insertions(+), 294 deletions(-) diff --git a/opentelemetry-sdk/CHANGELOG.md b/opentelemetry-sdk/CHANGELOG.md index ddb2315b70..814379f9a5 100644 --- a/opentelemetry-sdk/CHANGELOG.md +++ b/opentelemetry-sdk/CHANGELOG.md @@ -29,6 +29,10 @@ - [#1375](https://github.com/open-telemetry/opentelemetry-rust/pull/1375/) Fix metric collections during PeriodicReader shutdown +### Fixed + +- Fix delta aggregation metric reuse. 
(#1434) + ## v0.21.1 ### Fixed diff --git a/opentelemetry-sdk/src/metrics/internal/aggregate.rs b/opentelemetry-sdk/src/metrics/internal/aggregate.rs index ccd0f26f88..08d6feec04 100644 --- a/opentelemetry-sdk/src/metrics/internal/aggregate.rs +++ b/opentelemetry-sdk/src/metrics/internal/aggregate.rs @@ -210,3 +210,239 @@ impl> AggregateBuilder { ) } } + +#[cfg(test)] +mod tests { + use crate::metrics::data::{ + DataPoint, ExponentialBucket, ExponentialHistogram, ExponentialHistogramDataPoint, + Histogram, HistogramDataPoint, Sum, + }; + use std::time::SystemTime; + + use super::*; + + #[test] + fn last_value_aggregation() { + let (measure, agg) = AggregateBuilder::::new(None, None).last_value(); + let mut a = Gauge { + data_points: vec![DataPoint { + attributes: AttributeSet::from(&[KeyValue::new("a", 1)][..]), + start_time: Some(SystemTime::now()), + time: Some(SystemTime::now()), + value: 1u64, + exemplars: vec![], + }], + }; + let new_attributes = [KeyValue::new("b", 2)]; + measure.call(2, AttributeSet::from(&new_attributes[..])); + + let (count, new_agg) = agg.call(Some(&mut a)); + + assert_eq!(count, 1); + assert!(new_agg.is_none()); + assert_eq!(a.data_points.len(), 1); + assert_eq!( + a.data_points[0].attributes, + AttributeSet::from(&new_attributes[..]) + ); + assert_eq!(a.data_points[0].value, 2); + } + + #[test] + fn precomputed_sum_aggregation() { + for temporality in [Temporality::Delta, Temporality::Cumulative] { + let (measure, agg) = + AggregateBuilder::::new(Some(temporality), None).precomputed_sum(true); + let mut a = Sum { + data_points: vec![ + DataPoint { + attributes: AttributeSet::from(&[KeyValue::new("a1", 1)][..]), + start_time: Some(SystemTime::now()), + time: Some(SystemTime::now()), + value: 1u64, + exemplars: vec![], + }, + DataPoint { + attributes: AttributeSet::from(&[KeyValue::new("a2", 2)][..]), + start_time: Some(SystemTime::now()), + time: Some(SystemTime::now()), + value: 2u64, + exemplars: vec![], + }, + ], + temporality: if temporality == Temporality::Delta { + Temporality::Cumulative + } else { + Temporality::Delta + }, + is_monotonic: false, + }; + let new_attributes = [KeyValue::new("b", 2)]; + measure.call(3, AttributeSet::from(&new_attributes[..])); + + let (count, new_agg) = agg.call(Some(&mut a)); + + assert_eq!(count, 1); + assert!(new_agg.is_none()); + assert_eq!(a.temporality, temporality); + assert!(a.is_monotonic); + assert_eq!(a.data_points.len(), 1); + assert_eq!( + a.data_points[0].attributes, + AttributeSet::from(&new_attributes[..]) + ); + assert_eq!(a.data_points[0].value, 3); + } + } + + #[test] + fn sum_aggregation() { + for temporality in [Temporality::Delta, Temporality::Cumulative] { + let (measure, agg) = AggregateBuilder::::new(Some(temporality), None).sum(true); + let mut a = Sum { + data_points: vec![ + DataPoint { + attributes: AttributeSet::from(&[KeyValue::new("a1", 1)][..]), + start_time: Some(SystemTime::now()), + time: Some(SystemTime::now()), + value: 1u64, + exemplars: vec![], + }, + DataPoint { + attributes: AttributeSet::from(&[KeyValue::new("a2", 2)][..]), + start_time: Some(SystemTime::now()), + time: Some(SystemTime::now()), + value: 2u64, + exemplars: vec![], + }, + ], + temporality: if temporality == Temporality::Delta { + Temporality::Cumulative + } else { + Temporality::Delta + }, + is_monotonic: false, + }; + let new_attributes = [KeyValue::new("b", 2)]; + measure.call(3, AttributeSet::from(&new_attributes[..])); + + let (count, new_agg) = agg.call(Some(&mut a)); + + assert_eq!(count, 1); + 
assert!(new_agg.is_none()); + assert_eq!(a.temporality, temporality); + assert!(a.is_monotonic); + assert_eq!(a.data_points.len(), 1); + assert_eq!( + a.data_points[0].attributes, + AttributeSet::from(&new_attributes[..]) + ); + assert_eq!(a.data_points[0].value, 3); + } + } + + #[test] + fn explicit_bucket_histogram_aggregation() { + for temporality in [Temporality::Delta, Temporality::Cumulative] { + let (measure, agg) = AggregateBuilder::::new(Some(temporality), None) + .explicit_bucket_histogram(vec![1.0], true, true); + let mut a = Histogram { + data_points: vec![HistogramDataPoint { + attributes: AttributeSet::from(&[KeyValue::new("a2", 2)][..]), + start_time: SystemTime::now(), + time: SystemTime::now(), + count: 2, + bounds: vec![1.0, 2.0], + bucket_counts: vec![0, 1, 1], + min: None, + max: None, + sum: 3u64, + exemplars: vec![], + }], + temporality: if temporality == Temporality::Delta { + Temporality::Cumulative + } else { + Temporality::Delta + }, + }; + let new_attributes = [KeyValue::new("b", 2)]; + measure.call(3, AttributeSet::from(&new_attributes[..])); + + let (count, new_agg) = agg.call(Some(&mut a)); + + assert_eq!(count, 1); + assert!(new_agg.is_none()); + assert_eq!(a.temporality, temporality); + assert_eq!(a.data_points.len(), 1); + assert_eq!( + a.data_points[0].attributes, + AttributeSet::from(&new_attributes[..]) + ); + assert_eq!(a.data_points[0].count, 1); + assert_eq!(a.data_points[0].bounds, vec![1.0]); + assert_eq!(a.data_points[0].bucket_counts, vec![0, 1]); + assert_eq!(a.data_points[0].min, Some(3)); + assert_eq!(a.data_points[0].max, Some(3)); + assert_eq!(a.data_points[0].sum, 3); + } + } + + #[test] + fn exponential_histogram_aggregation() { + for temporality in [Temporality::Delta, Temporality::Cumulative] { + let (measure, agg) = AggregateBuilder::::new(Some(temporality), None) + .exponential_bucket_histogram(4, 20, true, true); + let mut a = ExponentialHistogram { + data_points: vec![ExponentialHistogramDataPoint { + attributes: AttributeSet::from(&[KeyValue::new("a2", 2)][..]), + start_time: SystemTime::now(), + time: SystemTime::now(), + count: 2, + min: None, + max: None, + sum: 3u64, + scale: 10, + zero_count: 1, + positive_bucket: ExponentialBucket { + offset: 1, + counts: vec![1], + }, + negative_bucket: ExponentialBucket { + offset: 1, + counts: vec![1], + }, + zero_threshold: 1.0, + exemplars: vec![], + }], + temporality: if temporality == Temporality::Delta { + Temporality::Cumulative + } else { + Temporality::Delta + }, + }; + let new_attributes = [KeyValue::new("b", 2)]; + measure.call(3, AttributeSet::from(&new_attributes[..])); + + let (count, new_agg) = agg.call(Some(&mut a)); + + assert_eq!(count, 1); + assert!(new_agg.is_none()); + assert_eq!(a.temporality, temporality); + assert_eq!(a.data_points.len(), 1); + assert_eq!( + a.data_points[0].attributes, + AttributeSet::from(&new_attributes[..]) + ); + assert_eq!(a.data_points[0].count, 1); + assert_eq!(a.data_points[0].min, Some(3)); + assert_eq!(a.data_points[0].max, Some(3)); + assert_eq!(a.data_points[0].sum, 3); + assert_eq!(a.data_points[0].zero_count, 0); + assert_eq!(a.data_points[0].zero_threshold, 0.0); + assert_eq!(a.data_points[0].positive_bucket.offset, 1661953); + assert_eq!(a.data_points[0].positive_bucket.counts, vec![1]); + assert_eq!(a.data_points[0].negative_bucket.offset, 0); + assert!(a.data_points[0].negative_bucket.counts.is_empty()); + } + } +} diff --git a/opentelemetry-sdk/src/metrics/internal/exponential_histogram.rs 
b/opentelemetry-sdk/src/metrics/internal/exponential_histogram.rs index ec0b70d003..189b61c553 100644 --- a/opentelemetry-sdk/src/metrics/internal/exponential_histogram.rs +++ b/opentelemetry-sdk/src/metrics/internal/exponential_histogram.rs @@ -1,9 +1,4 @@ -use std::{ - collections::HashMap, - f64::consts::LOG2_E, - sync::Mutex, - time::{SystemTime, UNIX_EPOCH}, -}; +use std::{collections::HashMap, f64::consts::LOG2_E, sync::Mutex, time::SystemTime}; use once_cell::sync::Lazy; use opentelemetry::metrics::MetricsError; @@ -387,6 +382,7 @@ impl> ExpoHistogram { }; let h = h.unwrap_or_else(|| new_agg.as_mut().expect("present if h is none")); h.temporality = Temporality::Delta; + h.data_points.clear(); let mut values = match self.values.lock() { Ok(g) => g, @@ -395,62 +391,39 @@ impl> ExpoHistogram { let n = values.len(); if n > h.data_points.capacity() { - h.data_points.reserve(n - h.data_points.capacity()); + h.data_points.reserve_exact(n - h.data_points.capacity()); } - for (i, (a, mut b)) in values.drain().enumerate() { - let el = match h.data_points.get_mut(i) { - Some(el) => el, - None => { - h.data_points.push(data::ExponentialHistogramDataPoint { - attributes: AttributeSet::default(), - start_time: UNIX_EPOCH, - time: UNIX_EPOCH, - count: 0, - min: None, - max: None, - sum: T::default(), - scale: 0, - zero_count: 0, - positive_bucket: data::ExponentialBucket { - offset: 0, - counts: vec![], - }, - negative_bucket: data::ExponentialBucket { - offset: 0, - counts: vec![], - }, - zero_threshold: 0.0, - exemplars: vec![], - }); - h.data_points.get_mut(i).unwrap() - } - }; - el.attributes = a; - el.start_time = start; - el.time = t; - el.count = b.count; - el.scale = b.scale; - el.zero_count = b.zero_count; - el.zero_threshold = 0.0; - - el.positive_bucket.offset = b.pos_buckets.start_bin; - el.positive_bucket.counts.clear(); - el.positive_bucket.counts.append(&mut b.pos_buckets.counts); - - el.negative_bucket.offset = b.neg_buckets.start_bin; - el.negative_bucket.counts.clear(); - el.negative_bucket.counts.append(&mut b.neg_buckets.counts); - - el.sum = if self.record_sum { b.sum } else { T::default() }; - - if self.record_min_max { - el.min = Some(b.min); - el.max = Some(b.max); - } else { - el.min = None; - el.max = None; - } + for (a, b) in values.drain() { + h.data_points.push(data::ExponentialHistogramDataPoint { + attributes: a, + start_time: start, + time: t, + count: b.count, + min: if self.record_min_max { + Some(b.min) + } else { + None + }, + max: if self.record_min_max { + Some(b.max) + } else { + None + }, + sum: if self.record_sum { b.sum } else { T::default() }, + scale: b.scale, + zero_count: b.zero_count, + positive_bucket: data::ExponentialBucket { + offset: b.pos_buckets.start_bin, + counts: b.pos_buckets.counts.clone(), + }, + negative_bucket: data::ExponentialBucket { + offset: b.neg_buckets.start_bin, + counts: b.neg_buckets.counts.clone(), + }, + zero_threshold: 0.0, + exemplars: vec![], + }); } // The delta collection cycle resets. @@ -488,71 +461,47 @@ impl> ExpoHistogram { Ok(g) => g, Err(_) => return (0, None), }; + h.data_points.clear(); let n = values.len(); if n > h.data_points.capacity() { - h.data_points.reserve(n - h.data_points.capacity()); + h.data_points.reserve_exact(n - h.data_points.capacity()); } // TODO: This will use an unbounded amount of memory if there // are unbounded number of attribute sets being aggregated. Attribute // sets that become "stale" need to be forgotten so this will not // overload the system. 
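// NOTE: illustration only, not part of the patch — the essence of this fix,
// repeated across the aggregators in this patch: collection no longer reuses
// `data_points[i]` in place, which could leave stale points behind when the
// set of attribute sets shrinks between cycles; instead the output is cleared
// and rebuilt from the live values. A simplified sketch with placeholder
// types, not the SDK's actual code:
fn rebuild_points<T>(
    dest: &mut Vec<(String, T)>,
    values: &mut std::collections::HashMap<String, T>,
) {
    dest.clear();
    let n = values.len();
    if n > dest.capacity() {
        // `reserve_exact` avoids over-allocating for a known final length.
        dest.reserve_exact(n - dest.capacity());
    }
    // One fresh data point per currently live attribute set.
    for (attrs, value) in values.drain() {
        dest.push((attrs, value));
    }
}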
- for (i, (a, b)) in values.iter().enumerate() { - let el = match h.data_points.get_mut(i) { - Some(el) => el, - None => { - h.data_points.push(data::ExponentialHistogramDataPoint { - attributes: AttributeSet::default(), - start_time: UNIX_EPOCH, - time: UNIX_EPOCH, - count: 0, - min: None, - max: None, - sum: T::default(), - scale: 0, - zero_count: 0, - positive_bucket: data::ExponentialBucket { - offset: 0, - counts: vec![], - }, - negative_bucket: data::ExponentialBucket { - offset: 0, - counts: vec![], - }, - zero_threshold: 0.0, - exemplars: vec![], - }); - h.data_points.get_mut(i).unwrap() - } - }; - el.attributes = a.clone(); - el.start_time = start; - el.time = t; - el.count = b.count; - el.scale = b.scale; - el.zero_count = b.zero_count; - el.zero_threshold = 0.0; - - el.positive_bucket.offset = b.pos_buckets.start_bin; - el.positive_bucket.counts.clear(); - el.positive_bucket - .counts - .extend_from_slice(&b.pos_buckets.counts); - - el.negative_bucket.offset = b.neg_buckets.start_bin; - el.negative_bucket.counts.clear(); - el.negative_bucket - .counts - .extend_from_slice(&b.neg_buckets.counts); - - if self.record_sum { - el.sum = b.sum; - } - if self.record_min_max { - el.min = Some(b.min); - el.max = Some(b.max); - } + for (a, b) in values.iter() { + h.data_points.push(data::ExponentialHistogramDataPoint { + attributes: a.clone(), + start_time: start, + time: t, + count: b.count, + min: if self.record_min_max { + Some(b.min) + } else { + None + }, + max: if self.record_min_max { + Some(b.max) + } else { + None + }, + sum: if self.record_sum { b.sum } else { T::default() }, + scale: b.scale, + zero_count: b.zero_count, + positive_bucket: data::ExponentialBucket { + offset: b.pos_buckets.start_bin, + counts: b.pos_buckets.counts.clone(), + }, + negative_bucket: data::ExponentialBucket { + offset: b.neg_buckets.start_bin, + counts: b.neg_buckets.counts.clone(), + }, + zero_threshold: 0.0, + exemplars: vec![], + }); } (n, new_agg.map(|a| Box::new(a) as Box<_>)) diff --git a/opentelemetry-sdk/src/metrics/internal/histogram.rs b/opentelemetry-sdk/src/metrics/internal/histogram.rs index 535b9a8586..45ac569e2b 100644 --- a/opentelemetry-sdk/src/metrics/internal/histogram.rs +++ b/opentelemetry-sdk/src/metrics/internal/histogram.rs @@ -156,62 +156,38 @@ impl> Histogram { }; let h = h.unwrap_or_else(|| new_agg.as_mut().expect("present if h is none")); h.temporality = Temporality::Delta; + h.data_points.clear(); let n = values.len(); if n > h.data_points.capacity() { h.data_points.reserve_exact(n - h.data_points.capacity()); } - for (i, (a, b)) in values.drain().enumerate() { - if let Some(dp) = h.data_points.get_mut(i) { - dp.attributes = a; - dp.start_time = start; - dp.time = t; - dp.count = b.count; - dp.bounds = self.hist_values.bounds.clone(); - dp.bucket_counts = b.counts.clone(); - dp.sum = if self.hist_values.record_sum { + for (a, b) in values.drain() { + h.data_points.push(HistogramDataPoint { + attributes: a, + start_time: start, + time: t, + count: b.count, + bounds: self.hist_values.bounds.clone(), + bucket_counts: b.counts.clone(), + sum: if self.hist_values.record_sum { b.total } else { T::default() - }; - dp.min = if self.record_min_max { + }, + min: if self.record_min_max { Some(b.min) } else { None - }; - dp.max = if self.record_min_max { + }, + max: if self.record_min_max { Some(b.max) } else { None - }; - dp.exemplars.clear(); - } else { - h.data_points.push(HistogramDataPoint { - attributes: a, - start_time: start, - time: t, - count: b.count, - bounds: 
self.hist_values.bounds.clone(), - bucket_counts: b.counts.clone(), - sum: if self.hist_values.record_sum { - b.total - } else { - T::default() - }, - min: if self.record_min_max { - Some(b.min) - } else { - None - }, - max: if self.record_min_max { - Some(b.max) - } else { - None - }, - exemplars: vec![], - }); - }; + }, + exemplars: vec![], + }); } // The delta collection cycle resets. @@ -219,8 +195,6 @@ impl> Histogram { *start = t; } - h.data_points.truncate(n); - (n, new_agg.map(|a| Box::new(a) as Box<_>)) } @@ -249,70 +223,44 @@ impl> Histogram { }; let h = h.unwrap_or_else(|| new_agg.as_mut().expect("present if h is none")); h.temporality = Temporality::Cumulative; + h.data_points.clear(); let n = values.len(); if n > h.data_points.capacity() { - h.data_points.reserve(n - h.data_points.capacity()); + h.data_points.reserve_exact(n - h.data_points.capacity()); } // TODO: This will use an unbounded amount of memory if there // are unbounded number of attribute sets being aggregated. Attribute // sets that become "stale" need to be forgotten so this will not // overload the system. - for (i, (a, b)) in values.iter().enumerate() { - if let Some(dp) = h.data_points.get_mut(i) { - dp.attributes = a.clone(); - dp.start_time = start; - dp.time = t; - dp.count = b.count; - dp.bounds = self.hist_values.bounds.clone(); - dp.bucket_counts = b.counts.clone(); - dp.sum = if self.hist_values.record_sum { + for (a, b) in values.iter() { + h.data_points.push(HistogramDataPoint { + attributes: a.clone(), + start_time: start, + time: t, + count: b.count, + bounds: self.hist_values.bounds.clone(), + bucket_counts: b.counts.clone(), + sum: if self.hist_values.record_sum { b.total } else { T::default() - }; - dp.min = if self.record_min_max { + }, + min: if self.record_min_max { Some(b.min) } else { None - }; - dp.max = if self.record_min_max { + }, + max: if self.record_min_max { Some(b.max) } else { None - }; - dp.exemplars.clear(); - } else { - h.data_points.push(HistogramDataPoint { - attributes: a.clone(), - start_time: start, - time: t, - count: b.count, - bounds: self.hist_values.bounds.clone(), - bucket_counts: b.counts.clone(), - sum: if self.hist_values.record_sum { - b.total - } else { - T::default() - }, - min: if self.record_min_max { - Some(b.min) - } else { - None - }, - max: if self.record_min_max { - Some(b.max) - } else { - None - }, - exemplars: vec![], - }); - }; + }, + exemplars: vec![], + }); } - h.data_points.truncate(n); - (n, new_agg.map(|a| Box::new(a) as Box<_>)) } } diff --git a/opentelemetry-sdk/src/metrics/internal/last_value.rs b/opentelemetry-sdk/src/metrics/internal/last_value.rs index 57f35fc166..e5b2364b5b 100644 --- a/opentelemetry-sdk/src/metrics/internal/last_value.rs +++ b/opentelemetry-sdk/src/metrics/internal/last_value.rs @@ -53,37 +53,25 @@ impl> LastValue { } pub(crate) fn compute_aggregation(&self, dest: &mut Vec>) { + dest.clear(); let mut values = match self.values.lock() { Ok(guard) if !guard.is_empty() => guard, - _ => { - dest.clear(); // poisoned or no values recorded yet - return; - } + _ => return, }; let n = values.len(); if n > dest.capacity() { - dest.reserve(n - dest.capacity()); + dest.reserve_exact(n - dest.capacity()); } - for (i, (attrs, value)) in values.drain().enumerate() { - if let Some(dp) = dest.get_mut(i) { - dp.attributes = attrs; - dp.time = Some(value.timestamp); - dp.value = value.value; - dp.start_time = None; - dp.exemplars.clear(); - } else { - dest.push(DataPoint { - attributes: attrs, - time: Some(value.timestamp), - value: 
value.value, - start_time: None, - exemplars: vec![], - }); - } + for (attrs, value) in values.drain() { + dest.push(DataPoint { + attributes: attrs, + time: Some(value.timestamp), + value: value.value, + start_time: None, + exemplars: vec![], + }); } - - dest.truncate(n) } } diff --git a/opentelemetry-sdk/src/metrics/internal/sum.rs b/opentelemetry-sdk/src/metrics/internal/sum.rs index 3adcd24d23..3fac77c459 100644 --- a/opentelemetry-sdk/src/metrics/internal/sum.rs +++ b/opentelemetry-sdk/src/metrics/internal/sum.rs @@ -96,6 +96,7 @@ impl> Sum { let s_data = s_data.unwrap_or_else(|| new_agg.as_mut().expect("present if s_data is none")); s_data.temporality = Temporality::Delta; s_data.is_monotonic = self.monotonic; + s_data.data_points.clear(); let mut values = match self.value_map.values.lock() { Ok(v) => v, @@ -106,27 +107,20 @@ impl> Sum { if n > s_data.data_points.capacity() { s_data .data_points - .reserve(n - s_data.data_points.capacity()); + .reserve_exact(n - s_data.data_points.capacity()); } let prev_start = self.start.lock().map(|start| *start).unwrap_or(t); - for (i, (attrs, value)) in values.drain().enumerate() { - if let Some(dp) = s_data.data_points.get_mut(i) { - dp.attributes = attrs; - dp.start_time = Some(prev_start); - dp.time = Some(t); - dp.value = value; - dp.exemplars.clear() - } else { - s_data.data_points.push(DataPoint { - attributes: attrs, - start_time: Some(prev_start), - time: Some(t), - value, - exemplars: vec![], - }); - } + for (attrs, value) in values.drain() { + s_data.data_points.push(DataPoint { + attributes: attrs, + start_time: Some(prev_start), + time: Some(t), + value, + exemplars: vec![], + }); } + // The delta collection cycle resets. if let Ok(mut start) = self.start.lock() { *start = t; @@ -154,6 +148,7 @@ impl> Sum { let s_data = s_data.unwrap_or_else(|| new_agg.as_mut().expect("present if s_data is none")); s_data.temporality = Temporality::Cumulative; s_data.is_monotonic = self.monotonic; + s_data.data_points.clear(); let values = match self.value_map.values.lock() { Ok(v) => v, @@ -164,7 +159,7 @@ impl> Sum { if n > s_data.data_points.capacity() { s_data .data_points - .reserve(n - s_data.data_points.capacity()); + .reserve_exact(n - s_data.data_points.capacity()); } let prev_start = self.start.lock().map(|start| *start).unwrap_or(t); @@ -172,22 +167,14 @@ impl> Sum { // are unbounded number of attribute sets being aggregated. Attribute // sets that become "stale" need to be forgotten so this will not // overload the system. 
- for (i, (attrs, value)) in values.iter().enumerate() { - if let Some(dp) = s_data.data_points.get_mut(i) { - dp.attributes = attrs.clone(); - dp.start_time = Some(prev_start); - dp.time = Some(t); - dp.value = *value; - dp.exemplars.clear() - } else { - s_data.data_points.push(DataPoint { - attributes: attrs.clone(), - start_time: Some(prev_start), - time: Some(t), - value: *value, - exemplars: vec![], - }); - } + for (attrs, value) in values.iter() { + s_data.data_points.push(DataPoint { + attributes: attrs.clone(), + start_time: Some(prev_start), + time: Some(t), + value: *value, + exemplars: vec![], + }); } (n, new_agg.map(|a| Box::new(a) as Box<_>)) @@ -234,6 +221,9 @@ impl> PrecomputedSum { None }; let s_data = s_data.unwrap_or_else(|| new_agg.as_mut().expect("present if s_data is none")); + s_data.data_points.clear(); + s_data.temporality = Temporality::Delta; + s_data.is_monotonic = self.monotonic; let mut values = match self.value_map.values.lock() { Ok(v) => v, @@ -244,7 +234,7 @@ impl> PrecomputedSum { if n > s_data.data_points.capacity() { s_data .data_points - .reserve(n - s_data.data_points.capacity()); + .reserve_exact(n - s_data.data_points.capacity()); } let mut new_reported = HashMap::with_capacity(n); let mut reported = match self.reported.lock() { @@ -253,26 +243,18 @@ impl> PrecomputedSum { }; let default = T::default(); - for (i, (attrs, value)) in values.drain().enumerate() { + for (attrs, value) in values.drain() { let delta = value - *reported.get(&attrs).unwrap_or(&default); if delta != default { new_reported.insert(attrs.clone(), value); } - if let Some(dp) = s_data.data_points.get_mut(i) { - dp.attributes = attrs.clone(); - dp.start_time = Some(prev_start); - dp.time = Some(t); - dp.value = delta; - dp.exemplars.clear(); - } else { - s_data.data_points.push(DataPoint { - attributes: attrs.clone(), - start_time: Some(prev_start), - time: Some(t), - value: delta, - exemplars: vec![], - }); - } + s_data.data_points.push(DataPoint { + attributes: attrs.clone(), + start_time: Some(prev_start), + time: Some(t), + value: delta, + exemplars: vec![], + }); } // The delta collection cycle resets. 
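// NOTE: illustration only, not part of the patch — a standalone sketch of the
// delta computation that the `PrecomputedSum` loop above performs, with
// placeholder types. Each cycle exports `value - last_reported` and then
// remembers the new cumulative value, mirroring the patch's `reported`
// bookkeeping:
fn precomputed_deltas(
    values: &mut std::collections::HashMap<String, u64>,
    reported: &mut std::collections::HashMap<String, u64>,
) -> Vec<(String, u64)> {
    let mut out = Vec::with_capacity(values.len());
    let mut new_reported = std::collections::HashMap::with_capacity(values.len());
    for (attrs, value) in values.drain() {
        // Export only the change since the last collection cycle.
        let delta = value - *reported.get(&attrs).unwrap_or(&0);
        if delta != 0 {
            new_reported.insert(attrs.clone(), value);
        }
        out.push((attrs, delta));
    }
    *reported = new_reported;
    out
}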
@@ -304,6 +286,9 @@ impl> PrecomputedSum { None }; let s_data = s_data.unwrap_or_else(|| new_agg.as_mut().expect("present if s_data is none")); + s_data.data_points.clear(); + s_data.temporality = Temporality::Cumulative; + s_data.is_monotonic = self.monotonic; let values = match self.value_map.values.lock() { Ok(v) => v, @@ -314,7 +299,7 @@ impl> PrecomputedSum { if n > s_data.data_points.capacity() { s_data .data_points - .reserve(n - s_data.data_points.capacity()); + .reserve_exact(n - s_data.data_points.capacity()); } let mut new_reported = HashMap::with_capacity(n); let mut reported = match self.reported.lock() { @@ -323,26 +308,18 @@ impl> PrecomputedSum { }; let default = T::default(); - for (i, (attrs, value)) in values.iter().enumerate() { + for (attrs, value) in values.iter() { let delta = *value - *reported.get(attrs).unwrap_or(&default); if delta != default { new_reported.insert(attrs.clone(), *value); } - if let Some(dp) = s_data.data_points.get_mut(i) { - dp.attributes = attrs.clone(); - dp.start_time = Some(prev_start); - dp.time = Some(t); - dp.value = delta; - dp.exemplars.clear(); - } else { - s_data.data_points.push(DataPoint { - attributes: attrs.clone(), - start_time: Some(prev_start), - time: Some(t), - value: delta, - exemplars: vec![], - }); - } + s_data.data_points.push(DataPoint { + attributes: attrs.clone(), + start_time: Some(prev_start), + time: Some(t), + value: delta, + exemplars: vec![], + }); } *reported = new_reported; From 4e80e3a2ebf96a166917b169f7d27e44e83cd3da Mon Sep 17 00:00:00 2001 From: Olivier Soucy Date: Wed, 13 Dec 2023 15:06:54 -0500 Subject: [PATCH 64/68] Fix opentelemetry appender tracing test (#1441) --- opentelemetry-appender-tracing/Cargo.toml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/opentelemetry-appender-tracing/Cargo.toml b/opentelemetry-appender-tracing/Cargo.toml index d38a6f9325..fa3d45819e 100644 --- a/opentelemetry-appender-tracing/Cargo.toml +++ b/opentelemetry-appender-tracing/Cargo.toml @@ -21,10 +21,12 @@ tracing-subscriber = { version = "0.3", default-features = false, features = ["r once_cell = "1.13.0" [dev-dependencies] +log = "0.4" opentelemetry-stdout = { path = "../opentelemetry-stdout", features = ["logs"] } +opentelemetry_sdk = { path = "../opentelemetry-sdk", features = ["logs", "testing"] } +tracing-log = "0.2" [features] experimental_metadata_attributes = ["dep:tracing-log"] logs_level_enabled = ["opentelemetry/logs_level_enabled", "opentelemetry_sdk/logs_level_enabled"] default = ["logs_level_enabled"] -testing = ["opentelemetry_sdk/testing", "dep:tracing-log", "dep:log"] From 03b42ac4e4b58c8bf236511753f01aa1f02af5eb Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Mon, 18 Dec 2023 17:50:19 -0800 Subject: [PATCH 65/68] Fix changelog (#1447) --- opentelemetry-appender-tracing/CHANGELOG.md | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/opentelemetry-appender-tracing/CHANGELOG.md b/opentelemetry-appender-tracing/CHANGELOG.md index 587e279113..ef1c4f3a4d 100644 --- a/opentelemetry-appender-tracing/CHANGELOG.md +++ b/opentelemetry-appender-tracing/CHANGELOG.md @@ -2,6 +2,14 @@ ## vNext +### Added + +- New experimental metadata attributes feature (experimental\_metadata\_attributes) [#1380](https://github.com/open-telemetry/opentelemetry-rust/pull/1380) + - Experimental new attributes for tracing metadata + - Fixes the following for events emitted using log crate + - Normalized metadata fields + - Remove redundant metadata + ## v0.2.0 ### Changed @@ -11,11 
+19,6 @@ ### Added - Add log appender versions to loggers (#1182) -- New experimental metadata attributes feature (experimental\_metadata\_attributes) [#1380](https://github.com/open-telemetry/opentelemetry-rust/pull/1380) - - Experimental new attributes for tracing metadata - - Fixes the following for events emitted using log crate - - Normalized metadata fields - - Remove redundant metadata ## v0.1.0 From d3497392fb30a73206c3d6e798286f0051e685ef Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Mon, 18 Dec 2023 18:33:27 -0800 Subject: [PATCH 66/68] Minor doc fix to reflect correct crate for runtime (#1449) Fixes https://github.com/open-telemetry/opentelemetry-rust/issues/1357 --- opentelemetry-jaeger/README.md | 2 +- opentelemetry-otlp/README.md | 2 +- opentelemetry-otlp/src/lib.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/opentelemetry-jaeger/README.md b/opentelemetry-jaeger/README.md index c5a876b0ea..8678fc2520 100644 --- a/opentelemetry-jaeger/README.md +++ b/opentelemetry-jaeger/README.md @@ -68,7 +68,7 @@ automatically. ```toml [dependencies] -opentelemetry = { version = "*", features = ["rt-tokio"] } +opentelemetry_sdk = { version = "*", features = ["rt-tokio"] } opentelemetry-jaeger = { version = "*", features = ["rt-tokio"] } ``` diff --git a/opentelemetry-otlp/README.md b/opentelemetry-otlp/README.md index ea0eaca9e9..d28d1d9f46 100644 --- a/opentelemetry-otlp/README.md +++ b/opentelemetry-otlp/README.md @@ -70,7 +70,7 @@ automatically. ```toml [dependencies] -opentelemetry = { version = "*", features = ["async-std"] } +opentelemetry_sdk = { version = "*", features = ["async-std"] } opentelemetry-otlp = { version = "*", features = ["grpc-sys"] } ``` diff --git a/opentelemetry-otlp/src/lib.rs b/opentelemetry-otlp/src/lib.rs index 1c8b41390e..b886ad0601 100644 --- a/opentelemetry-otlp/src/lib.rs +++ b/opentelemetry-otlp/src/lib.rs @@ -1,4 +1,4 @@ -//! The OTLP Exporter supports exporting trace and metric data in the OTLP +//! The OTLP Exporter supports exporting logs, metrics and traces in the OTLP //! format to the OpenTelemetry collector or other compatible backend. //! //! The OpenTelemetry Collector offers a vendor-agnostic implementation on how From 09ae384174dd79cce124357a648058e198b3ca61 Mon Sep 17 00:00:00 2001 From: Bouke van der Bijl Date: Tue, 19 Dec 2023 07:20:42 +0100 Subject: [PATCH 67/68] Add impl Into to some logs methods (#1442) This adds `impl Into` to two logs methods, the first to match the equivalent metrics/traces method and the other to make `with_body` more ergonomic so you can do `with_body("hello")` --- opentelemetry-sdk/benches/log.rs | 10 +++++----- opentelemetry/src/global/logs.rs | 2 +- opentelemetry/src/logs/record.rs | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/opentelemetry-sdk/benches/log.rs b/opentelemetry-sdk/benches/log.rs index ef050b602a..0bdeb01ded 100644 --- a/opentelemetry-sdk/benches/log.rs +++ b/opentelemetry-sdk/benches/log.rs @@ -57,18 +57,18 @@ fn log_benchmark_group(c: &mut Criterion, name: &str, f: F) fn criterion_benchmark(c: &mut Criterion) { log_benchmark_group(c, "simple-log", |logger| { - logger.emit(LogRecord::builder().with_body("simple log".into()).build()) + logger.emit(LogRecord::builder().with_body("simple log").build()) }); log_benchmark_group(c, "long-log", |logger| { - logger.emit(LogRecord::builder().with_body("Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. 
Gravida in fermentum et sollicitudin ac orci phasellus. Ullamcorper dignissim cras tincidunt lobortis feugiat vivamus at augue. Magna etiam tempor orci eu. Sed tempus urna et pharetra pharetra massa.".into()).build()) + logger.emit(LogRecord::builder().with_body("Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Gravida in fermentum et sollicitudin ac orci phasellus. Ullamcorper dignissim cras tincidunt lobortis feugiat vivamus at augue. Magna etiam tempor orci eu. Sed tempus urna et pharetra pharetra massa.").build()) }); let now = SystemTime::now(); log_benchmark_group(c, "full-log", |logger| { logger.emit( LogRecord::builder() - .with_body("full log".into()) + .with_body("full log") .with_timestamp(now) .with_observed_timestamp(now) .with_severity_number(Severity::Warn) @@ -80,7 +80,7 @@ fn criterion_benchmark(c: &mut Criterion) { log_benchmark_group(c, "full-log-with-4-attributes", |logger| { logger.emit( LogRecord::builder() - .with_body("full log".into()) + .with_body("full log") .with_timestamp(now) .with_observed_timestamp(now) .with_severity_number(Severity::Warn) @@ -96,7 +96,7 @@ fn criterion_benchmark(c: &mut Criterion) { log_benchmark_group(c, "full-log-with-9-attributes", |logger| { logger.emit( LogRecord::builder() - .with_body("full log".into()) + .with_body("full log") .with_timestamp(now) .with_observed_timestamp(now) .with_severity_number(Severity::Warn) diff --git a/opentelemetry/src/global/logs.rs b/opentelemetry/src/global/logs.rs index e0b0da70c2..e2b53f43ee 100644 --- a/opentelemetry/src/global/logs.rs +++ b/opentelemetry/src/global/logs.rs @@ -112,7 +112,7 @@ pub fn logger_provider() -> GlobalLoggerProvider { /// If `name` is an empty string, the provider will use a default name. 
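// NOTE: illustration only, not part of the patch — a sketch of the ergonomic
// change in this commit, assuming the `logs` feature is enabled; the logger
// name and body text are placeholders.
fn emit_hello() {
    use opentelemetry::global;
    use opentelemetry::logs::{LogRecord, Logger as _};

    // Previously callers wrote global::logger("my-component".into()) and
    // .with_body("hello".into()); the new `impl Into` bounds accept the
    // unconverted values directly.
    let logger = global::logger("my-component");
    logger.emit(LogRecord::builder().with_body("hello").build());
}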
/// /// [`Logger`]: crate::logs::Logger -pub fn logger(name: Cow<'static, str>) -> BoxedLogger { +pub fn logger(name: impl Into>) -> BoxedLogger { logger_provider().logger(name) } diff --git a/opentelemetry/src/logs/record.rs b/opentelemetry/src/logs/record.rs index dba4c02ef6..1d380cff48 100644 --- a/opentelemetry/src/logs/record.rs +++ b/opentelemetry/src/logs/record.rs @@ -331,10 +331,10 @@ impl LogRecordBuilder { } /// Assign body - pub fn with_body(self, body: AnyValue) -> Self { + pub fn with_body(self, body: impl Into) -> Self { Self { record: LogRecord { - body: Some(body), + body: Some(body.into()), ..self.record }, } From 7f77ec69f498c4d4d7b604babc20efa79ef043f4 Mon Sep 17 00:00:00 2001 From: Jasper Zeng Date: Wed, 20 Dec 2023 10:13:13 +0800 Subject: [PATCH 68/68] feat: make trace state can be propagated for dropped sampler (#1435) --- opentelemetry-sdk/src/trace/mod.rs | 94 +++++++- opentelemetry-sdk/src/trace/tracer.rs | 296 ++++++++++++-------------- 2 files changed, 231 insertions(+), 159 deletions(-) diff --git a/opentelemetry-sdk/src/trace/mod.rs b/opentelemetry-sdk/src/trace/mod.rs index 932652d43b..ea1ab74856 100644 --- a/opentelemetry-sdk/src/trace/mod.rs +++ b/opentelemetry-sdk/src/trace/mod.rs @@ -43,12 +43,16 @@ mod tests { testing::trace::InMemorySpanExporterBuilder, trace::span_limit::{DEFAULT_MAX_EVENT_PER_SPAN, DEFAULT_MAX_LINKS_PER_SPAN}, }; + use opentelemetry::testing::trace::TestSpan; + use opentelemetry::trace::{ + SamplingDecision, SamplingResult, SpanKind, TraceContextExt, TraceState, + }; use opentelemetry::{ trace::{ Event, Link, Span, SpanBuilder, SpanContext, SpanId, TraceFlags, TraceId, Tracer, TracerProvider as _, }, - KeyValue, + Context, KeyValue, }; #[test] @@ -176,4 +180,92 @@ mod tests { assert_eq!(span.events.len(), DEFAULT_MAX_EVENT_PER_SPAN as usize); assert_eq!(span.events.dropped_count, DEFAULT_MAX_EVENT_PER_SPAN + 2); } + + #[test] + fn trace_state_for_dropped_sampler() { + let exporter = InMemorySpanExporterBuilder::new().build(); + let provider = TracerProvider::builder() + .with_config(config().with_sampler(Sampler::AlwaysOff)) + .with_span_processor(SimpleSpanProcessor::new(Box::new(exporter.clone()))) + .build(); + + let tracer = provider.tracer("test"); + let trace_state = TraceState::from_key_value(vec![("foo", "bar")]).unwrap(); + + let parent_context = Context::new().with_span(TestSpan(SpanContext::new( + TraceId::from_u128(10000), + SpanId::from_u64(20), + TraceFlags::SAMPLED, + true, + trace_state.clone(), + ))); + + let span = tracer.start_with_context("span", &parent_context); + assert_eq!( + span.span_context().trace_state().get("foo"), + trace_state.get("foo") + ) + } + + #[derive(Clone, Debug, Default)] + struct TestRecordOnlySampler {} + + impl ShouldSample for TestRecordOnlySampler { + fn should_sample( + &self, + parent_context: Option<&Context>, + _trace_id: TraceId, + _name: &str, + _span_kind: &SpanKind, + _attributes: &[KeyValue], + _links: &[Link], + ) -> SamplingResult { + let trace_state = parent_context + .unwrap() + .span() + .span_context() + .trace_state() + .clone(); + SamplingResult { + decision: SamplingDecision::RecordOnly, + attributes: vec![KeyValue::new("record_only_key", "record_only_value")], + trace_state, + } + } + } + + #[test] + fn trace_state_for_record_only_sampler() { + let exporter = InMemorySpanExporterBuilder::new().build(); + let provider = TracerProvider::builder() + .with_config(config().with_sampler(TestRecordOnlySampler::default())) + 
.with_span_processor(SimpleSpanProcessor::new(Box::new(exporter.clone()))) + .build(); + + let tracer = provider.tracer("test"); + let trace_state = TraceState::from_key_value(vec![("foo", "bar")]).unwrap(); + + let parent_context = Context::new().with_span(TestSpan(SpanContext::new( + TraceId::from_u128(10000), + SpanId::from_u64(20), + TraceFlags::SAMPLED, + true, + trace_state.clone(), + ))); + + let span = tracer.build_with_context( + SpanBuilder::from_name("span") + .with_attributes(vec![KeyValue::new("extra_attr_key", "extra_attr_value")]), + &parent_context, + ); + assert!(!span.span_context().trace_flags().is_sampled()); + assert_eq!( + span.exported_data().unwrap().attributes, + vec![ + KeyValue::new("extra_attr_key", "extra_attr_value"), + KeyValue::new("record_only_key", "record_only_value") + ] + ); + assert_eq!(span.span_context().trace_state().get("foo"), Some("bar")); + } } diff --git a/opentelemetry-sdk/src/trace/tracer.rs b/opentelemetry-sdk/src/trace/tracer.rs index cc5497b846..dcfe1e565d 100644 --- a/opentelemetry-sdk/src/trace/tracer.rs +++ b/opentelemetry-sdk/src/trace/tracer.rs @@ -11,15 +11,12 @@ use crate::{ trace::{ provider::{TracerProvider, TracerProviderInner}, span::{Span, SpanData}, - Config, SpanLimits, SpanLinks, + SpanLimits, SpanLinks, }, InstrumentationLibrary, }; use opentelemetry::{ - trace::{ - Link, SamplingDecision, SamplingResult, SpanBuilder, SpanContext, SpanId, SpanKind, - TraceContextExt, TraceFlags, TraceId, TraceState, - }, + trace::{SamplingDecision, SpanBuilder, SpanContext, SpanKind, TraceContextExt, TraceFlags}, Context, KeyValue, }; use std::fmt; @@ -67,57 +64,101 @@ impl Tracer { &self.instrumentation_lib } - /// Make a sampling decision using the provided sampler for the span and context. - #[allow(clippy::too_many_arguments)] - fn make_sampling_decision( + fn build_recording_span( &self, - parent_cx: &Context, - trace_id: TraceId, - name: &str, - span_kind: &SpanKind, - attributes: &[KeyValue], - links: &[Link], - config: &Config, - ) -> Option<(TraceFlags, Vec, TraceState)> { - let sampling_result = config.sampler.should_sample( - Some(parent_cx), - trace_id, - name, - span_kind, - attributes, - links, - ); + psc: &SpanContext, + sc: SpanContext, + mut builder: SpanBuilder, + attrs: Vec, + span_limits: SpanLimits, + ) -> Span { + let mut attribute_options = builder.attributes.take().unwrap_or_default(); + for extra_attr in attrs { + attribute_options.push(extra_attr); + } + let span_attributes_limit = span_limits.max_attributes_per_span as usize; + let dropped_attributes_count = attribute_options + .len() + .saturating_sub(span_attributes_limit); + attribute_options.truncate(span_attributes_limit); + let dropped_attributes_count = dropped_attributes_count as u32; + + // Links are available as Option> in the builder + // If it is None, then there are no links to process. + // In that case Span.Links will be default (empty Vec, 0 drop count) + // Otherwise, truncate Vec to keep until limits and use that in Span.Links. + // Store the count of excess links into Span.Links.dropped_count. + // There is no ability today to add Links after Span creation, + // but such a capability will be needed in the future + // once the spec for that stabilizes. 
+ + let spans_links_limit = span_limits.max_links_per_span as usize; + let span_links: SpanLinks = if let Some(mut links) = builder.links.take() { + let dropped_count = links.len().saturating_sub(spans_links_limit); + links.truncate(spans_links_limit); + let link_attributes_limit = span_limits.max_attributes_per_link as usize; + for link in links.iter_mut() { + let dropped_attributes_count = + link.attributes.len().saturating_sub(link_attributes_limit); + link.attributes.truncate(link_attributes_limit); + link.dropped_attributes_count = dropped_attributes_count as u32; + } + SpanLinks { + links, + dropped_count: dropped_count as u32, + } + } else { + SpanLinks::default() + }; - self.process_sampling_result(sampling_result, parent_cx) - } + let SpanBuilder { + name, + start_time, + end_time, + events, + status, + .. + } = builder; - fn process_sampling_result( - &self, - sampling_result: SamplingResult, - parent_cx: &Context, - ) -> Option<(TraceFlags, Vec, TraceState)> { - match sampling_result { - SamplingResult { - decision: SamplingDecision::Drop, - .. - } => None, - SamplingResult { - decision: SamplingDecision::RecordOnly, - attributes, - trace_state, - } => { - let trace_flags = parent_cx.span().span_context().trace_flags(); - Some((trace_flags.with_sampled(false), attributes, trace_state)) + let start_time = start_time.unwrap_or_else(opentelemetry::time::now); + let end_time = end_time.unwrap_or(start_time); + let spans_events_limit = span_limits.max_events_per_span as usize; + let span_events: SpanEvents = if let Some(mut events) = events { + let dropped_count = events.len().saturating_sub(spans_events_limit); + events.truncate(spans_events_limit); + let event_attributes_limit = span_limits.max_attributes_per_event as usize; + for event in events.iter_mut() { + let dropped_attributes_count = event + .attributes + .len() + .saturating_sub(event_attributes_limit); + event.attributes.truncate(event_attributes_limit); + event.dropped_attributes_count = dropped_attributes_count as u32; } - SamplingResult { - decision: SamplingDecision::RecordAndSample, - attributes, - trace_state, - } => { - let trace_flags = parent_cx.span().span_context().trace_flags(); - Some((trace_flags.with_sampled(true), attributes, trace_state)) + SpanEvents { + events, + dropped_count: dropped_count as u32, } - } + } else { + SpanEvents::default() + }; + Span::new( + sc, + Some(SpanData { + parent_span_id: psc.span_id(), + span_kind: builder.span_kind.take().unwrap_or(SpanKind::Internal), + name, + start_time, + end_time, + attributes: attribute_options, + dropped_attributes_count, + events: span_events, + links: span_links, + status, + }), + self.clone(), + span_limits, + ) } } @@ -145,14 +186,13 @@ impl opentelemetry::trace::Tracer for Tracer { let provider = provider.unwrap(); let config = provider.config(); - let span_limits = config.span_limits; let span_id = builder .span_id .take() .unwrap_or_else(|| config.id_generator.new_span_id()); let span_kind = builder.span_kind.take().unwrap_or(SpanKind::Internal); - let mut parent_span_id = SpanId::INVALID; let trace_id; + let mut psc = &SpanContext::empty_context(); let parent_span = if parent_cx.has_active_span() { Some(parent_cx.span()) @@ -162,8 +202,8 @@ impl opentelemetry::trace::Tracer for Tracer { // Build context for sampling decision if let Some(sc) = parent_span.as_ref().map(|parent| parent.span_context()) { - parent_span_id = sc.span_id(); trace_id = sc.trace_id(); + psc = sc; } else { trace_id = builder .trace_id @@ -172,121 +212,61 @@ impl 
opentelemetry::trace::Tracer for Tracer { // In order to accommodate use cases like `tracing-opentelemetry` we there is the ability // to use pre-sampling. Otherwise, the standard method of sampling is followed. - let sampling_decision = if let Some(sampling_result) = builder.sampling_result.take() { - self.process_sampling_result(sampling_result, parent_cx) + let samplings_result = if let Some(sr) = builder.sampling_result.take() { + sr } else { - self.make_sampling_decision( - parent_cx, + config.sampler.should_sample( + Some(parent_cx), trace_id, &builder.name, &span_kind, builder.attributes.as_ref().unwrap_or(&Vec::new()), builder.links.as_deref().unwrap_or(&[]), - provider.config(), ) }; - let SpanBuilder { - name, - start_time, - end_time, - events, - status, - .. - } = builder; - + let trace_flags = parent_cx.span().span_context().trace_flags(); + let trace_state = samplings_result.trace_state; + let span_limits = config.span_limits; // Build optional inner context, `None` if not recording. - let mut span = if let Some((flags, extra_attrs, trace_state)) = sampling_decision { - let mut attribute_options = builder.attributes.take().unwrap_or_default(); - for extra_attr in extra_attrs { - attribute_options.push(extra_attr); + let mut span = match samplings_result.decision { + SamplingDecision::RecordAndSample => { + let sc = SpanContext::new( + trace_id, + span_id, + trace_flags.with_sampled(true), + false, + trace_state, + ); + self.build_recording_span( + psc, + sc, + builder, + samplings_result.attributes, + span_limits, + ) + } + SamplingDecision::RecordOnly => { + let sc = SpanContext::new( + trace_id, + span_id, + trace_flags.with_sampled(false), + false, + trace_state, + ); + self.build_recording_span( + psc, + sc, + builder, + samplings_result.attributes, + span_limits, + ) + } + SamplingDecision::Drop => { + let span_context = + SpanContext::new(trace_id, span_id, TraceFlags::default(), false, trace_state); + Span::new(span_context, None, self.clone(), span_limits) } - - let span_attributes_limit = span_limits.max_attributes_per_span as usize; - let dropped_attributes_count = attribute_options - .len() - .saturating_sub(span_attributes_limit); - attribute_options.truncate(span_attributes_limit); - let dropped_attributes_count = dropped_attributes_count as u32; - - // Links are available as Option> in the builder - // If it is None, then there are no links to process. - // In that case Span.Links will be default (empty Vec, 0 drop count) - // Otherwise, truncate Vec to keep until limits and use that in Span.Links. - // Store the count of excess links into Span.Links.dropped_count. - // There is no ability today to add Links after Span creation, - // but such a capability will be needed in the future - // once the spec for that stabilizes. 
- - let spans_links_limit = span_limits.max_links_per_span as usize; - let span_links: SpanLinks = if let Some(mut links) = builder.links.take() { - let dropped_count = links.len().saturating_sub(spans_links_limit); - links.truncate(spans_links_limit); - let link_attributes_limit = span_limits.max_attributes_per_link as usize; - for link in links.iter_mut() { - let dropped_attributes_count = - link.attributes.len().saturating_sub(link_attributes_limit); - link.attributes.truncate(link_attributes_limit); - link.dropped_attributes_count = dropped_attributes_count as u32; - } - SpanLinks { - links, - dropped_count: dropped_count as u32, - } - } else { - SpanLinks::default() - }; - - let start_time = start_time.unwrap_or_else(opentelemetry::time::now); - let end_time = end_time.unwrap_or(start_time); - let spans_events_limit = span_limits.max_events_per_span as usize; - let span_events: SpanEvents = if let Some(mut events) = events { - let dropped_count = events.len().saturating_sub(spans_events_limit); - events.truncate(spans_events_limit); - let event_attributes_limit = span_limits.max_attributes_per_event as usize; - for event in events.iter_mut() { - let dropped_attributes_count = event - .attributes - .len() - .saturating_sub(event_attributes_limit); - event.attributes.truncate(event_attributes_limit); - event.dropped_attributes_count = dropped_attributes_count as u32; - } - SpanEvents { - events, - dropped_count: dropped_count as u32, - } - } else { - SpanEvents::default() - }; - - let span_context = SpanContext::new(trace_id, span_id, flags, false, trace_state); - Span::new( - span_context, - Some(SpanData { - parent_span_id, - span_kind, - name, - start_time, - end_time, - attributes: attribute_options, - dropped_attributes_count, - events: span_events, - links: span_links, - status, - }), - self.clone(), - span_limits, - ) - } else { - let span_context = SpanContext::new( - trace_id, - span_id, - TraceFlags::default(), - false, - Default::default(), - ); - Span::new(span_context, None, self.clone(), span_limits) }; // Call `on_start` for all processors
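// NOTE: illustration only, not part of the patch — the net effect of this
// final commit on the `Drop` branch, shown schematically. Previously the
// dropped span's context discarded the sampler-provided trace state:
//
//     // before
//     SpanContext::new(trace_id, span_id, TraceFlags::default(), false, Default::default())
//     // after
//     SpanContext::new(trace_id, span_id, TraceFlags::default(), false, trace_state)
//
// This is exactly what the new `trace_state_for_dropped_sampler` test above
// exercises: the parent's `TraceState` survives onto the child `SpanContext`
// even when the sampler decides not to record the span.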