From 613849da643d6d0b525e548063a6bae2d9604d2e Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 27 Jul 2021 17:12:14 +0800 Subject: [PATCH] [7.x] Update opentelemetry-collector to v0.30.0 (backport #5749) (#5792) * Update opentelemetry-collector to v0.30.0 (#5749) * Update opentelemetry-collector to v0.30.0 * Replace glog * Adapt code to opentelemetry-collector API changes (cherry picked from commit 3de7384b17863cb705d537bebdbaff4e5f9d0414) # Conflicts: # NOTICE.txt # go.mod # go.sum * fix merge conflicts Co-authored-by: Andrew Wilkins --- NOTICE.txt | 5271 ++++++++------ beater/jaeger/grpc_test.go | 14 +- beater/jaeger/http_test.go | 2 +- beater/otlp/grpc.go | 4 +- beater/otlp/grpc_test.go | 110 +- go.mod | 29 +- go.sum | 382 +- .../receiver/otlpreceiver/mixin.go | 43 +- internal/otel_collector/CHANGELOG.md | 75 + internal/otel_collector/Makefile | 30 +- internal/otel_collector/cmd/otelcol/main.go | 8 +- .../otel_collector/cmd/otelcol/main_others.go | 2 +- .../cmd/otelcol/main_windows.go | 4 +- .../cmd/pdatagen/internal/base_fields.go | 69 + .../cmd/pdatagen/internal/base_slices.go | 212 +- .../cmd/pdatagen/internal/common_structs.go | 4 +- .../cmd/pdatagen/internal/log_structs.go | 4 +- .../cmd/pdatagen/internal/metrics_structs.go | 50 +- .../cmd/pdatagen/internal/resource_structs.go | 2 +- .../cmd/pdatagen/internal/trace_structs.go | 4 +- internal/otel_collector/cmd/pdatagen/main.go | 4 +- .../component/componenttest/nop_exporter.go | 6 +- .../component/componenttest/nop_extension.go | 6 +- .../component/componenttest/nop_processor.go | 6 +- .../component/componenttest/nop_receiver.go | 6 +- .../componenttest/shutdown_verifier.go | 2 +- internal/otel_collector/component/exporter.go | 4 + .../otel_collector/component/extension.go | 4 + internal/otel_collector/component/host.go | 2 +- .../otel_collector/component/processor.go | 4 + internal/otel_collector/component/receiver.go | 4 + .../config/configcheck/configcheck.go | 2 +- .../config/configgrpc/README.md | 5 +- .../config/configgrpc/configgrpc.go | 22 +- .../config/confighttp/confighttp.go | 26 +- .../config/configloader/config.go | 8 +- .../config/configparser/parser.go | 172 +- .../otel_collector/config/configtls/README.md | 8 + .../config/configtls/configtls.go | 42 +- .../config/internal/configsource/manager.go | 10 + internal/otel_collector/consumer/consumer.go | 2 +- .../consumer/consumererror/permanent.go | 4 +- .../consumer/consumererror/signalerrors.go | 8 +- .../consumer/consumerhelper/common.go | 4 +- .../consumer/consumerhelper/logs.go | 2 +- .../consumer/consumerhelper/metrics.go | 2 +- .../consumer/consumerhelper/traces.go | 2 +- .../consumer/consumertest/base_consumer.go | 1 + .../consumer/consumertest/consumer.go | 8 +- .../consumer/consumertest/err.go | 4 +- .../consumer/consumertest/nop.go | 2 +- .../consumer/consumertest/sink.go | 50 +- .../otel_collector/consumer/pdata/common.go | 814 --- internal/otel_collector/consumer/pdata/doc.go | 32 - .../consumer/pdata/generated_common.go | 207 - .../consumer/pdata/generated_log.go | 651 -- .../consumer/pdata/generated_metrics.go | 2595 ------- .../consumer/pdata/generated_resource.go | 54 - .../consumer/pdata/generated_trace.go | 1168 --- .../otel_collector/consumer/pdata/logs.go | 138 - .../otel_collector/consumer/pdata/metrics.go | 314 - .../otel_collector/consumer/pdata/spanid.go | 49 - .../consumer/pdata/timestamp.go | 38 - .../otel_collector/consumer/pdata/traceid.go | 49 - 
.../otel_collector/consumer/pdata/traces.go | 162 - .../otel_collector/consumer/simple/metrics.go | 361 - .../otel_collector/docs/metric-metadata.yaml | 6 +- .../otel_collector/docs/service-extensions.md | 2 +- .../simple => exporter/exporterhelper}/doc.go | 5 +- .../exporter/exporterhelper/logs.go | 10 +- .../exporter/exporterhelper/metrics.go | 13 +- .../exporter/exporterhelper/queued_retry.go | 55 +- .../exporterhelper/resource_to_label.go | 10 +- .../exporter/exporterhelper/traces.go | 10 +- .../fileexporter/doc.go} | 12 +- .../exporter/fileexporter/factory.go | 6 +- .../exporter/fileexporter/file_exporter.go | 10 +- .../exporter/jaegerexporter/doc.go | 3 +- .../exporter/jaegerexporter/exporter.go | 8 +- .../exporter/jaegerexporter/factory.go | 2 +- .../exporter/kafkaexporter/doc.go} | 16 +- .../exporter/kafkaexporter/factory.go | 6 +- .../kafkaexporter/jaeger_marshaler.go | 2 +- .../exporter/kafkaexporter/kafka_exporter.go | 2 +- .../exporter/kafkaexporter/marshaler.go | 15 +- .../exporter/kafkaexporter/otlp_marshaler.go | 84 - .../exporter/kafkaexporter/pdata_marshaler.go | 108 + .../loggingexporter/doc.go} | 16 +- .../exporter/loggingexporter/factory.go | 12 +- .../loggingexporter/logging_exporter.go | 102 +- .../exporter/opencensusexporter/doc.go | 16 + .../exporter/opencensusexporter/factory.go | 4 +- .../exporter/opencensusexporter/opencensus.go | 2 +- .../exporter/otlpexporter/doc.go | 16 + .../exporter/otlpexporter/factory.go | 6 +- .../exporter/otlpexporter/otlp.go | 16 +- .../exporter/otlphttpexporter/doc.go | 16 + .../exporter/otlphttpexporter/factory.go | 6 +- .../exporter/otlphttpexporter/otlp.go | 21 +- .../prometheusexporter/accumulator.go | 34 +- .../exporter/prometheusexporter/collector.go | 16 +- .../exporter/prometheusexporter/doc.go | 16 + .../exporter/prometheusexporter/factory.go | 2 +- .../exporter/prometheusexporter/prometheus.go | 2 +- .../prometheusremotewriteexporter/doc.go | 16 + .../prometheusremotewriteexporter/exporter.go | 11 +- .../prometheusremotewriteexporter/factory.go | 10 +- .../prometheusremotewriteexporter/helper.go | 10 +- .../exporter/zipkinexporter/doc.go | 16 + .../exporter/zipkinexporter/factory.go | 2 +- .../exporter/zipkinexporter/zipkin.go | 10 +- .../extension/ballastextension/README.md | 30 +- .../extension/ballastextension/config.go | 19 +- .../extension/ballastextension/factory.go | 6 +- .../ballastextension/memory_ballast.go | 32 +- .../ballastextension/testdata/config.yaml | 1 + .../testdata/config_invalid.yaml | 19 + internal/otel_collector/go.mod | 53 +- internal/otel_collector/go.sum | 522 +- .../internal/buildscripts/compare-apidiff.sh | 22 +- .../internal/cgroups/cgroup.go | 0 .../internal/cgroups/cgroups.go | 6 +- .../internal/cgroups/doc.go | 0 .../internal/cgroups/errors.go | 0 .../internal/cgroups/mountpoint.go | 0 .../internal/cgroups/subsys.go | 0 .../testdata/cgroups/cpu/cpu.cfs_period_us | 0 .../testdata/cgroups/cpu/cpu.cfs_quota_us | 0 .../testdata/cgroups/empty/cpu.cfs_quota_us | 0 .../testdata/cgroups/invalid/cpu.cfs_quota_us | 0 .../cgroups/undefined-period/cpu.cfs_quota_us | 0 .../cgroups/undefined/cpu.cfs_period_us | 0 .../cgroups/undefined/cpu.cfs_quota_us | 0 .../cgroups/testdata/proc/cgroups/cgroup | 0 .../cgroups/testdata/proc/cgroups/mountinfo | 0 .../testdata/proc/invalid-cgroup/cgroup | 0 .../testdata/proc/invalid-mountinfo/mountinfo | 0 .../testdata/proc/untranslatable/cgroup | 0 .../testdata/proc/untranslatable/mountinfo | 0 .../otel_collector/internal/data/.gitignore | 1 - 
.../otel_collector/internal/data/bytesid.go | 67 - .../collector/logs/v1/logs_service.pb.go | 552 -- .../collector/logs/v1/logs_service.pb.gw.go | 169 - .../metrics/v1/metrics_service.pb.go | 552 -- .../metrics/v1/metrics_service.pb.gw.go | 169 - .../collector/trace/v1/trace_config.pb.go | 1249 ---- .../collector/trace/v1/trace_service.pb.go | 552 -- .../collector/trace/v1/trace_service.pb.gw.go | 169 - .../trace/v1/trace_service_gateway_aliases.go | 80 - .../data/protogen/common/v1/common.pb.go | 1762 ----- .../internal/data/protogen/logs/v1/logs.pb.go | 1377 ---- .../data/protogen/metrics/v1/metrics.pb.go | 6320 ----------------- .../data/protogen/resource/v1/resource.pb.go | 378 - .../data/protogen/trace/v1/trace.pb.go | 2649 ------- .../otel_collector/internal/data/spanid.go | 104 - .../otel_collector/internal/data/traceid.go | 106 - .../internal/goldendataset/metrics_gen.go | 42 +- .../goldendataset/pict_metrics_gen.go | 8 +- .../goldendataset/resource_generator.go | 8 +- .../internal/goldendataset/span_generator.go | 8 +- .../testdata/pict_input_metrics.txt | 2 +- .../goldendataset/traces_generator.go | 2 +- .../internal/idutils/big_endian_converter.go | 2 +- .../err_or_sink_consumer.go | 2 +- .../internal/iruntime/mem_info.go | 26 + .../internal/iruntime/total_memory_linux.go | 19 +- .../internal/iruntime/total_memory_other.go | 13 +- .../otel_collector/internal/model/README.md | 9 - .../otel_collector/internal/model/decoder.go | 33 - .../otel_collector/internal/model/encoder.go | 33 - .../otel_collector/internal/model/encoding.go | 43 - .../internal/model/from_translator.go | 35 - .../internal/model/marshaler.go | 114 - .../internal/model/to_translator.go | 32 - .../internal/model/unmarshal.go | 114 - .../internal/otlp/from_translator.go | 38 - .../internal/otlp/json_decoder.go | 57 - .../internal/otlp/json_encoder.go | 70 - .../otel_collector/internal/otlp/marshaler.go | 34 - .../internal/otlp/to_translator.go | 54 - .../internal/otlp/unmarshaler.go | 34 - .../otel_collector/internal/otlp_wrapper.go | 97 - .../internal/otlptext/databuffer.go | 17 +- .../otel_collector/internal/otlptext/logs.go | 17 +- .../internal/otlptext/metrics.go | 17 +- .../internal/otlptext/traces.go | 17 +- .../otel_collector/internal/pdatagrpc/logs.go | 76 - .../internal/pdatagrpc/metrics.go | 76 - .../internal/pdatagrpc/traces.go | 77 - .../internal/processor/filterexpr/matcher.go | 14 +- .../processor/filterhelper/filterhelper.go | 2 +- .../internal/processor/filterlog/filterlog.go | 2 +- .../filtermatcher/attributematcher.go | 2 +- .../processor/filtermatcher/filtermatcher.go | 2 +- .../processor/filtermetric/expr_matcher.go | 2 +- .../processor/filtermetric/filtermetric.go | 2 +- .../processor/filtermetric/name_matcher.go | 2 +- .../processor/filterspan/filterspan.go | 2 +- .../testcomponents/example_exporter.go | 2 +- .../internal/testdata/common.go | 102 +- .../otel_collector/internal/testdata/log.go | 186 +- .../internal/testdata/metric.go | 328 +- .../internal/testdata/resource.go | 16 +- .../otel_collector/internal/testdata/trace.go | 179 +- .../otel_collector/obsreport/obsreport.go | 37 +- .../obsreport/obsreport_exporter.go | 54 +- .../obsreport/obsreport_receiver.go | 156 +- .../obsreport/obsreport_scraper.go | 57 +- .../attributesprocessor/attributes_log.go | 5 +- .../attributesprocessor/attributes_trace.go | 5 +- .../processor/attributesprocessor/factory.go | 4 +- .../attributesprocessor/testdata/config.yaml | 2 +- .../batchprocessor/batch_processor.go | 4 +- 
.../processor/batchprocessor/splitlogs.go | 4 +- .../processor/batchprocessor/splitmetrics.go | 87 +- .../processor/batchprocessor/splittraces.go | 2 +- .../processor/filterprocessor/factory.go | 2 +- .../filterprocessor/filter_processor.go | 6 +- .../processor/memorylimiter/factory.go | 6 +- .../processor/memorylimiter/memorylimiter.go | 24 +- .../probabilisticsampler.go | 6 +- .../processor/processorhelper/attraction.go | 2 +- .../processor/processorhelper/hasher.go | 2 +- .../processor/processorhelper/logs.go | 29 +- .../processor/processorhelper/metrics.go | 29 +- .../processor/processorhelper/processor.go | 9 +- .../processor/processorhelper/traces.go | 29 +- .../processor/resourceprocessor/factory.go | 9 +- .../resourceprocessor/resource_processor.go | 11 +- .../processor/spanprocessor/factory.go | 2 +- .../processor/spanprocessor/span.go | 4 +- internal/otel_collector/proto_patch.sed | 6 +- internal/otel_collector/receiver/doc.go | 6 +- .../receiver/hostmetricsreceiver/doc.go | 16 + .../receiver/hostmetricsreceiver/factory.go | 8 +- .../internal/metadata/generated_metrics.go | 38 +- .../hostmetricsreceiver/internal/scraper.go | 4 +- .../scraper/cpuscraper/cpu_scraper.go | 13 +- .../scraper/cpuscraper/cpu_scraper_linux.go | 20 +- .../scraper/cpuscraper/cpu_scraper_others.go | 12 +- .../internal/scraper/cpuscraper/factory.go | 2 +- .../diskscraper/disk_scraper_others.go | 56 +- .../disk_scraper_others_fallback.go | 4 +- .../diskscraper/disk_scraper_others_linux.go | 24 +- .../diskscraper/disk_scraper_windows.go | 54 +- .../internal/scraper/diskscraper/factory.go | 2 +- .../scraper/filesystemscraper/factory.go | 2 +- .../filesystemscraper/filesystem_scraper.go | 14 +- .../filesystem_scraper_others.go | 10 +- .../filesystem_scraper_unix.go | 23 +- .../internal/scraper/loadscraper/factory.go | 2 +- .../scraper/loadscraper/load_scraper.go | 12 +- .../internal/scraper/memoryscraper/factory.go | 2 +- .../scraper/memoryscraper/memory_scraper.go | 8 +- .../memoryscraper/memory_scraper_linux.go | 14 +- .../memoryscraper/memory_scraper_others.go | 8 +- .../memoryscraper/memory_scraper_windows.go | 6 +- .../scraper/networkscraper/factory.go | 2 +- .../scraper/networkscraper/network_scraper.go | 54 +- .../internal/scraper/pagingscraper/factory.go | 2 +- .../pagingscraper/paging_scraper_others.go | 36 +- .../pagingscraper/paging_scraper_windows.go | 24 +- .../scraper/processesscraper/factory.go | 2 +- .../processesscraper/processes_scraper.go | 8 +- .../processes_scraper_darwin.go | 4 +- .../processes_scraper_fallback.go | 4 +- .../processes_scraper_linux.go | 9 +- .../processes_scraper_unix.go | 21 +- .../scraper/processscraper/factory.go | 2 +- .../scraper/processscraper/process.go | 2 +- .../scraper/processscraper/process_scraper.go | 31 +- .../processscraper/process_scraper_linux.go | 8 +- .../processscraper/process_scraper_others.go | 2 +- .../processscraper/process_scraper_windows.go | 6 +- .../hostmetricsreceiver/internal/testutils.go | 20 +- .../hostmetricsreceiver/metadata.yaml | 16 +- .../receiver/jaegerreceiver/config.go | 32 +- .../receiver/jaegerreceiver/doc.go | 16 + .../receiver/jaegerreceiver/trace_receiver.go | 6 +- .../receiver/kafkareceiver/doc.go | 16 + .../receiver/kafkareceiver/factory.go | 34 +- .../kafkareceiver/jaeger_unmarshaler.go | 2 +- .../receiver/kafkareceiver/kafka_receiver.go | 150 +- .../kafkareceiver/otlp_unmarshaler.go | 45 - .../kafkareceiver/pdata_unmarshaler.go | 79 + .../receiver/kafkareceiver/unmarshaler.go | 35 +- .../kafkareceiver/zipkin_unmarshaler.go | 
99 +- .../receiver/opencensusreceiver/doc.go | 16 + .../internal/ocmetrics/opencensus.go | 10 +- .../internal/octrace/opencensus.go | 12 +- .../receiver/otlpreceiver/config.go | 24 +- .../receiver/otlpreceiver/doc.go | 16 + .../receiver/otlpreceiver/factory.go | 12 +- .../otlpreceiver/internal/logs/otlp.go | 35 +- .../otlpreceiver/internal/marshal_jsonpb.go | 301 - .../otlpreceiver/internal/metrics/otlp.go | 38 +- .../otlpreceiver/internal/trace/otlp.go | 34 +- .../receiver/otlpreceiver/mixin.go | 43 +- .../receiver/otlpreceiver/otlp.go | 96 +- .../receiver/otlpreceiver/otlphttp.go | 162 +- .../receiver/prometheusreceiver/README.md | 8 +- .../receiver/prometheusreceiver/config.go | 20 +- .../receiver/prometheusreceiver/doc.go | 3 +- .../receiver/prometheusreceiver/factory.go | 4 +- .../prometheusreceiver/internal/metadata.go | 20 +- .../internal/metricfamily.go | 41 +- .../internal/metrics_adjuster.go | 16 +- .../internal/metricsbuilder.go | 42 +- .../prometheusreceiver/internal/ocastore.go | 16 +- .../internal/otlp_metricfamily.go | 4 +- .../internal/otlp_metricsbuilder.go | 6 +- .../internal/prom_to_otlp.go | 2 +- .../internal/staleness_store.go | 118 + .../internal/transaction.go | 24 +- .../prometheusreceiver/metrics_receiver.go | 26 +- .../invalid-config-prometheus-relabel.yaml | 22 + .../receiver/receiverhelper/doc.go | 16 + .../receiver/scrapererror/doc.go | 16 + .../scrapererror/partialscrapeerror.go | 6 +- .../{scrapeerrors.go => scrapeerror.go} | 0 .../receiver/scraperhelper/doc.go | 16 + .../receiver/scraperhelper/scraper.go | 73 +- .../scraperhelper/scrapercontroller.go | 95 +- .../receiver/zipkinreceiver/doc.go | 16 + .../receiver/zipkinreceiver/trace_receiver.go | 73 +- .../service/{application.go => collector.go} | 203 +- ...cation_windows.go => collector_windows.go} | 42 +- .../service/internal/builder/builder.go | 2 +- .../internal/builder/exporters_builder.go | 52 +- .../internal/builder/extensions_builder.go | 38 +- .../internal/builder/pipelines_builder.go | 37 +- .../internal/builder/receivers_builder.go | 49 +- .../fanoutconsumer/cloningconsumer.go | 2 +- .../internal/fanoutconsumer/consumer.go | 2 +- .../service/parserprovider/setflag.go | 59 +- internal/otel_collector/service/service.go | 21 +- internal/otel_collector/service/settings.go | 14 +- internal/otel_collector/service/telemetry.go | 12 +- .../metrics/correctness_test_case.go | 2 +- .../correctness/metrics/metric_diff.go | 14 +- .../correctness/metrics/metric_index.go | 2 +- .../correctness/metrics/metric_supplier.go | 2 +- .../metrics/metrics_test_harness.go | 2 +- ..._process.go => child_process_collector.go} | 64 +- .../testbed/testbed/data_providers.go | 149 +- .../testbed/testbed/in_process_collector.go | 137 + .../testbed/testbed/load_generator.go | 5 +- .../testbed/testbed/mock_backend.go | 9 +- .../otel_collector/testbed/testbed/options.go | 63 +- .../testbed/testbed/otelcol_runner.go | 132 +- .../testbed/testbed/receivers.go | 79 +- .../otel_collector/testbed/testbed/senders.go | 157 +- .../testbed/testbed/test_bed.go | 7 +- .../testbed/testbed/test_case.go | 64 +- .../testbed/testbed/validator.go | 4 +- .../otel_collector/testbed/tests/scenarios.go | 17 +- .../testutil/metricstestutil/metricsutil.go | 14 +- .../translator/internaldata/metrics_to_oc.go | 24 +- .../translator/internaldata/oc_to_metrics.go | 84 +- .../translator/internaldata/oc_to_resource.go | 2 +- .../translator/internaldata/oc_to_traces.go | 38 +- .../translator/internaldata/resource_to_oc.go | 2 +- 
.../translator/internaldata/timestamp.go | 2 +- .../translator/internaldata/traces_to_oc.go | 2 +- .../trace/{ => internal}/zipkin/attributes.go | 28 +- .../translator/trace/jaeger/constants.go | 4 +- .../trace/jaeger/jaegerproto_to_traces.go | 88 +- .../trace/jaeger/jaegerthrift_to_traces.go | 31 +- .../trace/jaeger/traces_to_jaegerproto.go | 2 +- .../translator/trace/protospan_translation.go | 2 +- .../trace/zipkin/zipkinv1_thrift_to_traces.go | 34 - .../trace/zipkin/zipkinv1_to_traces.go | 36 - .../{zipkin => zipkinv1}/consumerdata.go | 2 +- .../{zipkin => zipkinv1}/grpc_http_mapper.go | 2 +- .../json.go} | 30 +- .../trace/{zipkin => zipkinv1}/status_code.go | 2 +- .../testdata/zipkin_v1_error_batch.json | 0 .../testdata/zipkin_v1_local_component.json | 0 .../testdata/zipkin_v1_multiple_batches.json | 0 .../testdata/zipkin_v1_single_batch.json | 0 .../zipkin_v1_thrift_local_component.json | 0 .../zipkin_v1_thrift_single_batch.json | 0 .../thrift.go} | 26 +- .../trace/zipkinv1/to_translator.go | 31 + .../from_translator.go} | 24 +- .../translator/trace/zipkinv2/json.go | 61 + .../translator/trace/zipkinv2/protobuf.go | 65 + .../testdata/zipkin_v2_notimestamp.json | 0 .../testdata/zipkin_v2_single.json | 0 .../to_translator.go} | 171 +- processor/otel/consumer.go | 16 +- processor/otel/consumer_test.go | 51 +- processor/otel/exceptions_test.go | 2 +- processor/otel/metadata.go | 2 +- processor/otel/metadata_test.go | 13 +- processor/otel/metrics.go | 18 +- processor/otel/metrics_test.go | 212 +- processor/otel/timestamps.go | 2 +- 398 files changed, 8160 insertions(+), 30788 deletions(-) delete mode 100644 internal/otel_collector/consumer/pdata/common.go delete mode 100644 internal/otel_collector/consumer/pdata/doc.go delete mode 100644 internal/otel_collector/consumer/pdata/generated_common.go delete mode 100644 internal/otel_collector/consumer/pdata/generated_log.go delete mode 100644 internal/otel_collector/consumer/pdata/generated_metrics.go delete mode 100644 internal/otel_collector/consumer/pdata/generated_resource.go delete mode 100644 internal/otel_collector/consumer/pdata/generated_trace.go delete mode 100644 internal/otel_collector/consumer/pdata/logs.go delete mode 100644 internal/otel_collector/consumer/pdata/metrics.go delete mode 100644 internal/otel_collector/consumer/pdata/spanid.go delete mode 100644 internal/otel_collector/consumer/pdata/timestamp.go delete mode 100644 internal/otel_collector/consumer/pdata/traceid.go delete mode 100644 internal/otel_collector/consumer/pdata/traces.go delete mode 100644 internal/otel_collector/consumer/simple/metrics.go rename internal/otel_collector/{consumer/simple => exporter/exporterhelper}/doc.go (79%) rename internal/otel_collector/{internal/model/errors.go => exporter/fileexporter/doc.go} (71%) rename internal/{.otel_collector_mixin/otlptext/mixin.go => otel_collector/exporter/kafkaexporter/doc.go} (69%) delete mode 100644 internal/otel_collector/exporter/kafkaexporter/otlp_marshaler.go create mode 100644 internal/otel_collector/exporter/kafkaexporter/pdata_marshaler.go rename internal/otel_collector/{otlptext/mixin.go => exporter/loggingexporter/doc.go} (69%) create mode 100644 internal/otel_collector/exporter/opencensusexporter/doc.go create mode 100644 internal/otel_collector/exporter/otlpexporter/doc.go create mode 100644 internal/otel_collector/exporter/otlphttpexporter/doc.go create mode 100644 internal/otel_collector/exporter/prometheusexporter/doc.go create mode 100644 
internal/otel_collector/exporter/prometheusremotewriteexporter/doc.go create mode 100644 internal/otel_collector/exporter/zipkinexporter/doc.go create mode 100644 internal/otel_collector/extension/ballastextension/testdata/config_invalid.yaml rename internal/otel_collector/{processor/memorylimiter => }/internal/cgroups/cgroup.go (100%) rename internal/otel_collector/{processor/memorylimiter => }/internal/cgroups/cgroups.go (94%) rename internal/otel_collector/{processor/memorylimiter => }/internal/cgroups/doc.go (100%) rename internal/otel_collector/{processor/memorylimiter => }/internal/cgroups/errors.go (100%) rename internal/otel_collector/{processor/memorylimiter => }/internal/cgroups/mountpoint.go (100%) rename internal/otel_collector/{processor/memorylimiter => }/internal/cgroups/subsys.go (100%) rename internal/otel_collector/{processor/memorylimiter => }/internal/cgroups/testdata/cgroups/cpu/cpu.cfs_period_us (100%) rename internal/otel_collector/{processor/memorylimiter => }/internal/cgroups/testdata/cgroups/cpu/cpu.cfs_quota_us (100%) rename internal/otel_collector/{processor/memorylimiter => }/internal/cgroups/testdata/cgroups/empty/cpu.cfs_quota_us (100%) rename internal/otel_collector/{processor/memorylimiter => }/internal/cgroups/testdata/cgroups/invalid/cpu.cfs_quota_us (100%) rename internal/otel_collector/{processor/memorylimiter => }/internal/cgroups/testdata/cgroups/undefined-period/cpu.cfs_quota_us (100%) rename internal/otel_collector/{processor/memorylimiter => }/internal/cgroups/testdata/cgroups/undefined/cpu.cfs_period_us (100%) rename internal/otel_collector/{processor/memorylimiter => }/internal/cgroups/testdata/cgroups/undefined/cpu.cfs_quota_us (100%) rename internal/otel_collector/{processor/memorylimiter => }/internal/cgroups/testdata/proc/cgroups/cgroup (100%) rename internal/otel_collector/{processor/memorylimiter => }/internal/cgroups/testdata/proc/cgroups/mountinfo (100%) rename internal/otel_collector/{processor/memorylimiter => }/internal/cgroups/testdata/proc/invalid-cgroup/cgroup (100%) rename internal/otel_collector/{processor/memorylimiter => }/internal/cgroups/testdata/proc/invalid-mountinfo/mountinfo (100%) rename internal/otel_collector/{processor/memorylimiter => }/internal/cgroups/testdata/proc/untranslatable/cgroup (100%) rename internal/otel_collector/{processor/memorylimiter => }/internal/cgroups/testdata/proc/untranslatable/mountinfo (100%) delete mode 100644 internal/otel_collector/internal/data/.gitignore delete mode 100644 internal/otel_collector/internal/data/bytesid.go delete mode 100644 internal/otel_collector/internal/data/protogen/collector/logs/v1/logs_service.pb.go delete mode 100644 internal/otel_collector/internal/data/protogen/collector/logs/v1/logs_service.pb.gw.go delete mode 100644 internal/otel_collector/internal/data/protogen/collector/metrics/v1/metrics_service.pb.go delete mode 100644 internal/otel_collector/internal/data/protogen/collector/metrics/v1/metrics_service.pb.gw.go delete mode 100644 internal/otel_collector/internal/data/protogen/collector/trace/v1/trace_config.pb.go delete mode 100644 internal/otel_collector/internal/data/protogen/collector/trace/v1/trace_service.pb.go delete mode 100644 internal/otel_collector/internal/data/protogen/collector/trace/v1/trace_service.pb.gw.go delete mode 100644 internal/otel_collector/internal/data/protogen/collector/trace/v1/trace_service_gateway_aliases.go delete mode 100644 internal/otel_collector/internal/data/protogen/common/v1/common.pb.go delete mode 100644 
internal/otel_collector/internal/data/protogen/logs/v1/logs.pb.go delete mode 100644 internal/otel_collector/internal/data/protogen/metrics/v1/metrics.pb.go delete mode 100644 internal/otel_collector/internal/data/protogen/resource/v1/resource.pb.go delete mode 100644 internal/otel_collector/internal/data/protogen/trace/v1/trace.pb.go delete mode 100644 internal/otel_collector/internal/data/spanid.go delete mode 100644 internal/otel_collector/internal/data/traceid.go create mode 100644 internal/otel_collector/internal/iruntime/mem_info.go rename internal/otel_collector/{processor/memorylimiter => }/internal/iruntime/total_memory_linux.go (68%) rename internal/otel_collector/{processor/memorylimiter => }/internal/iruntime/total_memory_other.go (66%) delete mode 100644 internal/otel_collector/internal/model/README.md delete mode 100644 internal/otel_collector/internal/model/decoder.go delete mode 100644 internal/otel_collector/internal/model/encoder.go delete mode 100644 internal/otel_collector/internal/model/encoding.go delete mode 100644 internal/otel_collector/internal/model/from_translator.go delete mode 100644 internal/otel_collector/internal/model/marshaler.go delete mode 100644 internal/otel_collector/internal/model/to_translator.go delete mode 100644 internal/otel_collector/internal/model/unmarshal.go delete mode 100644 internal/otel_collector/internal/otlp/from_translator.go delete mode 100644 internal/otel_collector/internal/otlp/json_decoder.go delete mode 100644 internal/otel_collector/internal/otlp/json_encoder.go delete mode 100644 internal/otel_collector/internal/otlp/marshaler.go delete mode 100644 internal/otel_collector/internal/otlp/to_translator.go delete mode 100644 internal/otel_collector/internal/otlp/unmarshaler.go delete mode 100644 internal/otel_collector/internal/otlp_wrapper.go delete mode 100644 internal/otel_collector/internal/pdatagrpc/logs.go delete mode 100644 internal/otel_collector/internal/pdatagrpc/metrics.go delete mode 100644 internal/otel_collector/internal/pdatagrpc/traces.go create mode 100644 internal/otel_collector/receiver/hostmetricsreceiver/doc.go create mode 100644 internal/otel_collector/receiver/jaegerreceiver/doc.go create mode 100644 internal/otel_collector/receiver/kafkareceiver/doc.go delete mode 100644 internal/otel_collector/receiver/kafkareceiver/otlp_unmarshaler.go create mode 100644 internal/otel_collector/receiver/kafkareceiver/pdata_unmarshaler.go create mode 100644 internal/otel_collector/receiver/opencensusreceiver/doc.go create mode 100644 internal/otel_collector/receiver/otlpreceiver/doc.go delete mode 100644 internal/otel_collector/receiver/otlpreceiver/internal/marshal_jsonpb.go create mode 100644 internal/otel_collector/receiver/prometheusreceiver/internal/staleness_store.go create mode 100644 internal/otel_collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-relabel.yaml create mode 100644 internal/otel_collector/receiver/receiverhelper/doc.go create mode 100644 internal/otel_collector/receiver/scrapererror/doc.go rename internal/otel_collector/receiver/scrapererror/{scrapeerrors.go => scrapeerror.go} (100%) create mode 100644 internal/otel_collector/receiver/scraperhelper/doc.go create mode 100644 internal/otel_collector/receiver/zipkinreceiver/doc.go rename internal/otel_collector/service/{application.go => collector.go} (52%) rename internal/otel_collector/service/{application_windows.go => collector_windows.go} (80%) rename internal/otel_collector/testbed/testbed/{child_process.go => 
child_process_collector.go} (85%) create mode 100644 internal/otel_collector/testbed/testbed/in_process_collector.go rename internal/otel_collector/translator/trace/{ => internal}/zipkin/attributes.go (53%) delete mode 100644 internal/otel_collector/translator/trace/zipkin/zipkinv1_thrift_to_traces.go delete mode 100644 internal/otel_collector/translator/trace/zipkin/zipkinv1_to_traces.go rename internal/otel_collector/translator/trace/{zipkin => zipkinv1}/consumerdata.go (98%) rename internal/otel_collector/translator/trace/{zipkin => zipkinv1}/grpc_http_mapper.go (99%) rename internal/otel_collector/translator/trace/{zipkin/zipkinv1_to_protospan.go => zipkinv1/json.go} (94%) rename internal/otel_collector/translator/trace/{zipkin => zipkinv1}/status_code.go (99%) rename internal/otel_collector/translator/trace/{zipkin => zipkinv1}/testdata/zipkin_v1_error_batch.json (100%) rename internal/otel_collector/translator/trace/{zipkin => zipkinv1}/testdata/zipkin_v1_local_component.json (100%) rename internal/otel_collector/translator/trace/{zipkin => zipkinv1}/testdata/zipkin_v1_multiple_batches.json (100%) rename internal/otel_collector/translator/trace/{zipkin => zipkinv1}/testdata/zipkin_v1_single_batch.json (100%) rename internal/otel_collector/translator/trace/{zipkin => zipkinv1}/testdata/zipkin_v1_thrift_local_component.json (100%) rename internal/otel_collector/translator/trace/{zipkin => zipkinv1}/testdata/zipkin_v1_thrift_single_batch.json (100%) rename internal/otel_collector/translator/trace/{zipkin/zipkinv1_thrift_to_protospan.go => zipkinv1/thrift.go} (92%) create mode 100644 internal/otel_collector/translator/trace/zipkinv1/to_translator.go rename internal/otel_collector/translator/trace/{zipkin/traces_to_zipkinv2.go => zipkinv2/from_translator.go} (93%) create mode 100644 internal/otel_collector/translator/trace/zipkinv2/json.go create mode 100644 internal/otel_collector/translator/trace/zipkinv2/protobuf.go rename internal/otel_collector/translator/trace/{zipkin => zipkinv2}/testdata/zipkin_v2_notimestamp.json (100%) rename internal/otel_collector/translator/trace/{zipkin => zipkinv2}/testdata/zipkin_v2_single.json (100%) rename internal/otel_collector/translator/trace/{zipkin/zipkinv2_to_traces.go => zipkinv2/to_translator.go} (83%) diff --git a/NOTICE.txt b/NOTICE.txt index 454991cf5ff..0384db80c49 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -11,11 +11,11 @@ Third party libraries used by the Elastic APM Server project: -------------------------------------------------------------------------------- Dependency : github.com/apache/thrift -Version: v0.14.1 +Version: v0.14.2 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/apache/thrift@v0.14.1/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/apache/thrift@v0.14.2/LICENSE: Apache License @@ -1797,11 +1797,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- Dependency : github.com/google/pprof -Version: v0.0.0-20210323184331-8eee2492667d +Version: v0.0.0-20210609004039-a478d1d731e9 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/google/pprof@v0.0.0-20210323184331-8eee2492667d/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/google/pprof@v0.0.0-20210609004039-a478d1d731e9/LICENSE: Apache License @@ -2744,11 +2744,11 @@ Exhibit B - "Incompatible With Secondary Licenses" Notice -------------------------------------------------------------------------------- Dependency : github.com/jaegertracing/jaeger -Version: v1.23.0 +Version: v1.24.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/jaegertracing/jaeger@v1.23.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/jaegertracing/jaeger@v1.24.0/LICENSE: Apache License Version 2.0, January 2004 @@ -2955,11 +2955,11 @@ Contents of probable licence file $GOMODCACHE/github.com/jaegertracing/jaeger@v1 -------------------------------------------------------------------------------- Dependency : github.com/json-iterator/go -Version: v1.1.10 +Version: v1.1.11 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/json-iterator/go@v1.1.10/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/json-iterator/go@v1.1.11/LICENSE: MIT License @@ -3259,11 +3259,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : github.com/ryanuber/go-glob -Version: v0.0.0-20170128012129-256dc444b735 +Version: v1.0.0 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/ryanuber/go-glob@v0.0.0-20170128012129-256dc444b735/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/ryanuber/go-glob@v1.0.0/LICENSE: The MIT License (MIT) @@ -3290,11 +3290,11 @@ SOFTWARE. -------------------------------------------------------------------------------- Dependency : github.com/spf13/cobra -Version: v1.1.3 +Version: v1.2.1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/spf13/cobra@v1.1.3/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/spf13/cobra@v1.2.1/LICENSE.txt: Apache License Version 2.0, January 2004 @@ -4389,7 +4389,7 @@ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLI -------------------------------------------------------------------------------- Dependency : go.opentelemetry.io/collector -Version: v0.28.0 +Version: v0.30.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- @@ -4599,13 +4599,225 @@ Contents of probable licence file LICENSE: limitations under the License. 
+-------------------------------------------------------------------------------- +Dependency : go.opentelemetry.io/collector/model +Version: v0.30.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/collector/model@v0.30.0/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + -------------------------------------------------------------------------------- Dependency : go.uber.org/atomic -Version: v1.8.0 +Version: v1.9.0 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.uber.org/atomic@v1.8.0/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/go.uber.org/atomic@v1.9.0/LICENSE.txt: Copyright (c) 2016 Uber Technologies, Inc. @@ -4733,11 +4945,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : golang.org/x/time -Version: v0.0.0-20210220033141-f8bda1e9f3ba +Version: v0.0.0-20210611083556-38a9dc6acbc6 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/time@v0.0.0-20210220033141-f8bda1e9f3ba/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/time@v0.0.0-20210611083556-38a9dc6acbc6/LICENSE: Copyright (c) 2009 The Go Authors. All rights reserved. @@ -6029,11 +6241,11 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- Dependency : github.com/aws/aws-sdk-go-v2 -Version: v0.18.0 +Version: v0.9.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2@v0.18.0/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2@v0.9.0/LICENSE.txt: Apache License @@ -6929,11 +7141,11 @@ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -------------------------------------------------------------------------------- Dependency : github.com/dgraph-io/ristretto -Version: v0.0.3-0.20200630154024-f66de99634de +Version: v0.1.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/dgraph-io/ristretto@v0.0.3-0.20200630154024-f66de99634de/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/dgraph-io/ristretto@v0.1.0/LICENSE: Apache License Version 2.0, January 2004 @@ -9897,6 +10109,35 @@ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+-------------------------------------------------------------------------------- +Dependency : github.com/felixge/httpsnoop +Version: v1.0.2 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/felixge/httpsnoop@v1.0.2/LICENSE.txt: + +Copyright (c) 2016 Felix Geisendörfer (felix@debuggable.com) + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. + + -------------------------------------------------------------------------------- Dependency : github.com/adriansr/fsnotify Version: v0.0.0-20180417234312-c9bbe1f46f1d @@ -10428,198 +10669,209 @@ Apache License -------------------------------------------------------------------------------- -Dependency : github.com/golang/groupcache -Version: v0.0.0-20200121045136-8c9f03a8e57e +Dependency : github.com/slok/noglog +Version: v0.2.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/golang/groupcache@v0.0.0-20200121045136-8c9f03a8e57e/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/slok/noglog@v0.2.0/LICENSE: -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. + 1. Definitions. -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
-"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. -2. Grant of Copyright License. 
+ "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. -3. Grant of Patent License. + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. -4. Redistribution. + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. 
-You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: -5. Submission of Contributions. 
+ (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and -6. Trademarks. + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. -7. Disclaimer of Warranty. + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. 
+ Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. -8. Limitation of Liability. + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. -9. Accepting Warranty or Additional Liability. + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. 
However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. -END OF TERMS AND CONDITIONS + END OF TERMS AND CONDITIONS -APPENDIX: How to apply the Apache License to your work + APPENDIX: How to apply the Apache License to your work. -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright [2018] [Spotahome Ltd.] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -10668,11 +10920,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : github.com/golang/snappy -Version: v0.0.3 +Version: v0.0.4 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/golang/snappy@v0.0.3/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/golang/snappy@v0.0.4/LICENSE: Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. @@ -10914,26 +11166,238 @@ distribution. contributors may be used to endorse or promote products derived from this software without specific prior written permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/google/gofuzz +Version: v1.1.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/google/gofuzz@v1.1.0/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. -------------------------------------------------------------------------------- -Dependency : github.com/google/gofuzz -Version: v1.1.0 +Dependency : github.com/googleapis/gnostic +Version: v0.4.1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/google/gofuzz@v1.1.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/googleapis/gnostic@v0.4.1/LICENSE: Apache License @@ -11139,13 +11603,14 @@ Contents of probable licence file $GOMODCACHE/github.com/google/gofuzz@v1.1.0/LI limitations under the License. 
+ -------------------------------------------------------------------------------- -Dependency : github.com/googleapis/gnostic -Version: v0.4.1 +Dependency : github.com/gorhill/cronexpr +Version: v0.0.0-20180427100037-88b0669f7d75 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/googleapis/gnostic@v0.4.1/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/gorhill/cronexpr@v0.0.0-20180427100037-88b0669f7d75/APLv2: Apache License @@ -11285,483 +11750,634 @@ Contents of probable licence file $GOMODCACHE/github.com/googleapis/gnostic@v0.4 the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. 
+ + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/gorilla/mux +Version: v1.8.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/gorilla/mux@v1.8.0/LICENSE: + +Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/gorilla/websocket +Version: v1.4.2 +Licence type (autodetected): BSD-2-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/gorilla/websocket@v1.4.2/LICENSE: + +Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/h2non/filetype +Version: v1.1.1-0.20201130172452-f60988ab73d5 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/h2non/filetype@v1.1.1-0.20201130172452-f60988ab73d5/LICENSE: + +The MIT License + +Copyright (c) Tomas Aparicio + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/hashicorp/errwrap +Version: v1.1.0 +Licence type (autodetected): MPL-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/hashicorp/errwrap@v1.1.0/LICENSE: + +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. 
“Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. +1.10. “Modifications” - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. + means any of the following: - END OF TERMS AND CONDITIONS + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or - APPENDIX: How to apply the Apache License to your work. + b. any new file in Source Code Form that contains any Covered Software. 
- To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. +1.11. “Patent Claims” of a Contributor - Copyright [yyyy] [name of copyright owner] + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +1.12. “Secondary License” - http://www.apache.org/licenses/LICENSE-2.0 + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +1.13. “Source Code Form” + means the form of the work preferred for making modifications. +1.14. “You” (or “Your”) --------------------------------------------------------------------------------- -Dependency : github.com/gorhill/cronexpr -Version: v0.0.0-20180427100037-88b0669f7d75 -Licence type (autodetected): Apache-2.0 --------------------------------------------------------------------------------- + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. -Contents of probable licence file $GOMODCACHE/github.com/gorhill/cronexpr@v0.0.0-20180427100037-88b0669f7d75/APLv2: +2. License Grants and Conditions - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ +2.1. Grants - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: - 1. Definitions. + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. + b. 
under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. +2.2. Effective Date - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. +2.3. Limitations on Grant Scope - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. + a. for any code that a Contributor has removed from Covered Software; or - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. +2.4. Subsequent Licenses - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. +2.5. Representation - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and +2.6. Fair Use - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. 
- (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and +2.7. Conditions - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. +3.2. Distribution of Executable Form - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. 
You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. + If You distribute Covered Software in Executable Form then: - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. - END OF TERMS AND CONDITIONS +3.3. Distribution of a Larger Work - APPENDIX: How to apply the Apache License to your work. + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. +3.4. 
Notices - Copyright [yyyy] [name of copyright owner] + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +3.5. Application of Additional Terms - http://www.apache.org/licenses/LICENSE-2.0 + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +4. Inability to Comply Due to Statute or Regulation + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. --------------------------------------------------------------------------------- -Dependency : github.com/gorilla/websocket -Version: v1.4.2 -Licence type (autodetected): BSD-2-Clause --------------------------------------------------------------------------------- +5. Termination -Contents of probable licence file $GOMODCACHE/github.com/gorilla/websocket@v1.4.2/LICENSE: +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. 
+ Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. -Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. - Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. +6. Disclaimer of Warranty - Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +7. 
Limitation of Liability + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. --------------------------------------------------------------------------------- -Dependency : github.com/grpc-ecosystem/grpc-gateway -Version: v1.16.0 -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- +8. Litigation -Contents of probable licence file $GOMODCACHE/github.com/grpc-ecosystem/grpc-gateway@v1.16.0/LICENSE.txt: + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. -Copyright (c) 2015, Gengo, Inc. -All rights reserved. +9. Miscellaneous -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. +10. Versions of the License - * Neither the name of Gengo, Inc. nor the names of its - contributors may be used to endorse or promote products derived from this - software without specific prior written permission. +10.1. New Versions -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. +10.2. Effect of New Versions --------------------------------------------------------------------------------- -Dependency : github.com/h2non/filetype -Version: v1.1.1-0.20201130172452-f60988ab73d5 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. -Contents of probable licence file $GOMODCACHE/github.com/h2non/filetype@v1.1.1-0.20201130172452-f60988ab73d5/LICENSE: +10.3. Modified Versions -The MIT License + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). -Copyright (c) Tomas Aparicio +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. 
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- -Dependency : github.com/hashicorp/errwrap -Version: v1.1.0 +Dependency : github.com/hashicorp/go-cleanhttp +Version: v0.5.1 Licence type (autodetected): MPL-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/hashicorp/errwrap@v1.1.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/hashicorp/go-cleanhttp@v0.5.1/LICENSE: Mozilla Public License, version 2.0 1. Definitions -1.1. “Contributor” +1.1. "Contributor" means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software. -1.2. “Contributor Version” +1.2. "Contributor Version" means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. + Contributor and that particular Contributor's Contribution. -1.3. “Contribution” +1.3. "Contribution" means Covered Software of a particular Contributor. -1.4. “Covered Software” +1.4. "Covered Software" means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof. -1.5. “Incompatible With Secondary Licenses” +1.5. "Incompatible With Secondary Licenses" means a. that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. -1.6. “Executable Form” +1.6. "Executable Form" means any form of the work other than Source Code Form. -1.7. “Larger Work” +1.7. "Larger Work" - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. -1.8. “License” +1.8. "License" means this document. -1.9. “Licensable” +1.9. "Licensable" - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. -1.10. “Modifications” +1.10. "Modifications" means any of the following: - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or b. 
any new file in Source Code Form that contains any Covered Software. -1.11. “Patent Claims” of a Contributor +1.11. "Patent Claims" of a Contributor - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. -1.12. “Secondary License” +1.12. "Secondary License" means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses. -1.13. “Source Code Form” +1.13. "Source Code Form" means the form of the work preferred for making modifications. -1.14. “You” (or “Your”) +1.14. "You" (or "Your") means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is + License. For legal entities, "You" includes any entity that controls, is controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause + definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. @@ -11777,57 +12393,59 @@ Mozilla Public License, version 2.0 a. under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. 2.2. Effective Date - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. 2.3. Limitations on Grant Scope - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: + The licenses granted in this Section 2 are the only rights granted under + this License. 
No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: a. for any code that a Contributor has removed from Covered Software; or - b. for infringements caused by: (i) Your and any other third party’s + b. for infringements caused by: (i) Your and any other third party's modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). 2.4. Subsequent Licenses No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). 2.5. Representation - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. 2.6. Fair Use - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. 2.7. Conditions @@ -11840,11 +12458,12 @@ Mozilla Public License, version 2.0 3.1. Distribution of Source Form All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. 3.2. Distribution of Executable Form @@ -11856,39 +12475,40 @@ Mozilla Public License, version 2.0 reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and - b. 
You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. 3.3. Distribution of a Larger Work You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). 3.4. Notices - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. 3.5. Application of Additional Terms You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. 
You may include additional disclaimers of warranty and limitations of liability specific to any @@ -11897,14 +12517,14 @@ Mozilla Public License, version 2.0 4. Inability to Comply Due to Statute or Regulation If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. 5. Termination @@ -11912,39 +12532,40 @@ Mozilla Public License, version 2.0 fail to comply with any of its terms. However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. 5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. 
+ infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. 5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. 7. Limitation of Liability @@ -11956,27 +12577,29 @@ Mozilla Public License, version 2.0 goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses, even if such party shall have been informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. 8. Litigation - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. 
Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. 9. Miscellaneous - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. 10. Versions of the License @@ -11990,23 +12613,24 @@ Mozilla Public License, version 2.0 10.2. Effect of New Versions - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license steward. 10.3. Modified Versions If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. Exhibit A - Source Code Form License Notice @@ -12017,27 +12641,28 @@ Exhibit A - Source Code Form License Notice obtain one at http://mozilla.org/MPL/2.0/. -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. 
+If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. You may add additional accurate notices of copyright ownership. -Exhibit B - “Incompatible With Secondary Licenses” Notice +Exhibit B - "Incompatible With Secondary Licenses" Notice - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by the Mozilla Public License, v. 2.0. -------------------------------------------------------------------------------- -Dependency : github.com/hashicorp/go-cleanhttp -Version: v0.5.1 +Dependency : github.com/hashicorp/go-rootcerts +Version: v1.0.2 Licence type (autodetected): MPL-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/hashicorp/go-cleanhttp@v0.5.1/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/hashicorp/go-rootcerts@v1.0.2/LICENSE: Mozilla Public License, version 2.0 @@ -12405,12 +13030,12 @@ Exhibit B - "Incompatible With Secondary Licenses" Notice -------------------------------------------------------------------------------- -Dependency : github.com/hashicorp/go-rootcerts +Dependency : github.com/hashicorp/go-uuid Version: v1.0.2 Licence type (autodetected): MPL-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/hashicorp/go-rootcerts@v1.0.2/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/hashicorp/go-uuid@v1.0.2/LICENSE: Mozilla Public License, version 2.0 @@ -12778,12 +13403,12 @@ Exhibit B - "Incompatible With Secondary Licenses" Notice -------------------------------------------------------------------------------- -Dependency : github.com/hashicorp/go-uuid -Version: v1.0.2 +Dependency : github.com/hashicorp/nomad/api +Version: v0.0.0-20200303134319-e31695b5bbe6 Licence type (autodetected): MPL-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/hashicorp/go-uuid@v1.0.2/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/hashicorp/nomad/api@v0.0.0-20200303134319-e31695b5bbe6/LICENSE: Mozilla Public License, version 2.0 @@ -13151,903 +13776,1332 @@ Exhibit B - "Incompatible With Secondary Licenses" Notice -------------------------------------------------------------------------------- -Dependency : github.com/hashicorp/hcl +Dependency : github.com/imdario/mergo +Version: v0.3.11 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/imdario/mergo@v0.3.11/LICENSE: + +Copyright (c) 2013 Dario Castañé. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/inconshreveable/mousetrap +Version: v1.0.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/inconshreveable/mousetrap@v1.0.0/LICENSE: + +Copyright 2014 Alan Shreve + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/jcmturner/gofork Version: v1.0.0 -Licence type (autodetected): MPL-2.0 +Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/hashicorp/hcl@v1.0.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/jcmturner/gofork@v1.0.0/LICENSE: -Mozilla Public License, version 2.0 +Copyright (c) 2009 The Go Authors. All rights reserved. -1. Definitions +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: -1.1. “Contributor” + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. 
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -1.2. “Contributor Version” - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. +-------------------------------------------------------------------------------- +Dependency : github.com/jmespath/go-jmespath +Version: v0.4.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- -1.3. “Contribution” +Contents of probable licence file $GOMODCACHE/github.com/jmespath/go-jmespath@v0.4.0/LICENSE: - means Covered Software of a particular Contributor. +Copyright 2015 James Saryerwinnie -1.4. “Covered Software” +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. + http://www.apache.org/licenses/LICENSE-2.0 -1.5. “Incompatible With Secondary Licenses” - means +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. +-------------------------------------------------------------------------------- +Dependency : github.com/joeshaw/multierror +Version: v0.0.0-20140124173710-69b34d4ec901 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- -1.6. “Executable Form” +Contents of probable licence file $GOMODCACHE/github.com/joeshaw/multierror@v0.0.0-20140124173710-69b34d4ec901/LICENSE: - means any form of the work other than Source Code Form. +The MIT License (MIT) -1.7. “Larger Work” +Copyright (c) 2014 Joe Shaw - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. 
+Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. -1.8. “License” - means this document. +-------------------------------------------------------------------------------- +Dependency : github.com/jonboulle/clockwork +Version: v0.2.2 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- -1.9. “Licensable” +Contents of probable licence file $GOMODCACHE/github.com/jonboulle/clockwork@v0.2.2/LICENSE: - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ -1.10. “Modifications” + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - means any of the following: + 1. Definitions. - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. - b. any new file in Source Code Form that contains any Covered Software. + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. -1.11. “Patent Claims” of a Contributor + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. -1.12. “Secondary License” + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
- means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. -1.13. “Source Code Form” + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). - means the form of the work preferred for making modifications. + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. -1.14. “You” (or “Your”) + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. -2. License Grants and Conditions + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. -2.1. Grants + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and -2.2. Effective Date + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. 
+ You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. -2.3. Limitations on Grant Scope + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. - a. for any code that a Contributor has removed from Covered Software; or + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. 
However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). + END OF TERMS AND CONDITIONS -2.4. Subsequent Licenses + APPENDIX: How to apply the Apache License to your work. - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. -2.5. Representation + Copyright {yyyy} {name of copyright owner} - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at -2.6. Fair Use + http://www.apache.org/licenses/LICENSE-2.0 - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. -2.7. Conditions - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. +-------------------------------------------------------------------------------- +Dependency : github.com/josharian/intern +Version: v1.0.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- +Contents of probable licence file $GOMODCACHE/github.com/josharian/intern@v1.0.0/license.md: -3. Responsibilities +MIT License -3.1. Distribution of Source Form +Copyright (c) 2019 Josh Bleecher Snyder - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. 
+Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: -3.2. Distribution of Executable Form +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. - If You distribute Covered Software in Executable Form then: +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. +-------------------------------------------------------------------------------- +Dependency : github.com/klauspost/compress +Version: v1.11.12 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- -3.3. Distribution of a Larger Work +Contents of probable licence file $GOMODCACHE/github.com/klauspost/compress@v1.11.12/LICENSE: - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. -3.4. Notices +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. -3.5. Application of Additional Terms +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. -4. Inability to Comply Due to Statute or Regulation +-------------------------------------------------------------------------------- +Dependency : github.com/knadh/koanf +Version: v1.1.1 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. +Contents of probable licence file $GOMODCACHE/github.com/knadh/koanf@v1.1.1/LICENSE: -5. Termination +The MIT License -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. 
- Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. +Copyright (c) 2019, Kailash Nadh. https://github.com/knadh -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. -6. Disclaimer of Warranty +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. -7. 
Limitation of Liability +-------------------------------------------------------------------------------- +Dependency : github.com/mailru/easyjson +Version: v0.7.7 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. +Contents of probable licence file $GOMODCACHE/github.com/mailru/easyjson@v0.7.7/LICENSE: -8. Litigation +Copyright (c) 2016 Mail.Ru Group - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -9. Miscellaneous +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -10. Versions of the License +-------------------------------------------------------------------------------- +Dependency : github.com/mattn/go-colorable +Version: v0.1.8 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- -10.1. 
New Versions +Contents of probable licence file $GOMODCACHE/github.com/mattn/go-colorable@v0.1.8/LICENSE: - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. +The MIT License (MIT) -10.2. Effect of New Versions +Copyright (c) 2016 Yasuhiro Matsumoto - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: -10.3. Modified Versions +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. -Exhibit A - Source Code Form License Notice +-------------------------------------------------------------------------------- +Dependency : github.com/mattn/go-isatty +Version: v0.0.13 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. +Contents of probable licence file $GOMODCACHE/github.com/mattn/go-isatty@v0.0.13/LICENSE: -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. +Copyright (c) Yasuhiro MATSUMOTO -You may add additional accurate notices of copyright ownership. 
+MIT License (Expat) -Exhibit B - “Incompatible With Secondary Licenses” Notice +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- -Dependency : github.com/hashicorp/nomad/api -Version: v0.0.0-20200303134319-e31695b5bbe6 -Licence type (autodetected): MPL-2.0 +Dependency : github.com/miekg/dns +Version: v1.1.42 +Licence type (autodetected): BSD -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/hashicorp/nomad/api@v0.0.0-20200303134319-e31695b5bbe6/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/miekg/dns@v1.1.42/COPYRIGHT: -Mozilla Public License, version 2.0 +Copyright 2009 The Go Authors. All rights reserved. Use of this source code +is governed by a BSD-style license that can be found in the LICENSE file. +Extensions of the original work are copyright (c) 2011 Miek Gieben -1. Definitions +Copyright 2011 Miek Gieben. All rights reserved. Use of this source code is +governed by a BSD-style license that can be found in the LICENSE file. -1.1. "Contributor" +Copyright 2014 CloudFlare. All rights reserved. Use of this source code is +governed by a BSD-style license that can be found in the LICENSE file. - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. -1.2. "Contributor Version" +-------------------------------------------------------------------------------- +Dependency : github.com/mitchellh/copystructure +Version: v1.2.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. +Contents of probable licence file $GOMODCACHE/github.com/mitchellh/copystructure@v1.2.0/LICENSE: -1.3. "Contribution" +The MIT License (MIT) - means Covered Software of a particular Contributor. +Copyright (c) 2014 Mitchell Hashimoto -1.4. 
"Covered Software" +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. -1.5. "Incompatible With Secondary Licenses" - means +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. +-------------------------------------------------------------------------------- +Dependency : github.com/mitchellh/go-homedir +Version: v1.1.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- -1.6. "Executable Form" +Contents of probable licence file $GOMODCACHE/github.com/mitchellh/go-homedir@v1.1.0/LICENSE: - means any form of the work other than Source Code Form. +The MIT License (MIT) -1.7. "Larger Work" +Copyright (c) 2013 Mitchell Hashimoto - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: -1.8. "License" +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. - means this document. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. -1.9. "Licensable" - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. 
+-------------------------------------------------------------------------------- +Dependency : github.com/mitchellh/hashstructure +Version: v1.1.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- -1.10. "Modifications" +Contents of probable licence file $GOMODCACHE/github.com/mitchellh/hashstructure@v1.1.0/LICENSE: - means any of the following: +The MIT License (MIT) - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or +Copyright (c) 2016 Mitchell Hashimoto - b. any new file in Source Code Form that contains any Covered Software. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: -1.11. "Patent Claims" of a Contributor +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. -1.12. "Secondary License" - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. +-------------------------------------------------------------------------------- +Dependency : github.com/mitchellh/mapstructure +Version: v1.4.1 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- -1.13. "Source Code Form" +Contents of probable licence file $GOMODCACHE/github.com/mitchellh/mapstructure@v1.4.1/LICENSE: - means the form of the work preferred for making modifications. +The MIT License (MIT) -1.14. "You" (or "Your") +Copyright (c) 2013 Mitchell Hashimoto - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. 
+Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. -2. License Grants and Conditions +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. -2.1. Grants - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: +-------------------------------------------------------------------------------- +Dependency : github.com/mitchellh/reflectwalk +Version: v1.0.2 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and +Contents of probable licence file $GOMODCACHE/github.com/mitchellh/reflectwalk@v1.0.2/LICENSE: - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. +The MIT License (MIT) -2.2. Effective Date +Copyright (c) 2013 Mitchell Hashimoto - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: -2.3. Limitations on Grant Scope +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. - a. for any code that a Contributor has removed from Covered Software; or - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or +-------------------------------------------------------------------------------- +Dependency : github.com/modern-go/concurrent +Version: v0.0.0-20180306012644-bacd9c7ef1dd +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. +Contents of probable licence file $GOMODCACHE/github.com/modern-go/concurrent@v0.0.0-20180306012644-bacd9c7ef1dd/LICENSE: - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ -2.4. Subsequent Licenses + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). + 1. Definitions. -2.5. Representation + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. -2.6. Fair Use + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
- This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. -2.7. Conditions + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. -3. Responsibilities + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. -3.1. Distribution of Source Form + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and -3.2. Distribution of Executable Form + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and - If You distribute Covered Software in Executable Form then: + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. -3.3. Distribution of a Larger Work + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. 
+ Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. -3.4. Notices + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. -3.5. Application of Additional Terms + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
- You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. + END OF TERMS AND CONDITIONS -4. Inability to Comply Due to Statute or Regulation + APPENDIX: How to apply the Apache License to your work. - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. -5. Termination + Copyright [yyyy] [name of copyright owner] -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. + http://www.apache.org/licenses/LICENSE-2.0 -5.3. 
In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. -6. Disclaimer of Warranty - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. +-------------------------------------------------------------------------------- +Dependency : github.com/opencontainers/go-digest +Version: v1.0.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- -7. Limitation of Liability +Contents of probable licence file $GOMODCACHE/github.com/opencontainers/go-digest@v1.0.0/LICENSE: - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. -8. Litigation + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -9. Miscellaneous + 1. Definitions. - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. 
Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. -10. Versions of the License + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. -10.1. New Versions + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. -10.2. Effect of New Versions + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). -10.3. Modified Versions + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. -Exhibit A - Source Code Form License Notice + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: -You may add additional accurate notices of copyright ownership. 
+ (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and -Exhibit B - "Incompatible With Secondary Licenses" Notice + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. --------------------------------------------------------------------------------- -Dependency : github.com/imdario/mergo -Version: v0.3.11 -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. -Contents of probable licence file $GOMODCACHE/github.com/imdario/mergo@v0.3.11/LICENSE: + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. -Copyright (c) 2013 Dario Castañé. All rights reserved. -Copyright (c) 2012 The Go Authors. All rights reserved. + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + END OF TERMS AND CONDITIONS + + Copyright 2019, 2020 OCI Contributors + Copyright 2016 Docker, Inc. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. -------------------------------------------------------------------------------- -Dependency : github.com/inconshreveable/mousetrap -Version: v1.0.0 +Dependency : github.com/opencontainers/image-spec +Version: v1.0.2-0.20190823105129-775207bd45b6 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/inconshreveable/mousetrap@v1.0.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/opencontainers/image-spec@v1.0.2-0.20190823105129-775207bd45b6/LICENSE: -Copyright 2014 Alan Shreve -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - http://www.apache.org/licenses/LICENSE-2.0 + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. + 1. Definitions. + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. --------------------------------------------------------------------------------- -Dependency : github.com/jcmturner/gofork -Version: v1.0.0 -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. -Contents of probable licence file $GOMODCACHE/github.com/jcmturner/gofork@v1.0.0/LICENSE: + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. -Copyright (c) 2009 The Go Authors. All rights reserved. + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
- * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. --------------------------------------------------------------------------------- -Dependency : github.com/jmespath/go-jmespath -Version: v0.4.0 -Licence type (autodetected): Apache-2.0 --------------------------------------------------------------------------------- + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
-Contents of probable licence file $GOMODCACHE/github.com/jmespath/go-jmespath@v0.4.0/LICENSE: + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. -Copyright 2015 James Saryerwinnie + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. - http://www.apache.org/licenses/LICENSE-2.0 + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
+ (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and --------------------------------------------------------------------------------- -Dependency : github.com/joeshaw/multierror -Version: v0.0.0-20140124173710-69b34d4ec901 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and -Contents of probable licence file $GOMODCACHE/github.com/joeshaw/multierror@v0.0.0-20140124173710-69b34d4ec901/LICENSE: + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. -The MIT License (MIT) + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. -Copyright (c) 2014 Joe Shaw + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. 
-The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2016 The Linux Foundation. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
-------------------------------------------------------------------------------- -Dependency : github.com/jonboulle/clockwork -Version: v0.2.2 +Dependency : github.com/opentracing/opentracing-go +Version: v1.2.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/jonboulle/clockwork@v0.2.2/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/opentracing/opentracing-go@v1.2.0/LICENSE: -Apache License + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -14235,7 +15289,7 @@ Apache License same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright {yyyy} {name of copyright owner} + Copyright 2016 The OpenTracing Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14251,163 +15305,309 @@ Apache License -------------------------------------------------------------------------------- -Dependency : github.com/josharian/intern -Version: v1.0.0 -Licence type (autodetected): MIT +Dependency : github.com/pierrec/lz4 +Version: v2.5.2+incompatible +Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/josharian/intern@v1.0.0/license.md: +Contents of probable licence file $GOMODCACHE/github.com/pierrec/lz4@v2.5.2+incompatible/LICENSE: -MIT License +Copyright (c) 2015, Pierre Curto +All rights reserved. -Copyright (c) 2019 Josh Bleecher Snyder +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of xxHash nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. -------------------------------------------------------------------------------- -Dependency : github.com/klauspost/compress -Version: v1.11.0 -Licence type (autodetected): BSD-3-Clause +Dependency : github.com/prometheus/procfs +Version: v0.6.0 +Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/klauspost/compress@v1.11.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/prometheus/procfs@v0.6.0/LICENSE: -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2019 Klaus Post. All rights reserved. + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + 1. Definitions. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. --------------------------------------------------------------------------------- -Dependency : github.com/konsorten/go-windows-terminal-sequences -Version: v1.0.3 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- + END OF TERMS AND CONDITIONS -Contents of probable licence file $GOMODCACHE/github.com/konsorten/go-windows-terminal-sequences@v1.0.3/LICENSE: + APPENDIX: How to apply the Apache License to your work. -(The MIT License) + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
-Copyright (c) 2017 marvin + konsorten GmbH (open-source@konsorten.de) + Copyright [yyyy] [name of copyright owner] -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + http://www.apache.org/licenses/LICENSE-2.0 -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. -------------------------------------------------------------------------------- -Dependency : github.com/magiconair/properties -Version: v1.8.1 -Licence type (autodetected): BSD-2-Clause +Dependency : github.com/rcrowley/go-metrics +Version: v0.0.0-20201227073835-cf1acfcdf475 +Licence type (autodetected): BSD-2-Clause-FreeBSD -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/magiconair/properties@v1.8.1/LICENSE: - -goproperties - properties file decoder for Go - -Copyright (c) 2013-2018 - Frank Schroeder +Contents of probable licence file $GOMODCACHE/github.com/rcrowley/go-metrics@v0.0.0-20201227073835-cf1acfcdf475/LICENSE: -All rights reserved. +Copyright 2012 Richard Crowley. All rights reserved. Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - --------------------------------------------------------------------------------- -Dependency : github.com/mailru/easyjson -Version: v0.7.7 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/mailru/easyjson@v0.7.7/LICENSE: +modification, are permitted provided that the following conditions are +met: -Copyright (c) 2016 Mail.Ru Group + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + 2. Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +THIS SOFTWARE IS PROVIDED BY RICHARD CROWLEY ``AS IS'' AND ANY EXPRESS +OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL RICHARD CROWLEY OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +THE POSSIBILITY OF SUCH DAMAGE. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +The views and conclusions contained in the software and documentation +are those of the authors and should not be interpreted as representing +official policies, either expressed or implied, of Richard Crowley. 
-------------------------------------------------------------------------------- -Dependency : github.com/mattn/go-colorable -Version: v0.1.8 +Dependency : github.com/rs/cors +Version: v1.8.0 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/mattn/go-colorable@v0.1.8/LICENSE: - -The MIT License (MIT) +Contents of probable licence file $GOMODCACHE/github.com/rs/cors@v1.8.0/LICENSE: -Copyright (c) 2016 Yasuhiro Matsumoto +Copyright (c) 2014 Olivier Poitrey Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: +copies of the Software, and to permit persons to whom the Software is furnished +to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. @@ -14417,59 +15617,91 @@ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. -------------------------------------------------------------------------------- -Dependency : github.com/mattn/go-isatty -Version: v0.0.13 -Licence type (autodetected): MIT +Dependency : github.com/shirou/gopsutil +Version: v3.21.6+incompatible +Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/mattn/go-isatty@v0.0.13/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/shirou/gopsutil@v3.21.6+incompatible/LICENSE: -Copyright (c) Yasuhiro MATSUMOTO +gopsutil is distributed under BSD license reproduced below. -MIT License (Expat) +Copyright (c) 2014, WAKAYAMA Shirou +All rights reserved. -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of the gopsutil authors nor the names of its contributors + may be used to endorse or promote products derived from this software without + specific prior written permission. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. --------------------------------------------------------------------------------- -Dependency : github.com/miekg/dns -Version: v1.1.41 -Licence type (autodetected): BSD --------------------------------------------------------------------------------- +------- +internal/common/binary.go in the gopsutil is copied and modifid from golang/encoding/binary.go. -Contents of probable licence file $GOMODCACHE/github.com/miekg/dns@v1.1.41/COPYRIGHT: -Copyright 2009 The Go Authors. All rights reserved. Use of this source code -is governed by a BSD-style license that can be found in the LICENSE file. -Extensions of the original work are copyright (c) 2011 Miek Gieben -Copyright 2011 Miek Gieben. All rights reserved. Use of this source code is -governed by a BSD-style license that can be found in the LICENSE file. +Copyright (c) 2009 The Go Authors. All rights reserved. -Copyright 2014 CloudFlare. All rights reserved. Use of this source code is -governed by a BSD-style license that can be found in the LICENSE file. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- -Dependency : github.com/mitchellh/go-homedir -Version: v1.1.0 +Dependency : github.com/sirupsen/logrus +Version: v1.8.1 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/mitchellh/go-homedir@v1.1.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/sirupsen/logrus@v1.8.1/LICENSE: The MIT License (MIT) -Copyright (c) 2013 Mitchell Hashimoto +Copyright (c) 2014 Simon Eskildsen Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -14491,16 +15723,16 @@ THE SOFTWARE. -------------------------------------------------------------------------------- -Dependency : github.com/mitchellh/hashstructure -Version: v1.1.0 +Dependency : github.com/spf13/cast +Version: v1.3.1 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/mitchellh/hashstructure@v1.1.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/spf13/cast@v1.3.1/LICENSE: The MIT License (MIT) -Copyright (c) 2016 Mitchell Hashimoto +Copyright (c) 2014 Steve Francia Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -14509,56 +15741,63 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
-------------------------------------------------------------------------------- -Dependency : github.com/mitchellh/mapstructure -Version: v1.4.1 -Licence type (autodetected): MIT +Dependency : github.com/tklauser/go-sysconf +Version: v0.3.6 +Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/mitchellh/mapstructure@v1.4.1/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/tklauser/go-sysconf@v0.3.6/LICENSE: -The MIT License (MIT) +BSD 3-Clause License -Copyright (c) 2013 Mitchell Hashimoto +Copyright (c) 2018-2021, Tobias Klauser +All rights reserved. -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- -Dependency : github.com/modern-go/concurrent -Version: v0.0.0-20180306012644-bacd9c7ef1dd +Dependency : github.com/tklauser/numcpus +Version: v0.2.2 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/modern-go/concurrent@v0.0.0-20180306012644-bacd9c7ef1dd/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/tklauser/numcpus@v0.2.2/LICENSE: Apache License Version 2.0, January 2004 @@ -14740,7 +15979,7 @@ Contents of probable licence file $GOMODCACHE/github.com/modern-go/concurrent@v0 APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" + boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -14748,7 +15987,7 @@ Contents of probable licence file $GOMODCACHE/github.com/modern-go/concurrent@v0 same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright {yyyy} Authors of Cilium Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14763,18 +16002,50 @@ Contents of probable licence file $GOMODCACHE/github.com/modern-go/concurrent@v0 limitations under the License. + -------------------------------------------------------------------------------- -Dependency : github.com/opencontainers/go-digest -Version: v1.0.0 -Licence type (autodetected): Apache-2.0 +Dependency : github.com/ugorji/go/codec +Version: v1.1.8 +Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/opencontainers/go-digest@v1.0.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/ugorji/go/codec@v1.1.8/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2012-2015 Ugorji Nwoke. +All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ + +-------------------------------------------------------------------------------- +Dependency : github.com/urso/diag +Version: v0.0.0-20200210123136-21b3cc8eb797 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- +Contents of probable licence file $GOMODCACHE/github.com/urso/diag@v0.0.0-20200210123136-21b3cc8eb797/LICENSE: Apache License Version 2.0, January 2004 - https://www.apache.org/licenses/ + http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION @@ -14949,14 +16220,24 @@ Contents of probable licence file $GOMODCACHE/github.com/opencontainers/go-diges END OF TERMS AND CONDITIONS - Copyright 2019, 2020 OCI Contributors - Copyright 2016 Docker, Inc. + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -14966,13 +16247,12 @@ Contents of probable licence file $GOMODCACHE/github.com/opencontainers/go-diges -------------------------------------------------------------------------------- -Dependency : github.com/opencontainers/image-spec -Version: v1.0.2-0.20190823105129-775207bd45b6 +Dependency : github.com/urso/go-bin +Version: v0.0.0-20180220135811-781c575c9f0e Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/opencontainers/image-spec@v1.0.2-0.20190823105129-775207bd45b6/LICENSE: - +Contents of probable licence file $GOMODCACHE/github.com/urso/go-bin@v0.0.0-20180220135811-781c575c9f0e/LICENSE: Apache License Version 2.0, January 2004 @@ -15151,7 +16431,18 @@ Contents of probable licence file $GOMODCACHE/github.com/opencontainers/image-sp END OF TERMS AND CONDITIONS - Copyright 2016 The Linux Foundation. + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -15167,12 +16458,12 @@ Contents of probable licence file $GOMODCACHE/github.com/opencontainers/image-sp -------------------------------------------------------------------------------- -Dependency : github.com/opentracing/opentracing-go -Version: v1.2.0 +Dependency : github.com/urso/sderr +Version: v0.0.0-20200210124243-c2a16f3d43ec Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/opentracing/opentracing-go@v1.2.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/urso/sderr@v0.0.0-20200210124243-c2a16f3d43ec/LICENSE: Apache License Version 2.0, January 2004 @@ -15354,7 +16645,7 @@ Contents of probable licence file $GOMODCACHE/github.com/opentracing/opentracing APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" + boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -15362,7 +16653,7 @@ Contents of probable licence file $GOMODCACHE/github.com/opentracing/opentracing same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2016 The OpenTracing Authors + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15378,81 +16669,198 @@ Contents of probable licence file $GOMODCACHE/github.com/opentracing/opentracing -------------------------------------------------------------------------------- -Dependency : github.com/pelletier/go-toml -Version: v1.7.0 -Licence type (autodetected): MIT +Dependency : github.com/xdg/scram +Version: v0.0.0-20180814205039-7eeb5667e42c +Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/pelletier/go-toml@v1.7.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/xdg/scram@v0.0.0-20180814205039-7eeb5667e42c/LICENSE: -The MIT License (MIT) -Copyright (c) 2013 - 2017 Thomas Pelletier, Eric Anderton + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. + 1. Definitions. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. --------------------------------------------------------------------------------- -Dependency : github.com/pierrec/lz4 -Version: v2.5.2+incompatible -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. -Contents of probable licence file $GOMODCACHE/github.com/pierrec/lz4@v2.5.2+incompatible/LICENSE: + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. -Copyright (c) 2015, Pierre Curto -All rights reserved. + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. -* Neither the name of xxHash nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. -------------------------------------------------------------------------------- -Dependency : github.com/prometheus/procfs -Version: v0.6.0 +Dependency : github.com/xdg/stringprep +Version: v1.0.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/prometheus/procfs@v0.6.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/xdg/stringprep@v1.0.0/LICENSE: + Apache License Version 2.0, January 2004 @@ -15629,212 +17037,17 @@ Contents of probable licence file $GOMODCACHE/github.com/prometheus/procfs@v0.6. incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- - --------------------------------------------------------------------------------- -Dependency : github.com/rcrowley/go-metrics -Version: v0.0.0-20201227073835-cf1acfcdf475 -Licence type (autodetected): BSD-2-Clause-FreeBSD --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/rcrowley/go-metrics@v0.0.0-20201227073835-cf1acfcdf475/LICENSE: - -Copyright 2012 Richard Crowley. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - -THIS SOFTWARE IS PROVIDED BY RICHARD CROWLEY ``AS IS'' AND ANY EXPRESS -OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL RICHARD CROWLEY OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF -THE POSSIBILITY OF SUCH DAMAGE. - -The views and conclusions contained in the software and documentation -are those of the authors and should not be interpreted as representing -official policies, either expressed or implied, of Richard Crowley. - -------------------------------------------------------------------------------- -Dependency : github.com/rs/cors -Version: v1.7.0 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/rs/cors@v1.7.0/LICENSE: - -Copyright (c) 2014 Olivier Poitrey - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is furnished -to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
- - --------------------------------------------------------------------------------- -Dependency : github.com/shirou/gopsutil -Version: v3.21.5+incompatible -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/shirou/gopsutil@v3.21.5+incompatible/LICENSE: - -gopsutil is distributed under BSD license reproduced below. - -Copyright (c) 2014, WAKAYAMA Shirou -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - * Neither the name of the gopsutil authors nor the names of its contributors - may be used to endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -------- -internal/common/binary.go in the gopsutil is copied and modifid from golang/encoding/binary.go. - - - -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------------------- -Dependency : github.com/sirupsen/logrus -Version: v1.6.0 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/sirupsen/logrus@v1.6.0/LICENSE: - -The MIT License (MIT) - -Copyright (c) 2014 Simon Eskildsen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - - --------------------------------------------------------------------------------- -Dependency : github.com/spf13/afero -Version: v1.2.2 +Dependency : go.elastic.co/ecszap +Version: v1.0.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/spf13/afero@v1.2.2/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/go.elastic.co/ecszap@v1.0.0/LICENSE: + - Apache License + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -16009,174 +17222,41 @@ Contents of probable licence file $GOMODCACHE/github.com/spf13/afero@v1.2.2/LICE incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
+ END OF TERMS AND CONDITIONS --------------------------------------------------------------------------------- -Dependency : github.com/spf13/cast -Version: v1.3.1 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/spf13/cast@v1.3.1/LICENSE: - -The MIT License (MIT) - -Copyright (c) 2014 Steve Francia - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - --------------------------------------------------------------------------------- -Dependency : github.com/spf13/jwalterweatherman -Version: v1.1.0 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/spf13/jwalterweatherman@v1.1.0/LICENSE: - -The MIT License (MIT) - -Copyright (c) 2014 Steve Francia - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- --------------------------------------------------------------------------------- -Dependency : github.com/spf13/viper -Version: v1.7.1 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/spf13/viper@v1.7.1/LICENSE: - -The MIT License (MIT) - -Copyright (c) 2014 Steve Francia - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - --------------------------------------------------------------------------------- -Dependency : github.com/subosito/gotenv -Version: v1.2.0 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/subosito/gotenv@v1.2.0/LICENSE: - -The MIT License (MIT) - -Copyright (c) 2013 Alif Rachmawadi - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - - --------------------------------------------------------------------------------- -Dependency : github.com/tklauser/go-sysconf -Version: v0.3.5 -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/tklauser/go-sysconf@v0.3.5/LICENSE: - -BSD 3-Clause License - -Copyright (c) 2018-2021, Tobias Klauser -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: + APPENDIX: How to apply the Apache License to your work. -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. + Copyright 2020 Elastic and contributors -* Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. -------------------------------------------------------------------------------- -Dependency : github.com/tklauser/numcpus -Version: v0.2.2 +Dependency : go.opencensus.io +Version: v0.23.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/tklauser/numcpus@v0.2.2/LICENSE: +Contents of probable licence file $GOMODCACHE/go.opencensus.io@v0.23.0/LICENSE: + Apache License Version 2.0, January 2004 @@ -16358,7 +17438,7 @@ Contents of probable licence file $GOMODCACHE/github.com/tklauser/numcpus@v0.2.2 APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" + boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) 
The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -16366,7 +17446,7 @@ Contents of probable licence file $GOMODCACHE/github.com/tklauser/numcpus@v0.2.2 same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright {yyyy} Authors of Cilium + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16380,47 +17460,13 @@ Contents of probable licence file $GOMODCACHE/github.com/tklauser/numcpus@v0.2.2 See the License for the specific language governing permissions and limitations under the License. - - --------------------------------------------------------------------------------- -Dependency : github.com/ugorji/go/codec -Version: v1.1.8 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/ugorji/go/codec@v1.1.8/LICENSE: - -The MIT License (MIT) - -Copyright (c) 2012-2015 Ugorji Nwoke. -All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- - -------------------------------------------------------------------------------- -Dependency : github.com/urso/diag -Version: v0.0.0-20200210123136-21b3cc8eb797 +Dependency : go.opentelemetry.io/contrib +Version: v0.21.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/urso/diag@v0.0.0-20200210123136-21b3cc8eb797/LICENSE: +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/contrib@v0.21.0/LICENSE: Apache License Version 2.0, January 2004 @@ -16626,12 +17672,12 @@ Contents of probable licence file $GOMODCACHE/github.com/urso/diag@v0.0.0-202002 -------------------------------------------------------------------------------- -Dependency : github.com/urso/go-bin -Version: v0.0.0-20180220135811-781c575c9f0e +Dependency : go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc +Version: v0.21.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/urso/go-bin@v0.0.0-20180220135811-781c575c9f0e/LICENSE: +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc@v0.21.0/LICENSE: Apache License Version 2.0, January 2004 @@ -16837,12 +17883,12 @@ Contents of probable licence file $GOMODCACHE/github.com/urso/go-bin@v0.0.0-2018 -------------------------------------------------------------------------------- -Dependency : github.com/urso/sderr -Version: v0.0.0-20200210124243-c2a16f3d43ec +Dependency : go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp +Version: v0.21.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/urso/sderr@v0.0.0-20200210124243-c2a16f3d43ec/LICENSE: +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp@v0.21.0/LICENSE: Apache License Version 2.0, January 2004 @@ -17048,13 +18094,12 @@ Contents of probable licence file $GOMODCACHE/github.com/urso/sderr@v0.0.0-20200 -------------------------------------------------------------------------------- -Dependency : github.com/xdg/scram -Version: v0.0.0-20180814205039-7eeb5667e42c +Dependency : go.opentelemetry.io/otel +Version: v1.0.0-RC1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/xdg/scram@v0.0.0-20180814205039-7eeb5667e42c/LICENSE: - +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel@v1.0.0-!r!c1/LICENSE: Apache License Version 2.0, January 2004 @@ -17220,26 +18265,52 @@ Contents of probable licence file $GOMODCACHE/github.com/xdg/scram@v0.0.0-201808 other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. 
However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. -------------------------------------------------------------------------------- -Dependency : github.com/xdg/stringprep -Version: v1.0.0 +Dependency : go.opentelemetry.io/otel/internal/metric +Version: v0.21.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/xdg/stringprep@v1.0.0/LICENSE: - +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/internal/metric@v0.21.0/LICENSE: Apache License Version 2.0, January 2004 @@ -17416,15 +18487,41 @@ Contents of probable licence file $GOMODCACHE/github.com/xdg/stringprep@v1.0.0/L incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + -------------------------------------------------------------------------------- -Dependency : go.elastic.co/ecszap -Version: v1.0.0 +Dependency : go.opentelemetry.io/otel/metric +Version: v0.21.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.elastic.co/ecszap@v1.0.0/LICENSE: - +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/metric@v0.21.0/LICENSE: Apache License Version 2.0, January 2004 @@ -17614,7 +18711,7 @@ Contents of probable licence file $GOMODCACHE/go.elastic.co/ecszap@v1.0.0/LICENS same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2020 Elastic and contributors + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17628,14 +18725,14 @@ Contents of probable licence file $GOMODCACHE/go.elastic.co/ecszap@v1.0.0/LICENS See the License for the specific language governing permissions and limitations under the License. + -------------------------------------------------------------------------------- -Dependency : go.opencensus.io -Version: v0.23.0 +Dependency : go.opentelemetry.io/otel/trace +Version: v1.0.0-RC1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.opencensus.io@v0.23.0/LICENSE: - +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/trace@v1.0.0-!r!c1/LICENSE: Apache License Version 2.0, January 2004 @@ -17839,6 +18936,7 @@ Contents of probable licence file $GOMODCACHE/go.opencensus.io@v0.23.0/LICENSE: See the License for the specific language governing permissions and limitations under the License. + -------------------------------------------------------------------------------- Dependency : go.uber.org/multierr Version: v1.7.0 @@ -17907,11 +19005,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : golang.org/x/oauth2 -Version: v0.0.0-20210323180902-22b0adad7558 +Version: v0.0.0-20210514164344-f6687ab2804c Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/oauth2@v0.0.0-20210323180902-22b0adad7558/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/oauth2@v0.0.0-20210514164344-f6687ab2804c/LICENSE: Copyright (c) 2009 The Go Authors. All rights reserved. 
@@ -18055,11 +19153,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : google.golang.org/genproto -Version: v0.0.0-20210630183607-d20f26d13c79 +Version: v0.0.0-20210716133855-ce7ef5c701ea Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/google.golang.org/genproto@v0.0.0-20210630183607-d20f26d13c79/LICENSE: +Contents of probable licence file $GOMODCACHE/google.golang.org/genproto@v0.0.0-20210716133855-ce7ef5c701ea/LICENSE: Apache License @@ -18340,207 +19438,6 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. --------------------------------------------------------------------------------- -Dependency : gopkg.in/ini.v1 -Version: v1.52.0 -Licence type (autodetected): Apache-2.0 --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/gopkg.in/ini.v1@v1.52.0/LICENSE: - -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. 
- -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. 
- -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. 
- -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright 2014 Unknwon - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- - -------------------------------------------------------------------------------- Dependency : gopkg.in/jcmturner/aescts.v1 Version: v1.0.1 @@ -20302,11 +21199,11 @@ Contents of probable licence file $GOMODCACHE/k8s.io/client-go@v0.19.4/LICENSE: -------------------------------------------------------------------------------- Dependency : k8s.io/klog/v2 -Version: v2.8.0 +Version: v2.9.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/k8s.io/klog/v2@v2.8.0/LICENSE: +Contents of probable licence file $GOMODCACHE/k8s.io/klog/v2@v2.9.0/LICENSE: Apache License Version 2.0, January 2004 diff --git a/beater/jaeger/grpc_test.go b/beater/jaeger/grpc_test.go index 570976ae682..266f35e3c9d 100644 --- a/beater/jaeger/grpc_test.go +++ b/beater/jaeger/grpc_test.go @@ -28,7 +28,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/translator/trace/jaeger" "github.com/elastic/beats/v7/libbeat/common" @@ -81,18 +81,14 @@ func (tc *testGRPCCollector) setup(t *testing.T) { beatertest.ClearRegistry(gRPCCollectorMonitoringMap) if tc.request == nil { traces := pdata.NewTraces() - resourceSpans := pdata.NewResourceSpans() - spans := pdata.NewInstrumentationLibrarySpans() - span0 := pdata.NewSpan() + resourceSpans := traces.ResourceSpans().AppendEmpty() + spans := resourceSpans.InstrumentationLibrarySpans().AppendEmpty() + span0 := spans.Spans().AppendEmpty() span0.SetTraceID(pdata.NewTraceID([16]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00})) span0.SetSpanID(pdata.NewSpanID([8]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF})) - span1 := pdata.NewSpan() + span1 := spans.Spans().AppendEmpty() span1.SetTraceID(pdata.NewTraceID([16]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00})) span1.SetSpanID(pdata.NewSpanID([8]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF})) - spans.Spans().Append(span0) - spans.Spans().Append(span1) - resourceSpans.InstrumentationLibrarySpans().Append(spans) - traces.ResourceSpans().Append(resourceSpans) batches, err := jaeger.InternalTracesToJaegerProto(traces) require.NoError(t, err) diff --git a/beater/jaeger/http_test.go b/beater/jaeger/http_test.go index 182b9548558..aeb9aeb001a 100644 --- a/beater/jaeger/http_test.go +++ b/beater/jaeger/http_test.go @@ -31,7 +31,7 @@ import ( jaegerthrift "github.com/jaegertracing/jaeger/thrift-gen/jaeger" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "github.com/elastic/apm-server/beater/beatertest" "github.com/elastic/apm-server/beater/request" diff --git a/beater/otlp/grpc.go b/beater/otlp/grpc.go index d6c4d27d69a..c3d7bca6c2a 100644 --- a/beater/otlp/grpc.go +++ b/beater/otlp/grpc.go @@ -80,10 +80,10 @@ func RegisterGRPCServices(grpcServer *grpc.Server, processor model.BatchProcesso // dynamically registered and unregistered. 
setCurrentMonitoredConsumer(consumer) - if err := otlpreceiver.RegisterTraceReceiver(context.Background(), consumer, grpcServer, nil); err != nil { + if err := otlpreceiver.RegisterTraceReceiver(context.Background(), consumer, grpcServer); err != nil { return errors.Wrap(err, "failed to register OTLP trace receiver") } - if err := otlpreceiver.RegisterMetricsReceiver(context.Background(), consumer, grpcServer, nil); err != nil { + if err := otlpreceiver.RegisterMetricsReceiver(context.Background(), consumer, grpcServer); err != nil { return errors.Wrap(err, "failed to register OTLP metrics receiver") } return nil diff --git a/beater/otlp/grpc_test.go b/beater/otlp/grpc_test.go index e768105b33f..917a7f76c32 100644 --- a/beater/otlp/grpc_test.go +++ b/beater/otlp/grpc_test.go @@ -19,16 +19,14 @@ package otlp_test import ( "context" - "encoding/json" "errors" "net" - "reflect" - "strings" "testing" - "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/model/otlpgrpc" + "go.opentelemetry.io/collector/model/pdata" "google.golang.org/grpc" "google.golang.org/grpc/status" @@ -39,13 +37,6 @@ import ( "github.com/elastic/beats/v7/libbeat/monitoring" ) -var ( - exportMetricsServiceRequestType = proto.MessageType("opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceRequest") - exportMetricsServiceResponseType = proto.MessageType("opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceResponse") - exportTraceServiceRequestType = proto.MessageType("opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest") - exportTraceServiceResponseType = proto.MessageType("opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse") -) - func TestConsumeTraces(t *testing.T) { var batches []model.Batch var reportError error @@ -54,41 +45,23 @@ func TestConsumeTraces(t *testing.T) { return reportError } + conn := newServer(t, batchProcessor) + client := otlpgrpc.NewTracesClient(conn) + // Send a minimal trace to verify that everything is connected properly. // // We intentionally do not check the published event contents; those are // tested in processor/otel. 
- cannedRequest := jsonExportTraceServiceRequest(`{ -"resource_spans": [ - { - "instrumentation_library_spans": [ - { - "spans": [ - { - "trace_id": "0123456789abcdef0123456789abcdef", - "span_id": "945254c567a5417e", - "name": "operation_name" - } - ] - } - ] - } -] -}`) + traces := pdata.NewTraces() + span := traces.ResourceSpans().AppendEmpty().InstrumentationLibrarySpans().AppendEmpty().Spans().AppendEmpty() + span.SetName("operation_name") - conn := newServer(t, batchProcessor) - err := conn.Invoke( - context.Background(), "/opentelemetry.proto.collector.trace.v1.TraceService/Export", - cannedRequest, newExportTraceServiceResponse(), - ) + _, err := client.Export(context.Background(), traces) assert.NoError(t, err) require.Len(t, batches, 1) reportError = errors.New("failed to publish events") - err = conn.Invoke( - context.Background(), "/opentelemetry.proto.collector.trace.v1.TraceService/Export", - cannedRequest, newExportTraceServiceResponse(), - ) + _, err = client.Export(context.Background(), traces) assert.Error(t, err) errStatus := status.Convert(err) assert.Equal(t, "failed to publish events", errStatus.Message()) @@ -117,39 +90,26 @@ func TestConsumeMetrics(t *testing.T) { return reportError } + conn := newServer(t, batchProcessor) + client := otlpgrpc.NewMetricsClient(conn) + // Send a minimal metric to verify that everything is connected properly. // // We intentionally do not check the published event contents; those are // tested in processor/otel. - cannedRequest := jsonExportMetricsServiceRequest(`{ -"resource_metrics": [ - { - "instrumentation_library_metrics": [ - { - "metrics": [ - { - "name": "metric_name" - } - ] - } - ] - } -] -}`) + metrics := pdata.NewMetrics() + metric := metrics.ResourceMetrics().AppendEmpty().InstrumentationLibraryMetrics().AppendEmpty().Metrics().AppendEmpty() + metric.SetName("metric_type") + metric.SetDataType(pdata.MetricDataTypeSummary) + metric.Summary().DataPoints().AppendEmpty() - conn := newServer(t, batchProcessor) - err := conn.Invoke( - context.Background(), "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", - cannedRequest, newExportMetricsServiceResponse(), - ) + _, err := client.Export(context.Background(), metrics) assert.NoError(t, err) reportError = errors.New("failed to publish events") - err = conn.Invoke( - context.Background(), "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", - cannedRequest, newExportMetricsServiceResponse(), - ) + _, err = client.Export(context.Background(), metrics) assert.Error(t, err) + errStatus := status.Convert(err) assert.Equal(t, "failed to publish events", errStatus.Message()) @@ -173,34 +133,6 @@ func TestConsumeMetrics(t *testing.T) { }, actual) } -func jsonExportTraceServiceRequest(j string) interface{} { - request := reflect.New(exportTraceServiceRequestType.Elem()).Interface() - decoder := json.NewDecoder(strings.NewReader(j)) - decoder.DisallowUnknownFields() - if err := decoder.Decode(request); err != nil { - panic(err) - } - return request -} - -func newExportTraceServiceResponse() interface{} { - return reflect.New(exportTraceServiceResponseType.Elem()).Interface() -} - -func jsonExportMetricsServiceRequest(j string) interface{} { - request := reflect.New(exportMetricsServiceRequestType.Elem()).Interface() - decoder := json.NewDecoder(strings.NewReader(j)) - decoder.DisallowUnknownFields() - if err := decoder.Decode(request); err != nil { - panic(err) - } - return request -} - -func newExportMetricsServiceResponse() interface{} { - return 
reflect.New(exportMetricsServiceResponseType.Elem()).Interface() -} - func newServer(t *testing.T, batchProcessor model.BatchProcessor) *grpc.ClientConn { lis, err := net.Listen("tcp", "localhost:0") require.NoError(t, err) diff --git a/go.mod b/go.mod index 8c1daa551f6..e1ec895a5c1 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.13 require ( github.com/DataDog/zstd v1.4.4 // indirect github.com/akavel/rsrc v0.10.2 // indirect - github.com/apache/thrift v0.14.1 + github.com/apache/thrift v0.14.2 github.com/cespare/xxhash/v2 v2.1.1 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e github.com/dgraph-io/badger/v2 v2.2007.3-0.20201012072640-f5a7e0a1c83b @@ -28,13 +28,14 @@ require ( github.com/go-sourcemap/sourcemap v2.1.3+incompatible github.com/gofrs/uuid v4.0.0+incompatible github.com/gogo/protobuf v1.3.2 - github.com/google/pprof v0.0.0-20210323184331-8eee2492667d + github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9 + github.com/gorilla/websocket v1.4.2 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/golang-lru v0.5.4 - github.com/jaegertracing/jaeger v1.23.0 + github.com/jaegertracing/jaeger v1.24.0 github.com/josephspurrier/goversioninfo v1.2.0 // indirect - github.com/json-iterator/go v1.1.10 + github.com/json-iterator/go v1.1.11 github.com/jstemmer/go-junit-report v0.9.1 github.com/magefile/mage v1.11.0 github.com/mattn/go-isatty v0.0.13 // indirect @@ -45,8 +46,8 @@ require ( github.com/poy/eachers v0.0.0-20181020210610-23942921fe77 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/reviewdog/reviewdog v0.9.17 - github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735 - github.com/spf13/cobra v1.1.3 + github.com/ryanuber/go-glob v1.0.0 + github.com/spf13/cobra v1.2.1 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.7.0 github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c @@ -61,16 +62,17 @@ require ( go.elastic.co/ecszap v1.0.0 // indirect go.elastic.co/fastjson v1.1.0 go.elastic.co/go-licence-detector v0.5.0 - go.opentelemetry.io/collector v0.28.0 - go.uber.org/atomic v1.8.0 + go.opentelemetry.io/collector v0.30.0 + go.opentelemetry.io/collector/model v0.30.0 + go.uber.org/atomic v1.9.0 go.uber.org/multierr v1.7.0 // indirect go.uber.org/zap v1.18.1 golang.org/x/net v0.0.0-20210614182718-04defd469f4e golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c // indirect - golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba - golang.org/x/tools v0.1.4 - google.golang.org/genproto v0.0.0-20210630183607-d20f26d13c79 // indirect + golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6 + golang.org/x/tools v0.1.5 + google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea // indirect google.golang.org/grpc v1.39.0 gopkg.in/yaml.v2 v2.4.0 gotest.tools/gotestsum v0.6.0 @@ -82,6 +84,7 @@ replace ( github.com/Azure/go-autorest => github.com/Azure/go-autorest v12.2.0+incompatible github.com/Microsoft/go-winio => github.com/bi-zone/go-winio v0.4.15 github.com/Shopify/sarama => github.com/elastic/sarama v1.19.1-0.20210120173147-5c8cb347d877 + github.com/aws/aws-sdk-go-v2 => github.com/aws/aws-sdk-go-v2 v0.9.0 github.com/docker/docker => github.com/docker/engine v0.0.0-20191113042239-ea84732a7725 github.com/docker/go-plugins-helpers => github.com/elastic/go-plugins-helpers v0.0.0-20200207104224-bdf17607b79f github.com/dop251/goja => github.com/andrewkroh/goja 
v0.0.0-20190128172624-dd2ac4456e20 @@ -96,4 +99,8 @@ replace ( k8s.io/client-go => k8s.io/client-go v0.19.4 ) +// We replace golang/glog, which is used by ristretto, to avoid polluting the +// command line flags and conflicting with command line flags added by libbeat. +replace github.com/golang/glog => github.com/slok/noglog v0.2.0 + replace go.opentelemetry.io/collector => ./internal/otel_collector diff --git a/go.sum b/go.sum index aded2ec2b40..b2600365375 100644 --- a/go.sum +++ b/go.sum @@ -22,8 +22,10 @@ cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHOb cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0 h1:oqqswrt4x6b9OGBnNqdssxBl1xf0rSUNjU2BR4BZar0= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0 h1:bAMqZidYkmIsUqe6PtkEPT7Q+vfizScn+jfNA6jwK9c= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -31,6 +33,7 @@ cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUM cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= +cloud.google.com/go/bigtable v1.3.0/go.mod h1:z5EyKrPE8OQmeg4h5MNdKvuSnI9CCT49Ki3f23aBzio= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= @@ -57,14 +60,14 @@ contrib.go.opencensus.io/exporter/prometheus v0.3.0/go.mod h1:rpCPVQKhiyH8oomWgm dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/AlecAivazis/survey/v2 v2.1.1 h1:LEMbHE0pLj75faaVEKClEX1TM4AJmmnOh9eimREzLWI= github.com/AlecAivazis/survey/v2 v2.1.1/go.mod h1:9FJRdMdDm8rnT+zHVbvQT2RTSTLq0Ttd6q3Vl2fahjk= -github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/Azure/azure-amqp-common-go/v3 v3.0.0/go.mod h1:SY08giD/XbhTz07tJdpw1SoxQXHPN30+DI3Z04SYqyg= github.com/Azure/azure-event-hubs-go/v3 v3.1.2/go.mod h1:hR40byNJjKkS74+3RhloPQ8sJ8zFQeJ920Uk3oYY0+k= github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= github.com/Azure/azure-pipeline-go v0.1.9/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= github.com/Azure/azure-sdk-for-go v37.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v52.5.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go 
v41.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v55.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-storage-blob-go v0.6.0/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y= github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0= github.com/Azure/go-amqp v0.12.6/go.mod h1:qApuH6OFTSKZFmCOxccvAv5rLizBQf4v8pRmG138DPo= @@ -74,12 +77,16 @@ github.com/Azure/go-autorest v12.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSW github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= -github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= +github.com/Azure/go-autorest/autorest v0.10.0/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= +github.com/Azure/go-autorest/autorest v0.10.1/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= +github.com/Azure/go-autorest/autorest v0.11.19/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/adal v0.9.14/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= @@ -101,7 +108,9 @@ github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DATA-DOG/godog v0.7.13/go.mod h1:z2OZ6a3X0/YAKVqLfVzYBwFt3j6uSt3Xrqa7XTtcQE0= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/DataDog/zstd v1.4.4 h1:+IawcoXhCBylN7ccwdwf8LOH2jKq7NavGpEPanrlTzE= github.com/DataDog/zstd v1.4.4/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= @@ -113,6 +122,7 @@ github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3Q github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/semver/v3 v3.1.0 
h1:Y2lUDsFKVRSYGojLJ1yLxSXdMmMYTYls0rCvoqmMUQk= github.com/Masterminds/semver/v3 v3.1.0/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Masterminds/sprig v2.16.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8 h1:xzYJEypr/85nBpB11F9br+3HUrpgb+fcm5iADzXXYEw= @@ -124,6 +134,7 @@ github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbt github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/SAP/go-hdb v0.14.1/go.mod h1:7fdQLVC2lER3urZLjZCm0AuMQfApof92n3aylBPEkMo= github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/StackExchange/wmi v0.0.0-20170221213301-9f32b5905fd6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= @@ -158,17 +169,20 @@ github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQY github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antlr/antlr4 v0.0.0-20200820155224-be881fa6b91d/go.mod h1:T7PbCXFs94rrTttyxjbyT5+/1V8T2TYDejxUfHJjw1Y= github.com/antonmedv/expr v1.8.9/go.mod h1:5qsM3oLGDND7sDmQGDXHkYfkjYMUX14qsgqmHhwGEk8= +github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= +github.com/apache/arrow/go/arrow v0.0.0-20200923215132-ac86123a3f01/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.1-0.20200603211036-eac4d0c79a5f/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apache/thrift v0.14.1 h1:Yh8v0hpCj63p5edXOLaqTJW0IJ1p+eMW6+YSOqw1d6s= -github.com/apache/thrift v0.14.1/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.14.2 h1:hY4rAyg7Eqbb27GB6gkhUKrRAuc8xRjlNtJq+LseKeY= +github.com/apache/thrift v0.14.2/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apoydence/eachers v0.0.0-20181020210610-23942921fe77 h1:afT88tB6u9JCKQZVAAaa9ICz/uGn5Uw9ekn6P22mYKM= github.com/apoydence/eachers v0.0.0-20181020210610-23942921fe77/go.mod h1:bXvGk6IkT1Agy7qzJ+DjIw/SJ1AaB3AvAuMDVV+Vkoo= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod 
h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= @@ -185,17 +199,23 @@ github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:W github.com/aws/aws-lambda-go v1.6.0/go.mod h1:zUsUQhAUjYzR8AuduJPCfhBuKWUaDbQiPOG+ouzmE1A= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.29.16/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg= +github.com/aws/aws-sdk-go v1.30.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= -github.com/aws/aws-sdk-go v1.38.3/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.38.60/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.38.68/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go-v2 v0.9.0 h1:dWtJKGRFv3UZkMBQaIzMsF0/y4ge3iQPWTzeC4r/vl4= github.com/aws/aws-sdk-go-v2 v0.9.0/go.mod h1:sa1GePZ/LfBGI4dSq30f6uR4Tthll8axxtEPvlpXZ8U= -github.com/aws/aws-sdk-go-v2 v0.18.0 h1:qZ+woO4SamnH/eEbjM2IDLhRNwIwND/RQyVlBLp3Jqg= -github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.5.0/go.mod h1:acH3+MQoiMzozT/ivU+DbRg7Ooo2298RdRaWcOv+4vM= +github.com/aws/smithy-go v1.5.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/awslabs/goformation/v3 v3.1.0/go.mod h1:hQ5RXo3GNm2laHWKizDzU5DsDy+yNcenSca2UxN0850= github.com/awslabs/goformation/v4 v4.1.0/go.mod h1:MBDN7u1lMNDoehbFuO4uPvgwPeolTMA2TzX1yO6KlxI= github.com/aymerick/raymond v2.0.2+incompatible h1:VEp3GpgdAnv9B2GFyTvqgcKvY+mfKMjPOA3SbKLtnU0= github.com/aymerick/raymond v2.0.2+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/immutable v0.2.1/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI= +github.com/benbjohnson/tmpl v1.0.0/go.mod h1:igT620JFIi44B6awvU9IsDhR77IXWtFigTLil/RPdps= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= @@ -203,27 +223,32 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB github.com/bi-zone/go-winio v0.4.15 h1:viLHm+U7bzIkfVHuWgc3Wp/sT5zaLoRG7XdOEy1b12w= github.com/bi-zone/go-winio v0.4.15/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= github.com/blakesmith/ar v0.0.0-20150311145944-8bd4349a67f2/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI= github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= 
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/bonitoo-io/go-sql-bigquery v0.3.4-1.4.0/go.mod h1:J4Y6YJm0qTWB9aFziB7cPeSyc6dOZFyJdteSeybVpXQ= github.com/bradleyfalzon/ghinstallation v1.1.0 h1:mwazVinJU0mPyLxIcdtJzu4DhWXFO5lMsWhKyFRIwFk= github.com/bradleyfalzon/ghinstallation v1.1.0/go.mod h1:p7iD8KytOOKg2wCqbwvJlq4JGpYMjwjkiqdyUqOIHLI= github.com/bsm/sarama-cluster v2.1.13+incompatible/go.mod h1:r7ao+4tTNXvWm+VRpRJchr2kQhqxgmAp2iEX5W96gMM= github.com/bsm/sarama-cluster v2.1.14-0.20180625083203-7e67d87a6b3f+incompatible/go.mod h1:r7ao+4tTNXvWm+VRpRJchr2kQhqxgmAp2iEX5W96gMM= github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= +github.com/cactus/go-statsd-client/statsd v0.0.0-20191106001114-12b4e2b38748/go.mod h1:l/bIBLeOl9eX+wxJAzxS4TveKRtAqlyDpHjhkfO0MEI= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/casbin/casbin/v2 v2.31.6/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg= github.com/cavaliercoder/badio v0.0.0-20160213150051-ce5280129e9e/go.mod h1:V284PjgVwSk4ETmz84rpu9ehpGg7swlIH8npP9k2bGw= github.com/cavaliercoder/go-rpm v0.0.0-20190131055624-7a9c54e3d83e/go.mod h1:AZIh1CCnMrcVm6afFf96PBvE2MRpWFco91z8ObJtgDY= +github.com/cenkalti/backoff v0.0.0-20181003080854-62661b46c409/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= +github.com/cenkalti/backoff/v4 v4.1.0/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= @@ -232,6 +257,8 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5O github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= 
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudfoundry-community/go-cfclient v0.0.0-20190808214049-35bcce23fc5f h1:fK3ikA1s77arBhpDwFuyO0hUZ2Aa8O6o2Uzy8Q6iLbs= @@ -263,7 +290,6 @@ github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDG github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -271,6 +297,7 @@ github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= @@ -294,26 +321,29 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892/go.mod h1:CTDl0pzVzE5DEzZhPfvhY/9sPFMQIxaJ9VAMs9AagrE= github.com/denisenkom/go-mssqldb v0.0.0-20200206145737-bbfc9a55622e/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= github.com/devigned/tab v0.1.2-0.20190607222403-0c15cf42f9a2/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= -github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8= -github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= github.com/dgraph-io/badger/v2 v2.2007.3-0.20201012072640-f5a7e0a1c83b h1:mUDs72Rlzv6A4YN8w3Ra3hU9x/plOQPcQjZYL/1f5SM= github.com/dgraph-io/badger/v2 v2.2007.3-0.20201012072640-f5a7e0a1c83b/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= -github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= -github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de h1:t0UHb5vdojIDUqktM6+xJAfScFBsVpXZmqC9dsgJmeA= +github.com/dgraph-io/badger/v3 v3.2103.0/go.mod h1:GHMCYxuDWyzbHkh4k3yyg4PM61tJPFfEGSMbE3Vd5QE= github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto 
v0.0.4-0.20210309073149-3836124cdc5a/go.mod h1:MIonLggsKgZLUSt414ExgwNtlOL5MuEoAJP514mwGe8= +github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI= +github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.1-0.20190620180102-5e25c22bd5d6+incompatible h1:4jGdduO4ceTJFKf0IhgaB8NJapGqKHwC2b4xQ/cXujM= github.com/dgrijalva/jwt-go v3.2.1-0.20190620180102-5e25c22bd5d6+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1/go.mod h1:+hnT3ywWDTAFrW5aE+u2Sa/wT555ZqwoCS+pk3p6ry4= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/digitalocean/go-libvirt v0.0.0-20180301200012-6075ea3c39a1/go.mod h1:PRcPVAAma6zcLpFd4GZrjR/MRpood3TamjKI2m/z/Uw= -github.com/digitalocean/godo v1.58.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= +github.com/digitalocean/godo v1.62.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dlclark/regexp2 v1.1.7-0.20171009020623-7632a260cbaf/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= github.com/dlclark/regexp2 v1.4.0 h1:F1rxgk7p4uKjwIQxBs9oAXe5CqrXlCduYEJvrF4u93E= @@ -418,14 +448,18 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.12.0 h1:mRhaKNwANqRgUBGKmnI5ZxEk7QXmjQeCcuYFMX2bfcc= github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW4o= +github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod 
h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/foxcpp/go-mockdns v0.0.0-20201212160233-ede2f9158d15/go.mod h1:tPg4cp4nseejPd+UKxtCVQ2hUxNTZ7qQZJa7CLriIeo= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.10.2 h1:19ARM85nVi4xH7xPXuc5eM/udya5ieh7b/Sv+d844Tk= @@ -434,12 +468,15 @@ github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo github.com/gdamore/tcell v1.3.0/go.mod h1:Hjvr+Ofd+gLglo7RYKxxnzCBmev3BzsS67MebKS4zMM= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do= github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0= github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= +github.com/go-chi/chi v4.1.0+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= github.com/go-git/go-billy/v5 v5.0.0 h1:7NQHvd9FVid8VL4qVUMm8XifBK+2xCoZ2lSk0agRrHM= @@ -454,6 +491,9 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-kit/kit v0.11.0/go.mod h1:73/6Ixaufkvb5Osvkls8C79vuQ49Ba1rUEUYNSf+FUw= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= @@ -523,6 +563,7 @@ github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsd github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/spec v0.19.7/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= github.com/go-openapi/spec v0.19.15/go.mod 
h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= github.com/go-openapi/spec v0.20.0/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= @@ -560,6 +601,9 @@ github.com/go-openapi/validate v0.19.12/go.mod h1:Rzou8hA/CBw8donlS6WNEUQupNvUZ0 github.com/go-openapi/validate v0.19.15/go.mod h1:tbn/fdOwYHgrhPBzidZfJC2MIVvs9GA7monOmWBbeCI= github.com/go-openapi/validate v0.20.1/go.mod h1:b60iJT+xNNLfaQJUqLI7946tYiFEOuE9E4k54HpKcJ0= github.com/go-openapi/validate v0.20.2/go.mod h1:e7OJoKNgd0twXZwIn0A43tHbvIcr/rZIVCbJBpTUoY0= +github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= +github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= +github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48/go.mod h1:dZGr0i9PLlaaTD4H/hoZIDjQ+r6xq8mgbRzHZf7f2J8= github.com/go-sourcemap/sourcemap v2.1.2+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU= github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= @@ -568,6 +612,7 @@ github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/go-test/deep v1.0.7/go.mod h1:QV8Hv/iy04NyLBxAdO9njL0iVPN1S4d/A3NVv1V36o8= github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= @@ -600,6 +645,7 @@ github.com/gocarina/gocsv v0.0.0-20170324095351-ffef3ffc77be/go.mod h1:/oj50ZdPq github.com/gocql/gocql v0.0.0-20200228163523-cd4b606dd2fb/go.mod h1:DL0ekTmBSTdlNF25Orwt/JMzqIq3EJ4MVa/J/uK64OY= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godror/godror v0.10.4/go.mod h1:9MVLtu25FBJBMHkPs0m3Ngf/VmwGcLpM2HS8PlNGw9U= github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/flock v0.7.2-0.20190320160742-5135e617513b h1:3QNh5Xo2pmr2nZXENtnztfpjej8XY8EPmvYxF5SzY9M= @@ -613,19 +659,22 @@ github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6x github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod 
h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -649,19 +698,22 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/gomodule/redigo v1.8.3 h1:HR0kYDX2RJZvAup8CsiJwxB4dTCSC0AaUq6S4SiLwUc= github.com/gomodule/redigo v1.8.3/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/flatbuffers v1.7.2-0.20170925184458-7a6b2bf521e9/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/flatbuffers v1.11.0/go.mod 
h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v1.12.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -692,19 +744,22 @@ github.com/google/licenseclassifier v0.0.0-20200402202327-879cb1424de0/go.mod h1 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200417002340-c6e0a841f49a/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210323184331-8eee2492667d h1:Rwivyny4wymF1qWzOk800eSVa/n9njfdOm+kHjiQhZQ= -github.com/google/pprof v0.0.0-20210323184331-8eee2492667d/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9 h1:2tft2559dNwKl2znYB58oVTql0grRB+Ml3LWIBbc4WM= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= @@ -717,9 +772,11 @@ github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.4.0/go.mod 
h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= -github.com/gophercloud/gophercloud v0.16.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4= +github.com/gophercloud/gophercloud v0.10.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= +github.com/gophercloud/gophercloud v0.18.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75 h1:f0n1xnMSmBLzVfsMMvriDyA75NB/oBgILX2GcHXIQzY= @@ -745,7 +802,7 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.14.4/go.mod h1:6CwZWGDSPRJidgKAtJVvND6soZe6fT7iteq8wDPdhb0= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/h2non/filetype v1.1.1-0.20201130172452-f60988ab73d5 h1:xI88renBpIJws9OfEQq4Dng10OppnY5u9bTok/GDFEI= @@ -753,30 +810,41 @@ github.com/h2non/filetype v1.1.1-0.20201130172452-f60988ab73d5/go.mod h1:319b3zT github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/api v1.4.0/go.mod h1:xc8u05kyMa3Wjr9eEAsIAo3dg8+LywT5E/Cl7cNS5nU= github.com/hashicorp/consul/api v1.8.1/go.mod h1:sDjTOq0yUyv5G4h+BqSea7Fn6BU+XbolEz1952UB+mk= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.4.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= github.com/hashicorp/consul/sdk v0.7.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod 
h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.12.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.16.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.2.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= github.com/hashicorp/go-plugin v1.4.2/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= @@ -787,6 +855,7 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.0.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -799,27 +868,38 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/memberlist v0.1.4/go.mod 
h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/memberlist v0.2.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/memberlist v0.2.3/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/nomad/api v0.0.0-20200303134319-e31695b5bbe6 h1:AwxY7x4xvPonxiMJ+6VkXEqTOWoNYWyku+ojyot5DRk= github.com/hashicorp/nomad/api v0.0.0-20200303134319-e31695b5bbe6/go.mod h1:WKCL+tLVhN1D+APwH3JiTRZoxcdwRk86bWu1LVCUPaE= +github.com/hashicorp/nomad/api v0.0.0-20201203164818-6318a8ac7bf8 h1:Yrz9yGVJf5Ce2KS7x8hS/MUTIeBmGEhF8nhzolRpSqY= +github.com/hashicorp/nomad/api v0.0.0-20201203164818-6318a8ac7bf8/go.mod h1:vYHP9jMXk4/T2qNUbWlQ1OHCA1hHLil3nvqSmz8mtgc= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hashicorp/serf v0.9.0/go.mod h1:YL0HO+FifKOW2u1ke99DGVu1zhcpZzNwrLIqBC7vbYU= github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= +github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoIospckxBxk6Q= +github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20190923154419-df201c70410d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/haya14busa/go-actions-toolkit v0.0.0-20200105081403-ca0307860f01 h1:HiJF8Mek+I7PY0Bm+SuhkwaAZSZP83sw6rrTMrgZ0io= github.com/haya14busa/go-actions-toolkit v0.0.0-20200105081403-ca0307860f01/go.mod h1:1DWDZmeYf0LX30zscWb7K9rUMeirNeBMd5Dum+seUhc= github.com/haya14busa/go-checkstyle v0.0.0-20170303121022-5e9d09f51fa1/go.mod h1:RsN5RGgVYeXpcXNtWyztD5VIe7VNSEqpJvF2iEH7QvI= github.com/haya14busa/secretbox v0.0.0-20180525171038-07c7ecf409f5/go.mod h1:FGO/dXIFZnan7KvvUSFk1hYMnoVNzB6NTMPrmke8SSI= github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E= -github.com/hetznercloud/hcloud-go v1.24.0/go.mod h1:3YmyK8yaZZ48syie6xpm3dt26rtB6s65AisBHylXYFA= +github.com/hetznercloud/hcloud-go v1.26.2/go.mod h1:2C5uMtBiMoFr3m7lBFPf7wXTdh33CevmZpQIIDPGYJI= github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174 h1:WlZsjVhE8Af9IcZDGgJGQpNflI3+MJSBhsgT5PCtzBQ= github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174/go.mod h1:DqJ97dSdRW1W22yXSB90986pcOyQ7r45iio1KN2ez1A= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639 h1:mV02weKRL81bEnm8A0HT1/CAelMQDBuQIfLw8n+d6xI= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo 
v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -827,18 +907,26 @@ github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY= -github.com/influxdata/influxdb v1.8.4/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI= +github.com/influxdata/flux v0.65.0/go.mod h1:BwN2XG2lMszOoquQaFdPET8FRQfrXiZsWmcMO9rkaVY= +github.com/influxdata/flux v0.113.0/go.mod h1:3TJtvbm/Kwuo5/PEo5P6HUzwVg4bXWkb2wPQHPtQdlU= +github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69/go.mod h1:pwymjR6SrP3gD3pRj9RJwdl1j5s3doEEV8gS4X9qSzA= +github.com/influxdata/influxdb v1.8.0/go.mod h1:SIzcnsjaHRFpmlxpJ4S3NT64qtEKYweNTUMb/vh0OMQ= +github.com/influxdata/influxdb v1.9.2/go.mod h1:UEe3MeD9AaP5rlPIes102IhYua3FhIWZuOXNHxDjSrI= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= +github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/influxdata/influxql v1.1.0/go.mod h1:KpVI7okXjK6PRi3Z5B+mtKZli+R1DnZgb3N+tzevNgo= +github.com/influxdata/influxql v1.1.1-0.20210223160523-b6ab99450c93/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= +github.com/influxdata/pkg-config v0.2.6/go.mod h1:EMS7Ll0S4qkzDk53XS3Z72/egBsPInt+BeRxb0WeSwk= +github.com/influxdata/pkg-config v0.2.7/go.mod h1:EMS7Ll0S4qkzDk53XS3Z72/egBsPInt+BeRxb0WeSwk= github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8= github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= +github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b/go.mod h1:Z0kXnxzbTC2qrx4NaIzYkE1k66+6oEDQTvL95hQFh5Y= github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= github.com/insomniacslk/dhcp v0.0.0-20180716145214-633285ba52b2/go.mod h1:CfMdguCK66I5DAUJgGKyNz8aB6vO5dZzkm9Xep6WGvw= -github.com/jaegertracing/jaeger v1.23.0 h1:jdv6xzB7esPVIbXXZ5GWkFwX0cGwfbGJVf//xYnV0v8= -github.com/jaegertracing/jaeger v1.23.0/go.mod h1:gB6Qc+Kjd/IX1G82oGTArbHI3ZRO//iUkaMW+gzL9uw= +github.com/jaegertracing/jaeger v1.24.0 h1:wbzvajFSsV3j5843nIlyUa70+uQevKsT3l7MV29jlxU= +github.com/jaegertracing/jaeger v1.24.0/go.mod h1:mqdtFDA447va5j0UewDaAWyNlGreGQyhGxXVhbF58gQ= github.com/jarcoal/httpmock v1.0.4 h1:jp+dy/+nonJE4g4xbVtl9QdrUNbn6/3hDT5R4nDIZnA= github.com/jarcoal/httpmock v1.0.4/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= @@ -848,8 +936,10 @@ github.com/jcmturner/gofork v1.0.0/go.mod 
h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/U github.com/jedib0t/go-pretty v4.3.0+incompatible h1:CGs8AVhEKg/n9YbUenWmNStRW2PHJzaeDodcfvRAbIo= github.com/jedib0t/go-pretty v4.3.0+incompatible/go.mod h1:XemHduiw8R651AF9Pt4FwCTKeG3oo7hrHJAoznj9nag= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -857,6 +947,7 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfC github.com/jmoiron/sqlx v1.2.1-0.20190826204134-d7d95172beb5/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4= github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= +github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= @@ -872,12 +963,14 @@ github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= +github.com/jsternberg/zap-logfmt v1.2.0/go.mod h1:kz+1CUmCutPWABnNkOu9hOHKdT2q3TDYCcsFy9hpqb0= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= @@ -900,16 +993,19 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress 
v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.11.0 h1:wJbzvpYMVGG9iTI9VxpnNZfd4DzMPoCWze3GgSqz8yg= github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.12 h1:famVnQVu7QwryBN4jNseQdUKES71ZAOnB6UQQJPZvqk= +github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/knadh/koanf v1.1.1 h1:doO5UBvSXcmngdr/u54HKe+Uz4ZZw0/YHVzSsnE3vD4= +github.com/knadh/koanf v1.1.1/go.mod h1:xpPTwMhsA/aaQLAilyCCqfpEiY1gpa160AiCuWHJUjY= github.com/kolide/osquery-go v0.0.0-20200604192029-b019be7063ac/go.mod h1:rp36fokOKgd/5mOgbvv4fkpdaucQ43mnvb+8BR62Xo8= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -923,14 +1019,17 @@ github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= github.com/leoluk/perflib_exporter v0.1.0/go.mod h1:rpV0lYj7lemdTm31t7zpCqYqPnw7xs86f+BaaNBVYFM= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.2-0.20190507191818-2ff3cb3adc01/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/linode/linodego v0.28.5/go.mod h1:BR0gVkCJffEdIGJSl6bHR80Ty+Uvg/2jkjmrWaFectM= github.com/lucasb-eyer/go-colorful v1.0.2/go.mod h1:0MS4r+7BZKSJ5mw4/S5MPN+qHFF1fYclkSPilDOKW0s= github.com/lucasb-eyer/go-colorful v1.0.3/go.mod 
h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= @@ -939,8 +1038,7 @@ github.com/magefile/mage v1.10.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXq github.com/magefile/mage v1.11.0 h1:C/55Ywp9BpgVVclD3lRnSYCwXTYxmSppIgLeDYlNuls= github.com/magefile/mage v1.11.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -969,6 +1067,7 @@ github.com/mattn/go-ieproxy v0.0.0-20191113090002-7c0f6868bffe/go.mod h1:pYabZ6I github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= @@ -992,11 +1091,18 @@ github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQ github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.22/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= -github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY= -github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/miekg/dns v1.1.29/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/dns v1.1.42 h1:gWGe42RGaIqXQZ+r3WUGEKBEtvPHY2SXo4dqixDNxuY= +github.com/miekg/dns v1.1.42/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= +github.com/mileusna/useragent v0.0.0-20190129205925-3e331f0949a5/go.mod h1:JWhYAp2EXqUtsxTKdeGlY8Wp44M7VxThC9FEoNGi2IE= +github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir 
v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -1012,11 +1118,15 @@ github.com/mitchellh/hashstructure v1.1.0/go.mod h1:xUDAozZz0Wmdiufv0uyhnHkUTN6/ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mjibson/esc v0.2.0/go.mod h1:9Hw9gxxfHulMF5OJKCyhYD7PzlSdhzXyaGEBRPH1OPs= github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -1036,13 +1146,18 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q= +github.com/nats-io/jwt/v2 v2.0.2/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY= github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats-server/v2 v2.2.6/go.mod h1:sEnFaxqe09cDmfMgACxZbziXnhQFhwk+aKkZjBBRYrI= github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nats.go v1.11.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= +github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= 
github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= @@ -1104,14 +1219,16 @@ github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2 h1:CXwSGu/LYmbjEab github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2/go.mod h1:L3UMQOThbttwfYRNFOWLLVXMhk5Lkio4GGOtw5UrxS0= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= -github.com/pelletier/go-toml v1.7.0 h1:7utD74fnzVc/cpcyy8sjrlFr5vYpypUixARcHIMIGuI= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ= +github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= @@ -1121,6 +1238,7 @@ github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrre/gotestcover v0.0.0-20160517101806-924dca7d15f0/go.mod h1:4xpMLz7RBWyB+ElzHu8Llua96TRCB3YwX+l5EP1wmHk= +github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1-0.20170505043639-c605e284fe17/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -1128,6 +1246,7 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -1137,7 +1256,8 
@@ github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSg github.com/poy/eachers v0.0.0-20181020210610-23942921fe77 h1:SNdqPRvRsVmYR0gKqFvrUKhFizPJ6yDiGQ++VAJIoDg= github.com/poy/eachers v0.0.0-20181020210610-23942921fe77/go.mod h1:x1vqpbcMW9T/KRcQ4b48diSiSVtYgvwQ5xzDByEg4WE= github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= -github.com/prometheus/alertmanager v0.21.0/go.mod h1:h7tJ81NA0VLWvWEayi1QltevFkLF3KxmC/malTcT8Go= +github.com/prometheus/alertmanager v0.20.0/go.mod h1:9g2i48FAyZW6BtbsnvHtMHQXl2aVtrORKwKVCQ+nbrg= +github.com/prometheus/alertmanager v0.22.2/go.mod h1:rYinOWxFuCnNssc3iOjn2oMTlhLaPcUuqV5yk5JKUAE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= @@ -1145,12 +1265,15 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.1.1-0.20190913103102-20428fa0bffc/go.mod h1:ikMPikHu8SMvBGWoKulvvOOZN227amf2E9eMYqyAwAY= +github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -1169,7 +1292,9 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8 github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.23.0/go.mod h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMDk7UAI2qm7Q= -github.com/prometheus/common v0.25.0/go.mod h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMDk7UAI2qm7Q= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/exporter-toolkit v0.5.1/go.mod 
h1:OCkM4805mmisBhLmVFw858QYi3v0wKdY6/UxrT0pZVg= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -1185,9 +1310,13 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/prometheus v1.8.2-0.20210430082741-2a4b8e12bbf2/go.mod h1:5aBj+GpLB+V5MCnrKm5+JAqEJwzDiLugOmDhgt7sDec= +github.com/prometheus/procfs v0.7.0 h1:OQZ41sZU9XkRpzrz8/TD0EldH/Rwbddkdu5wDyUwzfE= +github.com/prometheus/procfs v0.7.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/prometheus v0.0.0-20200609090129-a6600f564e3c/go.mod h1:S5n0C6tSgdnwWshBUceRx5G1OsjLv/EeZ9t3wIfEtsY= +github.com/prometheus/prometheus v1.8.2-0.20210621150501-ff58416a0b02/go.mod h1:fC6ROpjS/2o+MQTO7X8NSZLhLBSNlDzxaeDMqQm+TUM= github.com/prometheus/prometheus v2.5.0+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= github.com/prometheus/statsd_exporter v0.20.0/go.mod h1:YL3FWCG8JBBtaUSxAg4Gz2ZYu22bS84XM89ZQXXTWmQ= +github.com/prometheus/statsd_exporter v0.21.0/go.mod h1:rbT83sZq2V+p73lHhPZfMc3MLCHmSHelCh9hSGYNLTQ= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rakyll/statik v0.1.6/go.mod h1:OEi9wJV/fMUAGx1eNjq75DKDsJVuEv1U0oYdX6GX8Zs= github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ= @@ -1201,6 +1330,7 @@ github.com/reviewdog/errorformat v0.0.0-20200109134752-8983be9bc7dd h1:fvaEkjpr2 github.com/reviewdog/errorformat v0.0.0-20200109134752-8983be9bc7dd/go.mod h1:giYAXnpegRDPsXUO7TRpDKXJo1lFGYxyWRfEt5iQ+OA= github.com/reviewdog/reviewdog v0.9.17 h1:MKb3rlQZgkEXr3d85iqtYNITXn7gDJr2kT0IhgX/X9A= github.com/reviewdog/reviewdog v0.9.17/go.mod h1:Y0yPFDTi9L5ohkoecJdgbvAhq+dUXp+zI7atqVibwKg= +github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= github.com/rivo/tview v0.0.0-20200219210816-cd38d7432498/go.mod h1:6lkG1x+13OShEf0EaOCaTQYyB7d5nSbb181KtjlS+84= github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= @@ -1208,14 +1338,16 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= +github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/cors v1.8.0 h1:P2KMzcFwrPoSjkF1WLRPsp3UMLyql8L4v9hQpVeK5so= +github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod 
h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735 h1:7YvPJVmEeFHR1Tj9sZEYsmarJEQfMVYpd/Vyy/A8dqE= -github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/samuel/go-parser v0.0.0-20130731160455-ca8abbf65d0e/go.mod h1:Sb6li54lXV0yYEjI4wX8cucdQ9gqUJV3+Ngg3l9g30I= github.com/samuel/go-thrift v0.0.0-20140522043831-2187045faa54/go.mod h1:Vrkh1pnjV9Bl8c3P9zH0/D4NlOHWP5d4/hF4YTULaec= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= @@ -1225,7 +1357,9 @@ github.com/sanathkr/yaml v1.0.1-0.20170819201035-0056894fa522/go.mod h1:tQTYKOQg github.com/sanity-io/litter v1.2.0/go.mod h1:JF6pZUFgu2Q0sBZ+HSV35P8TVPI1TTzEwyu9FXAw2W4= github.com/santhosh-tekuri/jsonschema v1.2.4 h1:hNhW8e7t+H1vgY+1QeEQpveR6D4+OwKPXCfD2aieJis= github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4= +github.com/satori/go.uuid v0.0.0-20160603004225-b111a074d5ef/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44/go.mod h1:CJJ5VAbozOl0yEw7nHB9+7BXTJbIn6h7W+f6Gau5IP8= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/securego/gosec v0.0.0-20200203094520-d13bb6d2420c/go.mod h1:gp0gaHj0WlmPh9BdsTmo1aq6C27yIPWdxCKGFGdVKBE= @@ -1235,8 +1369,8 @@ github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAm github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shirou/gopsutil v3.20.12+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shirou/gopsutil v3.21.5+incompatible h1:OloQyEerMi7JUrXiNzy8wQ5XN+baemxSl12QgIzt0jc= -github.com/shirou/gopsutil v3.21.5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/gopsutil v3.21.6+incompatible h1:mmZtAlWSd8U2HeRTjswbnDLPxqsEoK01NK+GZ1P+nEM= +github.com/shirou/gopsutil v3.21.6+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= @@ -1247,13 +1381,17 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod 
h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/slok/noglog v0.2.0 h1:1czu4l2EoJ8L92UwdSXXa1Y+c5TIjFAFm2P+mjej95E= +github.com/slok/noglog v0.2.0/go.mod h1:TfKxwpEZPT+UA83bQ6RME146k0MM4e8mwHLf6bhcGDI= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/snowflakedb/gosnowflake v1.3.4/go.mod h1:NsRq2QeiMUuoNUJhp5Q6xGC4uBrsS9g6LwZVEkTWgsE= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= @@ -1261,8 +1399,8 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= @@ -1271,10 +1409,9 @@ github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3 github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v0.0.7/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= -github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M= -github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= +github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw= +github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -1284,12 +1421,12 
@@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk= -github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v1.0.0/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= @@ -1306,7 +1443,6 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c h1:+aPplBwWcHBo6q9xrfWdMrT9o4kltkmmvpemgIjep/8= @@ -1321,8 +1457,9 @@ github.com/tidwall/pretty v1.0.1/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhV github.com/tidwall/sjson v1.1.1 h1:7h1vk049Jnd5EH9NyzNiEuwYW4b5qgreBbqRC19AS3U= github.com/tidwall/sjson v1.1.1/go.mod h1:yvVuSnpEQv5cYIrO+AT6kw4QVfd5SDZoGIS7/5+fZFs= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= -github.com/tklauser/go-sysconf v0.3.5 h1:uu3Xl4nkLzQfXNsWn15rPc/HQCJKObbt1dKJeWp3vU4= -github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= +github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tklauser/go-sysconf v0.3.6 h1:oc1sJWvKkmvIxhDHeKWvZS4f6AW+YcoguSfRF2/Hmo4= +github.com/tklauser/go-sysconf v0.3.6/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= github.com/tklauser/numcpus v0.2.2 h1:oyhllyrScuYI6g+h/zUvNXNp1wy7x8qQy3t/piefldA= github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -1330,14 +1467,19 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1 github.com/tsg/go-daemon v0.0.0-20200207173439-e704b93fd89b/go.mod 
h1:jAqhj/JBVC1PwcLTWd6rjQyGyItxxrhpiBl8LSuAGmw= github.com/tsg/gopacket v0.0.0-20200626092518-2ab8e397a786 h1:B/IVHYiI0d04dudYw+CvCAGqSMq8d0yWy56eD6p85BQ= github.com/tsg/gopacket v0.0.0-20200626092518-2ab8e397a786/go.mod h1:RIkfovP3Y7my19aXEjjbNd9E5TlHozzAyt7B8AaEcwg= -github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/uber-go/tally v3.3.15+incompatible/go.mod h1:YDTIBxdXyOU/sCWilKB4bgyufu1cEi0jdVnRdxvjnmU= +github.com/uber/athenadriver v1.1.4/go.mod h1:tQjho4NzXw55LGfSZEcETuYydpY1vtmixUabHkC1K/E= +github.com/uber/jaeger-client-go v2.23.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.29.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= -github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go v1.1.8 h1:/D9x7IRpfMHDlizVOgxrag5Fh+/NY+LtI8bsr+AswRA= github.com/ugorji/go v1.1.8/go.mod h1:0lNM99SwWUIRhCXnigEMClngXBk/EmpTXa7mgiewYWA= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/ugorji/go/codec v1.1.8 h1:4dryPvxMP9OtkjIbuNeK2nb27M38XMHLGlfNSNph/5s= github.com/ugorji/go/codec v1.1.8/go.mod h1:X00B19HDtwvKbQY2DcYjvZxKQp8mzrJoQ6EgoIY/D2E= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= @@ -1360,6 +1502,7 @@ github.com/vektra/mockery v0.0.0-20181123154057-e78b021dcbb5/go.mod h1:ppEjwdhyy github.com/vmware/govmomi v0.0.0-20170802214208-2cad15190b41/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad/go.mod h1:Hy8o65+MXnS6EwGElrSRjUzQDLXreJlzYLlWiHtt8hM= github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/xanzy/go-gitlab v0.22.3 h1:/rNlZ2hquUWNc6rJdntVM03tEOoTmnZ1lcNyJCl0WlU= github.com/xanzy/go-gitlab v0.22.3/go.mod h1:t4Bmvnxj7k37S4Y17lfLx+nLqkf/oQwT2HagfWKv5Og= github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70= @@ -1383,7 +1526,7 @@ github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17 github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= -github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg= +github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/youmark/pkcs8 
v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= @@ -1413,10 +1556,15 @@ go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= +go.mongodb.org/mongo-driver v1.3.2/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= go.mongodb.org/mongo-driver v1.3.4/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= go.mongodb.org/mongo-driver v1.4.4/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= @@ -1434,21 +1582,44 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.28.0 h1:XmRwoSj3HZtC7O/12fBoQ9DInvwBwFHgHLZrwNxNjQY= -go.opentelemetry.io/collector v0.28.0/go.mod h1:AP/BTXwo1eedoJO7V+HQ68CSvJU1lcdqOzJCgt1VsNs= +go.opentelemetry.io/collector v0.30.0 h1:xeNeDKupLbx6L05wG9qrKeEZhiodjgkxuY3EeUE9J94= +go.opentelemetry.io/collector v0.30.0/go.mod h1:ER3czrkP35ADgO89Q31KGljiQpPW09FKWrXRH61X4BM= +go.opentelemetry.io/collector/model v0.30.0 h1:hK/nxq5IABHnpdGHRgeRtg89BErvYcI2zxD78QFmysA= +go.opentelemetry.io/collector/model v0.30.0/go.mod h1:PcHNnM+RUl0uD8VkSn93PO78N7kQYhfqpI/eki57pl4= +go.opentelemetry.io/contrib v0.21.0 h1:RMJ6GlUVzLYp/zmItxTTdAmr1gnpO/HHMFmvjAhvJQM= +go.opentelemetry.io/contrib v0.21.0/go.mod h1:EH4yDYeNoaTqn/8yCWQmfNB78VHfGX2Jt2bvnvzBlGM= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.21.0 h1:68WZYF6CrnsXIVDYc51cR9VmTX2IM7y0svo7s4lu5kQ= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.21.0/go.mod h1:Vm5u/mtkj1OMhtao0v+BGo2LUoLCgHYXvRmj0jWITlE= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.21.0 h1:G1vNyNfKknFvrKVC8ga8EYIECy0s5D/QPW4QPRSMhwc= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.21.0/go.mod h1:JQAtechjxLEL81EjmbRwxBq/XEzGaHcsPuDHAx54hg4= +go.opentelemetry.io/otel v1.0.0-RC1 h1:4CeoX93DNTWt8awGK9JmNXzF9j7TyOu9upscEdtcdXc= +go.opentelemetry.io/otel v1.0.0-RC1/go.mod h1:x9tRa9HK4hSSq7jf2TKbqFbtt58/TGk0f9XiEYISI1I= +go.opentelemetry.io/otel/internal/metric v0.21.0 h1:gZlIBo5O51hZOOZz8vEcuRx/l5dnADadKfpT70AELoo= +go.opentelemetry.io/otel/internal/metric v0.21.0/go.mod 
h1:iOfAaY2YycsXfYD4kaRSbLx2LKmfpKObWBEv9QK5zFo= +go.opentelemetry.io/otel/metric v0.21.0 h1:ZtcJlHqVE4l8Su0WOLOd9fEPheJuYEiQ0wr9wv2p25I= +go.opentelemetry.io/otel/metric v0.21.0/go.mod h1:JWCt1bjivC4iCrz/aCrM1GSw+ZcvY44KCbaeeRhzHnc= +go.opentelemetry.io/otel/oteltest v1.0.0-RC1 h1:G685iP3XiskCwk/z0eIabL55XUl2gk0cljhGk9sB0Yk= +go.opentelemetry.io/otel/oteltest v1.0.0-RC1/go.mod h1:+eoIG0gdEOaPNftuy1YScLr1Gb4mL/9lpDkZ0JjMRq4= +go.opentelemetry.io/otel/trace v1.0.0-RC1 h1:jrjqKJZEibFrDz+umEASeU3LvdVyWKlnTh7XEfwrT58= +go.opentelemetry.io/otel/trace v1.0.0-RC1/go.mod h1:86UHmyHWFEtWjfWPSbu0+d0Pf9Q6e1U+3ViBOc+NXAg= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.8.0 h1:CUhrE4N1rqSE6FM9ecihEjRkLQu8cDfgDyoOs83mEY4= go.uber.org/atomic v1.8.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/automaxprocs v1.4.0/go.mod h1:/mTEdr7LvHhs0v7mjdxDreTz1OG5zdZGqgOnhWiR/+Q= go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.7.0 h1:zaiO/rmgFjbmCXdSYJWQcdvOCsthmdaHfr3Gm2Kx4Ec= go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= @@ -1457,10 +1628,12 @@ go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.18.1 h1:CSUJ2mjFszzEWt4CdKISEuChVIXGBn3lAPwkRGyVrc4= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180505025534-4ec37c66abab/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto 
v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1475,15 +1648,20 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200422194213-44a606286825/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e h1:gsTQYXdTw2Gq7RBsWvlQ91b+aEQ6bXFUngBGuR8sPpI= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1558,6 +1736,7 @@ golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191021144547-ec77196f6094/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1565,6 +1744,7 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= @@ -1583,11 +1763,16 @@ golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210324051636-2c4c8ecb7826/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210716203947-853a461950ff h1:j2EK/QoxYNBsXI4R7fQkkRUk8y6wnOBI+6hgPdP/6Ds= +golang.org/x/net v0.0.0-20210716203947-853a461950ff/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1601,8 +1786,9 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210323180902-22b0adad7558 h1:D7nTwh4J0i+5mW4Zjzn5omvlr6YBcWywE6KOcatyNxY= -golang.org/x/oauth2 v0.0.0-20210323180902-22b0adad7558/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c h1:pkQiBZBvdos9qq4wBAHqlzuZHEXo07pqV06ef90u1WI= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1628,6 +1814,8 @@ golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1659,10 +1847,12 @@ golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1701,14 +1891,19 @@ golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210314195730-07df6a141424/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210611083646-a4fc73990273/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1718,6 +1913,7 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -1728,8 +1924,9 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6 h1:Vv0JUPWTyeqUq42B2WJ1FeIDjjvGKoA2Ss+Ts0lAVbs= +golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.1.2 h1:kRBLX7v7Af8W7Gdbbc908OJcdgtK8bOz9Uaj8/F1ACA= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1764,8 +1961,11 @@ 
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.42.0 h1:uqATLkpxiBrhrvFoebXUjvyzE9nQf+pVyy0Z0IHE+fc= -google.golang.org/api v0.42.0/go.mod h1:+Oj4s6ch2SEGtPjGqfUfZonBH0GjQH89gTeKKAEGZKI= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0 h1:RDAPWfNFY06dffEXfn7hZF5Fr1ZbnChzfQZAPyBd1+I= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1779,6 +1979,7 @@ google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1802,6 +2003,7 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200420144010-e5e8543f8aeb/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -1821,16 +2023,22 @@ google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto 
v0.0.0-20210312152112-fc591d9ea70f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210630183607-d20f26d13c79 h1:s1jFTXJryg4a1mew7xv03VZD8N9XjxFhk1o4Js4WvPQ= -google.golang.org/genproto v0.0.0-20210630183607-d20f26d13c79/go.mod h1:yiaVoXHpRzHGyxV3o4DktVWY4mSUErTKaeEOq6C3t3U= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea h1:8ZyCcgugUqamxp/vZSEJw9CMy7VZlSWYJLLJPi/dSDA= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= @@ -1840,6 +2048,7 @@ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.0/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= @@ -1850,9 +2059,13 @@ google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod 
h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.39.0 h1:Klz8I9kdtkIN6EpHHUOMLCYhTn/2WAe5a0s1hcBkdTI= google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/grpc/examples v0.0.0-20201010204749-3c400e7fcc87 h1:JA56ipSuANY2Fwx4OITOAj+QXlHyCJEma6VVWTRBG+k= google.golang.org/grpc/examples v0.0.0-20201010204749-3c400e7fcc87/go.mod h1:Lh55/1hxmVHEkOvSIQ2uj0P12QyOCUNyRwnUlSS13hw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -1871,13 +2084,15 @@ google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+Rur google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b h1:QRR6H1YWRnHb4Y/HeNFCTJLFVxaq6wH4YuVdsUOr75U= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= @@ -1885,11 +2100,11 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.52.0 h1:j+Lt/M1oPPejkniCg1TkWE2J3Eh1oZTsHSXzMTzUXn4= -gopkg.in/ini.v1 v1.52.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw= gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod 
h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM= @@ -1902,7 +2117,7 @@ gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU= gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= gopkg.in/mgo.v2 v2.0.0-20160818020120-3f83fa500528/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= @@ -1954,11 +2169,12 @@ k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= -k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM= +k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 h1:+WnxoVtG8TMiudHBSEtrVL1egv36TkkJm+bA8AxicmQ= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +k8s.io/utils v0.0.0-20200414100711-2df71ebbae66/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20200729134348-d5654de09c73 h1:uJmqzgNWG7XyClnU/mLPBWwfKKF1K8Hf8whTseBgJcg= k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/internal/.otel_collector_mixin/receiver/otlpreceiver/mixin.go b/internal/.otel_collector_mixin/receiver/otlpreceiver/mixin.go index a5c2a4e2a90..e8602b95b1b 100644 --- a/internal/.otel_collector_mixin/receiver/otlpreceiver/mixin.go +++ b/internal/.otel_collector_mixin/receiver/otlpreceiver/mixin.go @@ -17,56 +17,33 @@ package otlpreceiver import ( "context" - gatewayruntime "github.com/grpc-ecosystem/grpc-gateway/runtime" "google.golang.org/grpc" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" - collectorlog "go.opentelemetry.io/collector/internal/data/protogen/collector/logs/v1" - collectormetrics "go.opentelemetry.io/collector/internal/data/protogen/collector/metrics/v1" - collectortrace "go.opentelemetry.io/collector/internal/data/protogen/collector/trace/v1" + "go.opentelemetry.io/collector/model/otlpgrpc" "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/logs" "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metrics" "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/trace" ) -// RegisterTraceReceiver registers the trace receiver with a gRPC server and/or grpc-gateway mux, if non-nil. 
-func RegisterTraceReceiver(ctx context.Context, consumer consumer.Traces, serverGRPC *grpc.Server, gatewayMux *gatewayruntime.ServeMux) error { +// RegisterTraceReceiver registers the trace receiver with a gRPC server. +func RegisterTraceReceiver(ctx context.Context, consumer consumer.Traces, serverGRPC *grpc.Server) error { receiver := trace.New(config.NewID("otlp"), consumer) - if serverGRPC != nil { - collectortrace.RegisterTraceServiceServer(serverGRPC, receiver) - } - if gatewayMux != nil { - err := collectortrace.RegisterTraceServiceHandlerServer(ctx, gatewayMux, receiver) - if err != nil { - return err - } - // Also register an alias handler. This fixes bug https://github.com/open-telemetry/opentelemetry-collector/issues/1968 - return collectortrace.RegisterTraceServiceHandlerServerAlias(ctx, gatewayMux, receiver) - } + otlpgrpc.RegisterTracesServer(serverGRPC, receiver) return nil } -// RegisterMetricsReceiver registers the metrics receiver with a gRPC server and/or grpc-gateway mux, if non-nil. -func RegisterMetricsReceiver(ctx context.Context, consumer consumer.Metrics, serverGRPC *grpc.Server, gatewayMux *gatewayruntime.ServeMux) error { +// RegisterMetricsReceiver registers the metrics receiver with a gRPC server. +func RegisterMetricsReceiver(ctx context.Context, consumer consumer.Metrics, serverGRPC *grpc.Server) error { receiver := metrics.New(config.NewID("otlp"), consumer) - if serverGRPC != nil { - collectormetrics.RegisterMetricsServiceServer(serverGRPC, receiver) - } - if gatewayMux != nil { - return collectormetrics.RegisterMetricsServiceHandlerServer(ctx, gatewayMux, receiver) - } + otlpgrpc.RegisterMetricsServer(serverGRPC, receiver) return nil } -// RegisterLogsReceiver registers the logs receiver with a gRPC server and/or grpc-gateway mux, if non-nil. -func RegisterLogsReceiver(ctx context.Context, consumer consumer.Logs, serverGRPC *grpc.Server, gatewayMux *gatewayruntime.ServeMux) error { +// RegisterLogsReceiver registers the logs receiver with a gRPC server. 
+func RegisterLogsReceiver(ctx context.Context, consumer consumer.Logs, serverGRPC *grpc.Server) error { receiver := logs.New(config.NewID("otlp"), consumer) - if serverGRPC != nil { - collectorlog.RegisterLogsServiceServer(serverGRPC, receiver) - } - if gatewayMux != nil { - return collectorlog.RegisterLogsServiceHandlerServer(ctx, gatewayMux, receiver) - } + otlpgrpc.RegisterLogsServer(serverGRPC, receiver) return nil } diff --git a/internal/otel_collector/CHANGELOG.md b/internal/otel_collector/CHANGELOG.md index f3633931471..86316e7a003 100644 --- a/internal/otel_collector/CHANGELOG.md +++ b/internal/otel_collector/CHANGELOG.md @@ -2,6 +2,80 @@ ## Unreleased +## v0.30.0 Beta + +## 🛑 Breaking changes 🛑 + +- Rename `pdata.DoubleSum` to `pdata.Sum` (#3583) +- Rename `pdata.DoubleGauge` to `pdata.Gauge` (#3599) +- Migrated `pdata` to a dedicated package (#3483) +- Change Marshaler/Unmarshaler to be consistent with other interfaces (#3502) +- Remove consumer/simple package (#3438) +- Remove unnecessary interfaces from pdata (#3506) +- zipkinv1 implement directly Unmarshaler interface (#3504) +- zipkinv2 implement directly Marshaler/Unmarshaler interface (#3505) +- Change exporterhelper to accept ExporterCreateSettings instead of just logger (#3569) +- Deprecate Resize() from pdata slice APIs (#3573) +- Use Func pattern in processorhelper, consistent with others (#3570) + +## 💡 Enhancements 💡 + +- Update OTLP to v0.8.0 (#3572) +- Migrate from OpenCensus to OpenTelemetry for internal tracing (#3567) +- Move internal/pdatagrpc to model/otlpgrpc (#3507) +- Move internal/otlp to model/otlp (#3508) +- Create http Server via Config, enable cors and decompression (#3513) +- Allow users to set min and max TLS versions (#3591) +- Support setting ballast size in percentage of total Mem in ballast extension (#3456) +- Publish go.opentelemetry.io/collector/model as a separate module (#3530) +- Pass a TracerProvider via construct settings to all the components (#3592) +- Make graceful shutdown optional (#3577) + +## 🧰 Bug fixes 🧰 + +- `scraperhelper`: Include the scraper name in log messages (#3487) +- `scraperhelper`: fix case when returned pdata is empty (#3520) +- Record the correct number of points not metrics in Kafka receiver (#3553) +- Validate the Prometheus configuration (#3589) + +## v0.29.0 Beta + +## 🛑 Breaking changes 🛑 + +- Rename `service.Application` to `service.Collector` (#3268) +- Provide case sensitivity in config yaml mappings by using Koanf instead of Viper (#3337) +- Move zipkin constants to an internal package (#3431) +- Disallow renaming metrics using metric relabel configs (#3410) +- Move cgroup and iruntime utils from memory_limiter to internal folder (#3448) +- Move model pdata interfaces to pdata, expose them publicly (#3455) + +## 💡 Enhancements 💡 + +- Change obsreport helpers for scraper to use the same pattern as Processor/Exporter (#3327) +- Convert `otlptext` to implement Marshaler interfaces (#3366) +- Add encoder/decoder and marshaler/unmarshaler for OTLP protobuf (#3401) +- Use the new marshaler/unmarshaler in `kafka` exporter (#3403) +- Convert `zipkinv2` to to/from translator interfaces (#3409) +- `zipkinv1`: Move to translator and encoders interfaces (#3419) +- Use the new marshaler/unmarshaler in `kafka` receiver #3402 +- Change `oltp` receiver to use the new unmarshaler, avoid grpc-gateway dependency (#3406) +- Use the new Marshaler in the `otlphttp` exporter (#3433) +- Add grpc response struct for all signals instead of returning interface in `otlp` 
receiver/exporter (#3437) +- `zipkinv2`: Add encoders, decoders, marshalers (#3426) +- `scrapererror` receiver: Return concrete error type (#3360) +- `kafka` receiver: Add metrics support (#3452) +- `prometheus` receiver: + - Add store to track stale metrics (#3414) + - Add `up` and `scrape_xxxx` internal metrics (#3116) + +## 🧰 Bug fixes 🧰 + +- `prometheus` receiver: + - Reject datapoints with duplicate label keys (#3408) + - Scrapers are not stopped when receiver is shutdown (#3450) +- `prometheusremotewrite` exporter: Adjust default retry settings (#3416) +- `hostmetrics` receiver: Fix missing startTimestamp for `processes` scraper (#3461) + ## v0.28.0 Beta ## 🛑 Breaking changes 🛑 @@ -33,6 +107,7 @@ ## 💡 Enhancements 💡 - Add `doc.go` files to the consumer package and its subpackages (#3270) +- Improve documentation of consumer package and subpackages (#3269, #3361) - Automate triggering of doc-update on release (#3234) - Enable Dependabot for Github Actions (#3312) - Remove the proto dependency in `goldendataset` for traces (#3322) diff --git a/internal/otel_collector/Makefile b/internal/otel_collector/Makefile index a6da8a3f5bb..f4ec8fad7df 100644 --- a/internal/otel_collector/Makefile +++ b/internal/otel_collector/Makefile @@ -8,7 +8,7 @@ ALL_SRC := $(shell find . -name '*.go' \ -not -path './cmd/checkdoc/*' \ -not -path './internal/tools/*' \ -not -path './examples/demo/app/*' \ - -not -path './internal/data/protogen/*' \ + -not -path './model/internal/data/protogen/*' \ -not -path './service/internal/zpages/tmplgen/*' \ -type f | sort) @@ -26,7 +26,7 @@ GOOS=$(shell go env GOOS) GOARCH=$(shell go env GOARCH) BUILD_INFO_IMPORT_PATH=go.opentelemetry.io/collector/internal/version -VERSION=$(shell git describe --match "v[0-9]*" HEAD) +VERSION=$(shell git describe --always --match "v[0-9]*" HEAD) BUILD_INFO=-ldflags "-X $(BUILD_INFO_IMPORT_PATH).Version=$(VERSION)" RUN_CONFIG?=examples/local/otel-config.yaml @@ -49,6 +49,10 @@ endef .DEFAULT_GOAL := all +.PHONY: version +version: + @echo ${VERSION} + .PHONY: all all: checklicense checkdoc misspell goimpi golint gotest otelcol @@ -251,7 +255,6 @@ binaries-windows_amd64: build-binary-internal: GO111MODULE=on CGO_ENABLED=0 go build -trimpath -o ./bin/otelcol_$(GOOS)_$(GOARCH)$(EXTENSION) $(BUILD_INFO) ./cmd/otelcol - .PHONY: deb-rpm-package %-package: ARCH ?= amd64 %-package: @@ -289,21 +292,21 @@ gendependabot: # Definitions for ProtoBuf generation. # The source directory for OTLP ProtoBufs. -OPENTELEMETRY_PROTO_SRC_DIR=internal/data/opentelemetry-proto +OPENTELEMETRY_PROTO_SRC_DIR=model/internal/opentelemetry-proto # Find all .proto files. OPENTELEMETRY_PROTO_FILES := $(subst $(OPENTELEMETRY_PROTO_SRC_DIR)/,,$(wildcard $(OPENTELEMETRY_PROTO_SRC_DIR)/opentelemetry/proto/*/v1/*.proto $(OPENTELEMETRY_PROTO_SRC_DIR)/opentelemetry/proto/collector/*/v1/*.proto)) # Target directory to write generated files to. -PROTO_TARGET_GEN_DIR=internal/data/protogen +PROTO_TARGET_GEN_DIR=model/internal/data/protogen # Go package name to use for generated files. PROTO_PACKAGE=go.opentelemetry.io/collector/$(PROTO_TARGET_GEN_DIR) # Intermediate directory used during generation. 
-PROTO_INTERMEDIATE_DIR=internal/data/.patched-otlp-proto +PROTO_INTERMEDIATE_DIR=model/internal/.patched-otlp-proto -DOCKER_PROTOBUF ?= otel/build-protobuf:0.2.1 +DOCKER_PROTOBUF ?= otel/build-protobuf:0.4.1 PROTOC := docker run --rm -u ${shell id -u} -v${PWD}:${PWD} -w${PWD}/$(PROTO_INTERMEDIATE_DIR) ${DOCKER_PROTOBUF} --proto_path=${PWD} PROTO_INCLUDES := -I/usr/include/github.com/gogo/protobuf -I./ @@ -333,11 +336,6 @@ genproto_sub: @echo Generate Go code from .proto files in intermediate directory. $(foreach file,$(OPENTELEMETRY_PROTO_FILES),$(call exec-command,$(PROTOC) $(PROTO_INCLUDES) --gogofaster_out=plugins=grpc:./ $(file))) - @echo Generate gRPC gateway code. - $(PROTOC) $(PROTO_INCLUDES) --grpc-gateway_out=logtostderr=true,grpc_api_configuration=opentelemetry/proto/collector/trace/v1/trace_service_http.yaml:./ opentelemetry/proto/collector/trace/v1/trace_service.proto - $(PROTOC) $(PROTO_INCLUDES) --grpc-gateway_out=logtostderr=true,grpc_api_configuration=opentelemetry/proto/collector/metrics/v1/metrics_service_http.yaml:./ opentelemetry/proto/collector/metrics/v1/metrics_service.proto - $(PROTOC) $(PROTO_INCLUDES) --grpc-gateway_out=logtostderr=true,grpc_api_configuration=opentelemetry/proto/collector/logs/v1/logs_service_http.yaml:./ opentelemetry/proto/collector/logs/v1/logs_service.proto - @echo Move generated code to target directory. mkdir -p $(PROTO_TARGET_GEN_DIR) cp -R $(PROTO_INTERMEDIATE_DIR)/$(PROTO_PACKAGE)/* $(PROTO_TARGET_GEN_DIR)/ @@ -358,6 +356,7 @@ genpdata: check-contrib: @echo Setting contrib at $(CONTRIB_PATH) to use this core checkout make -C $(CONTRIB_PATH) for-all CMD="go mod edit -replace go.opentelemetry.io/collector=$(CURDIR)" + make -C $(CONTRIB_PATH) for-all CMD="go mod edit -replace go.opentelemetry.io/collector/model=$(CURDIR)/model" make -C $(CONTRIB_PATH) gotidy make -C $(CONTRIB_PATH) test @echo Restoring contrib to no longer use this core checkout @@ -389,6 +388,13 @@ checkdoc: apidiff-build: @$(foreach pkg,$(ALL_PKGS),$(call exec-command,./internal/buildscripts/gen-apidiff.sh -p $(pkg))) +# If we are running in CI, change input directory +ifeq ($(CI), true) +APICOMPARE_OPTS=$(COMPARE_OPTS) +else +APICOMPARE_OPTS=-d "./internal/data/apidiff" +endif + # Compare API state snapshots .PHONY: apidiff-compare apidiff-compare: diff --git a/internal/otel_collector/cmd/otelcol/main.go b/internal/otel_collector/cmd/otelcol/main.go index 524e94d5da9..581876cb78f 100644 --- a/internal/otel_collector/cmd/otelcol/main.go +++ b/internal/otel_collector/cmd/otelcol/main.go @@ -37,20 +37,20 @@ func main() { Version: version.Version, } - if err := run(service.AppSettings{BuildInfo: info, Factories: factories}); err != nil { + if err := run(service.CollectorSettings{BuildInfo: info, Factories: factories}); err != nil { log.Fatal(err) } } -func runInteractive(settings service.AppSettings) error { +func runInteractive(settings service.CollectorSettings) error { app, err := service.New(settings) if err != nil { - return fmt.Errorf("failed to construct the application: %w", err) + return fmt.Errorf("failed to construct the collector server: %w", err) } err = app.Run() if err != nil { - return fmt.Errorf("application run finished with error: %w", err) + return fmt.Errorf("collector server run finished with error: %w", err) } return nil diff --git a/internal/otel_collector/cmd/otelcol/main_others.go b/internal/otel_collector/cmd/otelcol/main_others.go index 1998328880b..9c40f959310 100644 --- a/internal/otel_collector/cmd/otelcol/main_others.go +++ 
b/internal/otel_collector/cmd/otelcol/main_others.go @@ -18,6 +18,6 @@ package main import "go.opentelemetry.io/collector/service" -func run(settings service.AppSettings) error { +func run(settings service.CollectorSettings) error { return runInteractive(settings) } diff --git a/internal/otel_collector/cmd/otelcol/main_windows.go b/internal/otel_collector/cmd/otelcol/main_windows.go index e5e600898bc..db4106bdbb7 100644 --- a/internal/otel_collector/cmd/otelcol/main_windows.go +++ b/internal/otel_collector/cmd/otelcol/main_windows.go @@ -25,7 +25,7 @@ import ( "go.opentelemetry.io/collector/service" ) -func run(set service.AppSettings) error { +func run(set service.CollectorSettings) error { if useInteractiveMode, err := checkUseInteractiveMode(); err != nil { return err } else if useInteractiveMode { @@ -51,7 +51,7 @@ func checkUseInteractiveMode() (bool, error) { } } -func runService(set service.AppSettings) error { +func runService(set service.CollectorSettings) error { // do not need to supply service name when startup is invoked through Service Control Manager directly if err := svc.Run("", service.NewWindowsService(set)); err != nil { return fmt.Errorf("failed to start service %w", err) diff --git a/internal/otel_collector/cmd/pdatagen/internal/base_fields.go b/internal/otel_collector/cmd/pdatagen/internal/base_fields.go index 2486ab40c9d..66c0ccde953 100644 --- a/internal/otel_collector/cmd/pdatagen/internal/base_fields.go +++ b/internal/otel_collector/cmd/pdatagen/internal/base_fields.go @@ -53,6 +53,18 @@ func (ms ${structName}) Set${fieldName}(v ${returnType}) { (*ms.orig).${originFieldName} = v }` +const accessorsPrimitiveAsDoubleTemplate = `// ${fieldName} returns the ${lowerFieldName} associated with this ${structName}. +func (ms ${structName}) ${fieldName}() ${returnType} { + return (*ms.orig).GetAsDouble() +} + +// Set${fieldName} replaces the ${lowerFieldName} associated with this ${structName}. 
+func (ms ${structName}) Set${fieldName}(v ${returnType}) { + (*ms.orig).${originFieldName} = &${originFullName}_AsDouble{ + AsDouble: v, + } +}` + const accessorsPrimitiveTestTemplate = `func Test${structName}_${fieldName}(t *testing.T) { ms := New${structName}() assert.EqualValues(t, ${defaultVal}, ms.${fieldName}()) @@ -393,3 +405,60 @@ func (one oneofField) generateCopyToValue(sb *strings.Builder) { } var _ baseField = (*oneofField)(nil) + +type primitiveAsDoubleField struct { + originFullName string + fieldName string + originFieldName string + returnType string + defaultVal string + testVal string +} + +func (pf *primitiveAsDoubleField) generateAccessors(ms baseStruct, sb *strings.Builder) { + sb.WriteString(os.Expand(accessorsPrimitiveAsDoubleTemplate, func(name string) string { + switch name { + case "structName": + return ms.getName() + case "fieldName": + return pf.fieldName + case "lowerFieldName": + return strings.ToLower(pf.fieldName) + case "returnType": + return pf.returnType + case "originFieldName": + return pf.originFieldName + case "originFullName": + return pf.originFullName + default: + panic(name) + } + })) +} + +func (pf *primitiveAsDoubleField) generateAccessorsTest(ms baseStruct, sb *strings.Builder) { + sb.WriteString(os.Expand(accessorsPrimitiveTestTemplate, func(name string) string { + switch name { + case "structName": + return ms.getName() + case "defaultVal": + return pf.defaultVal + case "fieldName": + return pf.fieldName + case "testValue": + return pf.testVal + default: + panic(name) + } + })) +} + +func (pf *primitiveAsDoubleField) generateSetWithTestValue(sb *strings.Builder) { + sb.WriteString("\ttv.Set" + pf.fieldName + "(" + pf.testVal + ")") +} + +func (pf *primitiveAsDoubleField) generateCopyToValue(sb *strings.Builder) { + sb.WriteString("\tdest.Set" + pf.fieldName + "(ms." + pf.fieldName + "())") +} + +var _ baseField = (*primitiveField)(nil) diff --git a/internal/otel_collector/cmd/pdatagen/internal/base_slices.go b/internal/otel_collector/cmd/pdatagen/internal/base_slices.go index 66057197bc5..bd3a88172f1 100644 --- a/internal/otel_collector/cmd/pdatagen/internal/base_slices.go +++ b/internal/otel_collector/cmd/pdatagen/internal/base_slices.go @@ -104,9 +104,10 @@ const commonSliceGenerateTest = `func generateTest${structName}() ${structName} } func fillTest${structName}(tv ${structName}) { - tv.Resize(7) - for i := 0; i < tv.Len(); i++ { - fillTest${elementName}(tv.At(i)) + l := 7 + tv.EnsureCapacity(l) + for i := 0; i < l; i++ { + fillTest${elementName}(tv.AppendEmpty()) } }` @@ -119,7 +120,7 @@ const slicePtrTemplate = `// ${structName} logically represents a slice of ${ele // Important: zero-initialized instance is not valid for use. type ${structName} struct { // orig points to the slice ${originName} field contained somewhere else. - // We use pointer-to-slice to be able to modify it in functions like Resize. + // We use pointer-to-slice to be able to modify it in functions like EnsureCapacity. orig *[]*${originName} } @@ -128,7 +129,7 @@ func new${structName}(orig *[]*${originName}) ${structName} { } // New${structName} creates a ${structName} with 0 elements. -// Can use "Resize" to initialize with a given length. +// Can use "EnsureCapacity" to initialize with a given capacity. 
func New${structName}() ${structName} { orig := []*${originName}(nil) return ${structName}{&orig} @@ -172,6 +173,28 @@ func (es ${structName}) CopyTo(dest ${structName}) { *dest.orig = wrappers } +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new ${structName} can be initialized: +// es := New${structName}() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es ${structName}) EnsureCapacity(newCap int) { + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*${originName}, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + // Resize is an operation that resizes the slice: // 1. If the newLen <= len then equivalent with slice[0:newLen:cap]. // 2. If the newLen > len then (newLen - cap) empty elements will be appended to the slice. @@ -183,6 +206,8 @@ func (es ${structName}) CopyTo(dest ${structName}) { // e := es.At(i) // // Here should set all the values for e. // } +// +// Deprecated: Use EnsureCapacity() and AppendEmpty() instead. func (es ${structName}) Resize(newLen int) { oldLen := len(*es.orig) oldCap := cap(*es.orig) @@ -190,13 +215,11 @@ func (es ${structName}) Resize(newLen int) { *es.orig = (*es.orig)[:newLen:oldCap] return } - if newLen > oldCap { newOrig := make([]*${originName}, oldLen, newLen) copy(newOrig, *es.orig) *es.orig = newOrig } - // Add extra empty elements to the array. extraOrigs := make([]${originName}, newLen-oldLen) for i := range extraOrigs { @@ -204,21 +227,12 @@ func (es ${structName}) Resize(newLen int) { } } -// Append will increase the length of the ${structName} by one and set the -// given ${elementName} at that new position. The original ${elementName} -// could still be referenced so do not reuse it after passing it to this -// method. -// Deprecated: Use AppendEmpty. -func (es ${structName}) Append(e ${elementName}) { - *es.orig = append(*es.orig, e.orig) -} - // AppendEmpty will append to the end of the slice an empty ${elementName}. // It returns the newly added ${elementName}. func (es ${structName}) AppendEmpty() ${elementName} { *es.orig = append(*es.orig, &${originName}{}) return es.At(es.Len() - 1) -}` +} ` const slicePtrTestTemplate = `func Test${structName}(t *testing.T) { es := New${structName}() @@ -226,14 +240,15 @@ const slicePtrTestTemplate = `func Test${structName}(t *testing.T) { es = new${structName}(&[]*${originName}{}) assert.EqualValues(t, 0, es.Len()) - es.Resize(7) + es.EnsureCapacity(7) emptyVal := new${elementName}(&${originName}{}) testVal := generateTest${elementName}() - assert.EqualValues(t, 7, es.Len()) + assert.EqualValues(t, 7, cap(*es.orig)) for i := 0; i < es.Len(); i++ { - assert.EqualValues(t, emptyVal, es.At(i)) - fillTest${elementName}(es.At(i)) - assert.EqualValues(t, testVal, es.At(i)) + el := es.AppendEmpty() + assert.EqualValues(t, emptyVal, el) + fillTest${elementName}(el) + assert.EqualValues(t, testVal, el) } } @@ -252,59 +267,38 @@ func Test${structName}_CopyTo(t *testing.T) { assert.EqualValues(t, generateTest${structName}(), dest) } -func Test${structName}_Resize(t *testing.T) { +func Test${structName}_EnsureCapacity(t *testing.T) { es := generateTest${structName}() - emptyVal := new${elementName}(&${originName}{}) - // Test Resize less elements. 
- const resizeSmallLen = 4 - expectedEs := make(map[*${originName}]bool, resizeSmallLen) - for i := 0; i < resizeSmallLen; i++ { + // Test ensure smaller capacity. + const ensureSmallLen = 4 + expectedEs := make(map[*${originName}]bool) + for i := 0; i < es.Len(); i++ { expectedEs[es.At(i).orig] = true } - assert.Equal(t, resizeSmallLen, len(expectedEs)) - es.Resize(resizeSmallLen) - assert.Equal(t, resizeSmallLen, es.Len()) - foundEs := make(map[*${originName}]bool, resizeSmallLen) + assert.Equal(t, es.Len(), len(expectedEs)) + es.EnsureCapacity(ensureSmallLen) + assert.Less(t, ensureSmallLen, es.Len()) + foundEs := make(map[*${originName}]bool, es.Len()) for i := 0; i < es.Len(); i++ { foundEs[es.At(i).orig] = true } assert.EqualValues(t, expectedEs, foundEs) - // Test Resize more elements. - const resizeLargeLen = 7 + // Test ensure larger capacity + const ensureLargeLen = 9 oldLen := es.Len() expectedEs = make(map[*${originName}]bool, oldLen) for i := 0; i < oldLen; i++ { expectedEs[es.At(i).orig] = true } assert.Equal(t, oldLen, len(expectedEs)) - es.Resize(resizeLargeLen) - assert.Equal(t, resizeLargeLen, es.Len()) + es.EnsureCapacity(ensureLargeLen) + assert.Equal(t, ensureLargeLen, cap(*es.orig)) foundEs = make(map[*${originName}]bool, oldLen) for i := 0; i < oldLen; i++ { foundEs[es.At(i).orig] = true } assert.EqualValues(t, expectedEs, foundEs) - for i := oldLen; i < resizeLargeLen; i++ { - assert.EqualValues(t, emptyVal, es.At(i)) - } - - // Test Resize 0 elements. - es.Resize(0) - assert.Equal(t, 0, es.Len()) -} - -func Test${structName}_Append(t *testing.T) { - es := generateTest${structName}() - - es.AppendEmpty() - assert.EqualValues(t, &${originName}{}, es.At(7).orig) - - value := generateTest${elementName}() - es.Append(value) - assert.EqualValues(t, value.orig, es.At(8).orig) - - assert.Equal(t, 9, es.Len()) }` const sliceValueTemplate = `// ${structName} logically represents a slice of ${elementName}. @@ -316,7 +310,7 @@ const sliceValueTemplate = `// ${structName} logically represents a slice of ${e // Important: zero-initialized instance is not valid for use. type ${structName} struct { // orig points to the slice ${originName} field contained somewhere else. - // We use pointer-to-slice to be able to modify it in functions like Resize. + // We use pointer-to-slice to be able to modify it in functions like EnsureCapacity. orig *[]${originName} } @@ -325,7 +319,7 @@ func new${structName}(orig *[]${originName}) ${structName} { } // New${structName} creates a ${structName} with 0 elements. -// Can use "Resize" to initialize with a given length. +// Can use "EnsureCapacity" to initialize with a given capacity. func New${structName}() ${structName} { orig := []${originName}(nil) return ${structName}{&orig} @@ -364,6 +358,28 @@ func (es ${structName}) CopyTo(dest ${structName}) { } } +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new ${structName} can be initialized: +// es := New${structName}() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. 
+// } +func (es ${structName}) EnsureCapacity(newCap int) { + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]${originName}, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + // Resize is an operation that resizes the slice: // 1. If the newLen <= len then equivalent with slice[0:newLen:cap]. // 2. If the newLen > len then (newLen - cap) empty elements will be appended to the slice. @@ -375,6 +391,8 @@ func (es ${structName}) CopyTo(dest ${structName}) { // e := es.At(i) // // Here should set all the values for e. // } +// +// Deprecated: Use EnsureCapacity() and AppendEmpty() instead. func (es ${structName}) Resize(newLen int) { oldLen := len(*es.orig) oldCap := cap(*es.orig) @@ -382,13 +400,11 @@ func (es ${structName}) Resize(newLen int) { *es.orig = (*es.orig)[:newLen:oldCap] return } - if newLen > oldCap { newOrig := make([]${originName}, oldLen, newLen) copy(newOrig, *es.orig) *es.orig = newOrig } - // Add extra empty elements to the array. empty := ${originName}{} for i := oldLen; i < newLen; i++ { @@ -396,15 +412,6 @@ func (es ${structName}) Resize(newLen int) { } } -// Append will increase the length of the ${structName} by one and set the -// given ${elementName} at that new position. The original ${elementName} -// could still be referenced so do not reuse it after passing it to this -// method. -// Deprecated: Use AppendEmpty. -func (es ${structName}) Append(e ${elementName}) { - *es.orig = append(*es.orig, *e.orig) -} - // AppendEmpty will append to the end of the slice an empty ${elementName}. // It returns the newly added ${elementName}. func (es ${structName}) AppendEmpty() ${elementName} { @@ -418,14 +425,15 @@ const sliceValueTestTemplate = `func Test${structName}(t *testing.T) { es = new${structName}(&[]${originName}{}) assert.EqualValues(t, 0, es.Len()) - es.Resize(7) + es.EnsureCapacity(7) emptyVal := new${elementName}(&${originName}{}) testVal := generateTest${elementName}() - assert.EqualValues(t, 7, es.Len()) + assert.EqualValues(t, 7, cap(*es.orig)) for i := 0; i < es.Len(); i++ { - assert.EqualValues(t, emptyVal, es.At(i)) - fillTest${elementName}(es.At(i)) - assert.EqualValues(t, testVal, es.At(i)) + el := es.AppendEmpty() + assert.EqualValues(t, emptyVal, el) + fillTest${elementName}(el) + assert.EqualValues(t, testVal, el) } } @@ -444,59 +452,29 @@ func Test${structName}_CopyTo(t *testing.T) { assert.EqualValues(t, generateTest${structName}(), dest) } -func Test${structName}_Resize(t *testing.T) { +func Test${structName}_EnsureCapacity(t *testing.T) { es := generateTest${structName}() - emptyVal := new${elementName}(&${originName}{}) - // Test Resize less elements. - const resizeSmallLen = 4 - expectedEs := make(map[*${originName}]bool, resizeSmallLen) - for i := 0; i < resizeSmallLen; i++ { + // Test ensure smaller capacity. + const ensureSmallLen = 4 + expectedEs := make(map[*${originName}]bool) + for i := 0; i < es.Len(); i++ { expectedEs[es.At(i).orig] = true } - assert.Equal(t, resizeSmallLen, len(expectedEs)) - es.Resize(resizeSmallLen) - assert.Equal(t, resizeSmallLen, es.Len()) - foundEs := make(map[*${originName}]bool, resizeSmallLen) + assert.Equal(t, es.Len(), len(expectedEs)) + es.EnsureCapacity(ensureSmallLen) + assert.Less(t, ensureSmallLen, es.Len()) + foundEs := make(map[*${originName}]bool, es.Len()) for i := 0; i < es.Len(); i++ { foundEs[es.At(i).orig] = true } assert.EqualValues(t, expectedEs, foundEs) - // Test Resize more elements. 
- const resizeLargeLen = 7 + // Test ensure larger capacity + const ensureLargeLen = 9 oldLen := es.Len() - expectedEs = make(map[*${originName}]bool, oldLen) - for i := 0; i < oldLen; i++ { - expectedEs[es.At(i).orig] = true - } assert.Equal(t, oldLen, len(expectedEs)) - es.Resize(resizeLargeLen) - assert.Equal(t, resizeLargeLen, es.Len()) - foundEs = make(map[*${originName}]bool, oldLen) - for i := 0; i < oldLen; i++ { - foundEs[es.At(i).orig] = true - } - assert.EqualValues(t, expectedEs, foundEs) - for i := oldLen; i < resizeLargeLen; i++ { - assert.EqualValues(t, emptyVal, es.At(i)) - } - - // Test Resize 0 elements. - es.Resize(0) - assert.Equal(t, 0, es.Len()) -} - -func Test${structName}_Append(t *testing.T) { - es := generateTest${structName}() - - es.AppendEmpty() - assert.EqualValues(t, new${elementName}(&${originName}{}), es.At(7)) - - value := generateTest${elementName}() - es.Append(value) - assert.EqualValues(t, value, es.At(8)) - - assert.Equal(t, 9, es.Len()) + es.EnsureCapacity(ensureLargeLen) + assert.Equal(t, ensureLargeLen, cap(*es.orig)) }` type baseSlice interface { diff --git a/internal/otel_collector/cmd/pdatagen/internal/common_structs.go b/internal/otel_collector/cmd/pdatagen/internal/common_structs.go index 7442caa7aee..f4c94a9d491 100644 --- a/internal/otel_collector/cmd/pdatagen/internal/common_structs.go +++ b/internal/otel_collector/cmd/pdatagen/internal/common_structs.go @@ -17,14 +17,14 @@ package internal var commonFile = &File{ Name: "common", imports: []string{ - `otlpcommon "go.opentelemetry.io/collector/internal/data/protogen/common/v1"`, + `otlpcommon "go.opentelemetry.io/collector/model/internal/data/protogen/common/v1"`, }, testImports: []string{ `"testing"`, ``, `"github.com/stretchr/testify/assert"`, ``, - `otlpcommon "go.opentelemetry.io/collector/internal/data/protogen/common/v1"`, + `otlpcommon "go.opentelemetry.io/collector/model/internal/data/protogen/common/v1"`, }, structs: []baseStruct{ instrumentationLibrary, diff --git a/internal/otel_collector/cmd/pdatagen/internal/log_structs.go b/internal/otel_collector/cmd/pdatagen/internal/log_structs.go index 08f88812a07..3b29744c793 100644 --- a/internal/otel_collector/cmd/pdatagen/internal/log_structs.go +++ b/internal/otel_collector/cmd/pdatagen/internal/log_structs.go @@ -17,14 +17,14 @@ package internal var logFile = &File{ Name: "log", imports: []string{ - `otlplogs "go.opentelemetry.io/collector/internal/data/protogen/logs/v1"`, + `otlplogs "go.opentelemetry.io/collector/model/internal/data/protogen/logs/v1"`, }, testImports: []string{ `"testing"`, ``, `"github.com/stretchr/testify/assert"`, ``, - `otlplogs "go.opentelemetry.io/collector/internal/data/protogen/logs/v1"`, + `otlplogs "go.opentelemetry.io/collector/model/internal/data/protogen/logs/v1"`, }, structs: []baseStruct{ resourceLogsSlice, diff --git a/internal/otel_collector/cmd/pdatagen/internal/metrics_structs.go b/internal/otel_collector/cmd/pdatagen/internal/metrics_structs.go index c46e3f71e48..ffa923bcfbc 100644 --- a/internal/otel_collector/cmd/pdatagen/internal/metrics_structs.go +++ b/internal/otel_collector/cmd/pdatagen/internal/metrics_structs.go @@ -17,14 +17,14 @@ package internal var metricsFile = &File{ Name: "metrics", imports: []string{ - `otlpmetrics "go.opentelemetry.io/collector/internal/data/protogen/metrics/v1"`, + `otlpmetrics "go.opentelemetry.io/collector/model/internal/data/protogen/metrics/v1"`, }, testImports: []string{ `"testing"`, ``, `"github.com/stretchr/testify/assert"`, ``, - `otlpmetrics 
"go.opentelemetry.io/collector/internal/data/protogen/metrics/v1"`, + `otlpmetrics "go.opentelemetry.io/collector/model/internal/data/protogen/metrics/v1"`, }, structs: []baseStruct{ resourceMetricsSlice, @@ -141,9 +141,9 @@ var intGauge = &messageValueStruct{ } var doubleGauge = &messageValueStruct{ - structName: "DoubleGauge", - description: "// DoubleGauge represents the type of a double scalar metric that always exports the \"current value\" for every data point.", - originFullName: "otlpmetrics.DoubleGauge", + structName: "Gauge", + description: "// Gauge represents the type of a double scalar metric that always exports the \"current value\" for every data point.", + originFullName: "otlpmetrics.Gauge", fields: []baseField{ &sliceField{ fieldName: "DataPoints", @@ -169,9 +169,9 @@ var intSum = &messageValueStruct{ } var doubleSum = &messageValueStruct{ - structName: "DoubleSum", - description: "// DoubleSum represents the type of a numeric double scalar metric that is calculated as a sum of all reported measurements over a time interval.", - originFullName: "otlpmetrics.DoubleSum", + structName: "Sum", + description: "// Sum represents the type of a numeric double scalar metric that is calculated as a sum of all reported measurements over a time interval.", + originFullName: "otlpmetrics.Sum", fields: []baseField{ aggregationTemporalityField, isMonotonicField, @@ -200,7 +200,7 @@ var intHistogram = &messageValueStruct{ var histogram = &messageValueStruct{ structName: "Histogram", description: "// Histogram represents the type of a metric that is calculated by aggregating as a Histogram of all reported measurements over a time interval.", - originFullName: "otlpmetrics.DoubleHistogram", + originFullName: "otlpmetrics.Histogram", fields: []baseField{ aggregationTemporalityField, &sliceField{ @@ -214,7 +214,7 @@ var histogram = &messageValueStruct{ var summary = &messageValueStruct{ structName: "Summary", description: "// Summary represents the type of a metric that is calculated by aggregating as a Summary of all reported double measurements over a time interval.", - originFullName: "otlpmetrics.DoubleSummary", + originFullName: "otlpmetrics.Summary", fields: []baseField{ &sliceField{ fieldName: "DataPoints", @@ -250,12 +250,19 @@ var doubleDataPointSlice = &sliceOfPtrs{ var doubleDataPoint = &messageValueStruct{ structName: "DoubleDataPoint", description: "// DoubleDataPoint is a single data point in a timeseries that describes the time-varying value of a double metric.", - originFullName: "otlpmetrics.DoubleDataPoint", + originFullName: "otlpmetrics.NumberDataPoint", fields: []baseField{ labelsField, startTimeField, timeField, - valueFloat64Field, + &primitiveAsDoubleField{ + originFullName: "otlpmetrics.NumberDataPoint", + fieldName: "Value", + originFieldName: "Value", + returnType: "float64", + defaultVal: "float64(0.0)", + testVal: "float64(17.13)", + }, exemplarsField, }, } @@ -289,7 +296,7 @@ var histogramDataPointSlice = &sliceOfPtrs{ var histogramDataPoint = &messageValueStruct{ structName: "HistogramDataPoint", description: "// HistogramDataPoint is a single data point in a timeseries that describes the time-varying values of a Histogram of values.", - originFullName: "otlpmetrics.DoubleHistogramDataPoint", + originFullName: "otlpmetrics.HistogramDataPoint", fields: []baseField{ labelsField, startTimeField, @@ -310,7 +317,7 @@ var summaryDataPointSlice = &sliceOfPtrs{ var summaryDataPoint = &messageValueStruct{ structName: "SummaryDataPoint", description: "// SummaryDataPoint 
is a single data point in a timeseries that describes the time-varying values of a Summary of double values.", - originFullName: "otlpmetrics.DoubleSummaryDataPoint", + originFullName: "otlpmetrics.SummaryDataPoint", fields: []baseField{ labelsField, startTimeField, @@ -333,7 +340,7 @@ var quantileValuesSlice = &sliceOfPtrs{ var quantileValues = &messageValueStruct{ structName: "ValueAtQuantile", description: "// ValueAtQuantile is a quantile value within a Summary data point.", - originFullName: "otlpmetrics.DoubleSummaryDataPoint_ValueAtQuantile", + originFullName: "otlpmetrics.SummaryDataPoint_ValueAtQuantile", fields: []baseField{ quantileField, valueFloat64Field, @@ -363,7 +370,7 @@ var intExemplar = &messageValueStruct{ }, } -var exemplarSlice = &sliceOfValues{ +var exemplarSlice = &sliceOfPtrs{ structName: "ExemplarSlice", element: exemplar, } @@ -374,10 +381,17 @@ var exemplar = &messageValueStruct{ "// Exemplars also hold information about the environment when the measurement was recorded,\n" + "// for example the span and trace ID of the active span when the exemplar was recorded.", - originFullName: "otlpmetrics.DoubleExemplar", + originFullName: "otlpmetrics.Exemplar", fields: []baseField{ timeField, - valueFloat64Field, + &primitiveAsDoubleField{ + originFullName: "otlpmetrics.Exemplar", + fieldName: "Value", + originFieldName: "Value", + returnType: "float64", + defaultVal: "float64(0.0)", + testVal: "float64(17.13)", + }, &sliceField{ fieldName: "FilteredLabels", originFieldName: "FilteredLabels", diff --git a/internal/otel_collector/cmd/pdatagen/internal/resource_structs.go b/internal/otel_collector/cmd/pdatagen/internal/resource_structs.go index f5167e7c5d4..e14c0f3b3e4 100644 --- a/internal/otel_collector/cmd/pdatagen/internal/resource_structs.go +++ b/internal/otel_collector/cmd/pdatagen/internal/resource_structs.go @@ -17,7 +17,7 @@ package internal var resourceFile = &File{ Name: "resource", imports: []string{ - `otlpresource "go.opentelemetry.io/collector/internal/data/protogen/resource/v1"`, + `otlpresource "go.opentelemetry.io/collector/model/internal/data/protogen/resource/v1"`, }, testImports: []string{ `"testing"`, diff --git a/internal/otel_collector/cmd/pdatagen/internal/trace_structs.go b/internal/otel_collector/cmd/pdatagen/internal/trace_structs.go index 56b9367ad99..5b3cbc3802e 100644 --- a/internal/otel_collector/cmd/pdatagen/internal/trace_structs.go +++ b/internal/otel_collector/cmd/pdatagen/internal/trace_structs.go @@ -17,14 +17,14 @@ package internal var traceFile = &File{ Name: "trace", imports: []string{ - `otlptrace "go.opentelemetry.io/collector/internal/data/protogen/trace/v1"`, + `otlptrace "go.opentelemetry.io/collector/model/internal/data/protogen/trace/v1"`, }, testImports: []string{ `"testing"`, ``, `"github.com/stretchr/testify/assert"`, ``, - `otlptrace "go.opentelemetry.io/collector/internal/data/protogen/trace/v1"`, + `otlptrace "go.opentelemetry.io/collector/model/internal/data/protogen/trace/v1"`, }, structs: []baseStruct{ resourceSpansSlice, diff --git a/internal/otel_collector/cmd/pdatagen/main.go b/internal/otel_collector/cmd/pdatagen/main.go index 02a4fcabad4..3b4101a7426 100644 --- a/internal/otel_collector/cmd/pdatagen/main.go +++ b/internal/otel_collector/cmd/pdatagen/main.go @@ -28,12 +28,12 @@ func check(e error) { func main() { for _, fp := range internal.AllFiles { - f, err := os.Create("./consumer/pdata/generated_" + fp.Name + ".go") + f, err := os.Create("./model/pdata/generated_" + fp.Name + ".go") check(err) _, err = 
f.WriteString(fp.GenerateFile()) check(err) check(f.Close()) - f, err = os.Create("./consumer/pdata/generated_" + fp.Name + "_test.go") + f, err = os.Create("./model/pdata/generated_" + fp.Name + "_test.go") check(err) _, err = f.WriteString(fp.GenerateTestFile()) check(err) diff --git a/internal/otel_collector/component/componenttest/nop_exporter.go b/internal/otel_collector/component/componenttest/nop_exporter.go index 7146329ad1a..2a474e6b02f 100644 --- a/internal/otel_collector/component/componenttest/nop_exporter.go +++ b/internal/otel_collector/component/componenttest/nop_exporter.go @@ -17,6 +17,7 @@ package componenttest import ( "context" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap" "go.opentelemetry.io/collector/component" @@ -28,8 +29,9 @@ import ( // NewNopExporterCreateSettings returns a new nop settings for Create*Exporter functions. func NewNopExporterCreateSettings() component.ExporterCreateSettings { return component.ExporterCreateSettings{ - Logger: zap.NewNop(), - BuildInfo: component.DefaultBuildInfo(), + Logger: zap.NewNop(), + TracerProvider: trace.NewNoopTracerProvider(), + BuildInfo: component.DefaultBuildInfo(), } } diff --git a/internal/otel_collector/component/componenttest/nop_extension.go b/internal/otel_collector/component/componenttest/nop_extension.go index e8e4d18079a..222cb940b91 100644 --- a/internal/otel_collector/component/componenttest/nop_extension.go +++ b/internal/otel_collector/component/componenttest/nop_extension.go @@ -17,6 +17,7 @@ package componenttest import ( "context" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap" "go.opentelemetry.io/collector/component" @@ -27,8 +28,9 @@ import ( // NewNopExtensionCreateSettings returns a new nop settings for Create*Extension functions. func NewNopExtensionCreateSettings() component.ExtensionCreateSettings { return component.ExtensionCreateSettings{ - Logger: zap.NewNop(), - BuildInfo: component.DefaultBuildInfo(), + Logger: zap.NewNop(), + TracerProvider: trace.NewNoopTracerProvider(), + BuildInfo: component.DefaultBuildInfo(), } } diff --git a/internal/otel_collector/component/componenttest/nop_processor.go b/internal/otel_collector/component/componenttest/nop_processor.go index ef8c2be1ee8..0e7fd1b5f07 100644 --- a/internal/otel_collector/component/componenttest/nop_processor.go +++ b/internal/otel_collector/component/componenttest/nop_processor.go @@ -17,6 +17,7 @@ package componenttest import ( "context" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap" "go.opentelemetry.io/collector/component" @@ -29,8 +30,9 @@ import ( // NewNopProcessorCreateSettings returns a new nop settings for Create*Processor functions. 
func NewNopProcessorCreateSettings() component.ProcessorCreateSettings { return component.ProcessorCreateSettings{ - Logger: zap.NewNop(), - BuildInfo: component.DefaultBuildInfo(), + Logger: zap.NewNop(), + TracerProvider: trace.NewNoopTracerProvider(), + BuildInfo: component.DefaultBuildInfo(), } } diff --git a/internal/otel_collector/component/componenttest/nop_receiver.go b/internal/otel_collector/component/componenttest/nop_receiver.go index 500126099ea..1f449e1ba0e 100644 --- a/internal/otel_collector/component/componenttest/nop_receiver.go +++ b/internal/otel_collector/component/componenttest/nop_receiver.go @@ -17,6 +17,7 @@ package componenttest import ( "context" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap" "go.opentelemetry.io/collector/component" @@ -28,8 +29,9 @@ import ( // NewNopReceiverCreateSettings returns a new nop settings for Create*Receiver functions. func NewNopReceiverCreateSettings() component.ReceiverCreateSettings { return component.ReceiverCreateSettings{ - Logger: zap.NewNop(), - BuildInfo: component.DefaultBuildInfo(), + Logger: zap.NewNop(), + TracerProvider: trace.NewNoopTracerProvider(), + BuildInfo: component.DefaultBuildInfo(), } } diff --git a/internal/otel_collector/component/componenttest/shutdown_verifier.go b/internal/otel_collector/component/componenttest/shutdown_verifier.go index 47430e8e815..1c26575e114 100644 --- a/internal/otel_collector/component/componenttest/shutdown_verifier.go +++ b/internal/otel_collector/component/componenttest/shutdown_verifier.go @@ -58,7 +58,7 @@ func verifyTracesProcessorDoesntProduceAfterShutdown(t *testing.T, factory compo // The Shutdown() is done. It means the processor must have sent everything we // gave it to the next sink. - assert.EqualValues(t, generatedCount, nextSink.SpansCount()) + assert.EqualValues(t, generatedCount, nextSink.SpanCount()) } // VerifyProcessorShutdown verifies the processor doesn't produce telemetry data after shutdown. diff --git a/internal/otel_collector/component/exporter.go b/internal/otel_collector/component/exporter.go index 96da6c297dd..2e469350082 100644 --- a/internal/otel_collector/component/exporter.go +++ b/internal/otel_collector/component/exporter.go @@ -17,6 +17,7 @@ package component import ( "context" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap" "go.opentelemetry.io/collector/config" @@ -52,6 +53,9 @@ type ExporterCreateSettings struct { // component to be used later as well. Logger *zap.Logger + // TracerProvider that the factory can pass to other instrumented third-party libraries. + TracerProvider trace.TracerProvider + // BuildInfo can be used by components for informational purposes BuildInfo BuildInfo } diff --git a/internal/otel_collector/component/extension.go b/internal/otel_collector/component/extension.go index 0c4a332443e..9f865c7f966 100644 --- a/internal/otel_collector/component/extension.go +++ b/internal/otel_collector/component/extension.go @@ -17,6 +17,7 @@ package component import ( "context" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap" "go.opentelemetry.io/collector/config" @@ -52,6 +53,9 @@ type ExtensionCreateSettings struct { // component to be used later as well. Logger *zap.Logger + // TracerProvider that the factory can pass to other instrumented third-party libraries. 
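// Illustrative sketch (not from the patch itself) of how a factory could use the new
// TracerProvider field carried by the Create*Settings structs. The component name
// "myexporter" and the createExporter helper are assumptions made for this example only.
package componentexample

import (
	"context"

	"go.opentelemetry.io/collector/component"
)

func createExporter(ctx context.Context, set component.ExporterCreateSettings) {
	// The provider can be handed to instrumented third-party libraries or used directly.
	tracer := set.TracerProvider.Tracer("myexporter")
	_, span := tracer.Start(ctx, "create-exporter")
	defer span.End()

	set.Logger.Info("exporter created")
}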
+ TracerProvider trace.TracerProvider + // BuildInfo can be used by components for informational purposes BuildInfo BuildInfo } diff --git a/internal/otel_collector/component/host.go b/internal/otel_collector/component/host.go index 73fd4406df2..56375b871ee 100644 --- a/internal/otel_collector/component/host.go +++ b/internal/otel_collector/component/host.go @@ -19,7 +19,7 @@ import ( ) // Host represents the entity that is hosting a Component. It is used to allow communication -// between the Component and its host (normally the service.Service is the host). +// between the Component and its host (normally the service.Collector is the host). type Host interface { // ReportFatalError is used to report to the host that the component // encountered a fatal error (i.e.: an error that the instance can't recover diff --git a/internal/otel_collector/component/processor.go b/internal/otel_collector/component/processor.go index 86d2d2f019f..c6035317b8c 100644 --- a/internal/otel_collector/component/processor.go +++ b/internal/otel_collector/component/processor.go @@ -17,6 +17,7 @@ package component import ( "context" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap" "go.opentelemetry.io/collector/component/componenterror" @@ -54,6 +55,9 @@ type ProcessorCreateSettings struct { // component to be used later as well. Logger *zap.Logger + // TracerProvider that the factory can pass to other instrumented third-party libraries. + TracerProvider trace.TracerProvider + // BuildInfo can be used by components for informational purposes BuildInfo BuildInfo } diff --git a/internal/otel_collector/component/receiver.go b/internal/otel_collector/component/receiver.go index 28a8164b970..fb2c885428a 100644 --- a/internal/otel_collector/component/receiver.go +++ b/internal/otel_collector/component/receiver.go @@ -17,6 +17,7 @@ package component import ( "context" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap" "go.opentelemetry.io/collector/config" @@ -61,6 +62,9 @@ type ReceiverCreateSettings struct { // component to be used later as well. Logger *zap.Logger + // TracerProvider that the factory can pass to other instrumented third-party libraries. + TracerProvider trace.TracerProvider + // BuildInfo can be used by components for informational purposes. BuildInfo BuildInfo } diff --git a/internal/otel_collector/config/configcheck/configcheck.go b/internal/otel_collector/config/configcheck/configcheck.go index 0b8c16b2167..842aeed4e0b 100644 --- a/internal/otel_collector/config/configcheck/configcheck.go +++ b/internal/otel_collector/config/configcheck/configcheck.go @@ -96,7 +96,7 @@ func validateConfigDataType(t reflect.Type) error { } default: // The config object can carry other types but they are not used when - // reading the configuration via viper so ignore them. Basically ignore: + // reading the configuration via koanf so ignore them. Basically ignore: // reflect.Uintptr, reflect.Chan, reflect.Func, reflect.Interface, and // reflect.UnsafePointer. } diff --git a/internal/otel_collector/config/configgrpc/README.md b/internal/otel_collector/config/configgrpc/README.md index 8e4e35b3a71..4f5c878eb6b 100644 --- a/internal/otel_collector/config/configgrpc/README.md +++ b/internal/otel_collector/config/configgrpc/README.md @@ -25,9 +25,8 @@ README](../configtls/README.md). 
- `timeout` - [`read_buffer_size`](https://godoc.org/google.golang.org/grpc#ReadBufferSize) - [`write_buffer_size`](https://godoc.org/google.golang.org/grpc#WriteBufferSize) -- [`per_rpc_auth`](https://pkg.go.dev/google.golang.org/grpc#PerRPCCredentials): the credentials to send for every RPC. Note that this isn't about sending the headers only during the initial connection as an `authorization` header under the `headers` would do: this is sent for every RPC performed during an established connection. - - `auth_type`: the authentication type, currently only `bearer` is supported - - `bearer_token`: the bearer token to use for each RPC call. + +Please note that [`per_rpc_auth`](https://pkg.go.dev/google.golang.org/grpc#PerRPCCredentials) which allows the credentials to send for every RPC is now moved to become an [extension](https://github.com/open-telemetry/opentelemetry-collector/tree/main/extension/bearertokenauthextension). Note that this feature isn't about sending the headers only during the initial connection as an `authorization` header under the `headers` would do: this is sent for every RPC performed during an established connection. Example: diff --git a/internal/otel_collector/config/configgrpc/configgrpc.go b/internal/otel_collector/config/configgrpc/configgrpc.go index 424e69a9bb9..4aeac672017 100644 --- a/internal/otel_collector/config/configgrpc/configgrpc.go +++ b/internal/otel_collector/config/configgrpc/configgrpc.go @@ -20,7 +20,8 @@ import ( "strings" "time" - "go.opencensus.io/plugin/ocgrpc" + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + "go.opentelemetry.io/otel" "google.golang.org/grpc" "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/credentials" @@ -226,6 +227,10 @@ func (gcs *GRPCClientSettings) ToDialOptions(ext map[config.ComponentID]componen opts = append(opts, grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingPolicy":"%s"}`, gcs.BalancerName))) } + // Enable OpenTelemetry observability plugin. + opts = append(opts, grpc.WithUnaryInterceptor(otelgrpc.UnaryClientInterceptor())) + opts = append(opts, grpc.WithStreamInterceptor(otelgrpc.StreamClientInterceptor())) + return opts, nil } @@ -316,9 +321,18 @@ func (gss *GRPCServerSettings) ToServerOption(ext map[config.ComponentID]compone ) } - // Enable OpenCensus observability plugin. - // TODO: Change to OpenTelemetry when collector is changed. - opts = append(opts, grpc.StatsHandler(&ocgrpc.ServerHandler{})) + // Enable OpenTelemetry observability plugin. + // TODO: Pass construct settings to have access to Tracer. 
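// Illustrative sketch (not from the patch itself) of the otelgrpc instrumentation that
// ToDialOptions now appends for every client connection, shown on a plain grpc.Dial call.
// The endpoint is a placeholder.
package configgrpcexample

import (
	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"google.golang.org/grpc"
)

func dialInstrumented() (*grpc.ClientConn, error) {
	return grpc.Dial(
		"localhost:4317", // placeholder endpoint
		grpc.WithInsecure(),
		grpc.WithUnaryInterceptor(otelgrpc.UnaryClientInterceptor()),   // traces unary RPCs
		grpc.WithStreamInterceptor(otelgrpc.StreamClientInterceptor()), // traces streaming RPCs
	)
}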
+ opts = append(opts, grpc.UnaryInterceptor( + otelgrpc.UnaryServerInterceptor( + otelgrpc.WithTracerProvider(otel.GetTracerProvider()), + otelgrpc.WithPropagators(otel.GetTextMapPropagator()), + ))) + opts = append(opts, grpc.StreamInterceptor( + otelgrpc.StreamServerInterceptor( + otelgrpc.WithTracerProvider(otel.GetTracerProvider()), + otelgrpc.WithPropagators(otel.GetTextMapPropagator()), + ))) return opts, nil } diff --git a/internal/otel_collector/config/confighttp/confighttp.go b/internal/otel_collector/config/confighttp/confighttp.go index 98215e4c099..039c210043e 100644 --- a/internal/otel_collector/config/confighttp/confighttp.go +++ b/internal/otel_collector/config/confighttp/confighttp.go @@ -22,6 +22,8 @@ import ( "time" "github.com/rs/cors" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + "go.opentelemetry.io/otel" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config" @@ -195,16 +197,34 @@ func (hss *HTTPServerSettings) ToServer(handler http.Handler, opts ...ToServerOp for _, o := range opts { o(serverOpts) } + + handler = middleware.HTTPContentDecompressor( + handler, + middleware.WithErrorHandler(serverOpts.errorHandler), + ) + if len(hss.CorsOrigins) > 0 { - co := cors.Options{AllowedOrigins: hss.CorsOrigins, AllowedHeaders: hss.CorsHeaders} + co := cors.Options{ + AllowedOrigins: hss.CorsOrigins, + AllowCredentials: true, + AllowedHeaders: hss.CorsHeaders, + } handler = cors.New(co).Handler(handler) } // TODO: emit a warning when non-empty CorsHeaders and empty CorsOrigins. - handler = middleware.HTTPContentDecompressor( + // Enable OpenTelemetry observability plugin. + // TODO: Consider to use component ID string as prefix for all the operations. + handler = otelhttp.NewHandler( handler, - middleware.WithErrorHandler(serverOpts.errorHandler), + "", + otelhttp.WithTracerProvider(otel.GetTracerProvider()), + otelhttp.WithPropagators(otel.GetTextMapPropagator()), + otelhttp.WithSpanNameFormatter(func(operation string, r *http.Request) string { + return r.URL.Path + }), ) + return &http.Server{ Handler: handler, } diff --git a/internal/otel_collector/config/configloader/config.go b/internal/otel_collector/config/configloader/config.go index 7e172096c3c..c8dc9d5c27e 100644 --- a/internal/otel_collector/config/configloader/config.go +++ b/internal/otel_collector/config/configloader/config.go @@ -438,7 +438,7 @@ func parseIDNames(pipelineID config.ComponentID, componentType string, names []s return ret, nil } -// expandEnvConfig creates a new viper config with expanded values for all the values (simple, list or map value). +// expandEnvConfig updates a configparser.Parser with expanded values for all the values (simple, list or map value). // It does not expand the keys. 
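// Illustrative sketch (not from the patch itself) of the otelhttp wrapping that ToServer now
// applies; the span name formatter mirrors the patch by naming spans after the request path.
// The wrapped handler is a placeholder supplied by the caller.
package confighttpexample

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel"
)

func instrument(handler http.Handler) http.Handler {
	return otelhttp.NewHandler(
		handler,
		"", // operation name left empty, as in the patch
		otelhttp.WithTracerProvider(otel.GetTracerProvider()),
		otelhttp.WithPropagators(otel.GetTextMapPropagator()),
		otelhttp.WithSpanNameFormatter(func(operation string, r *http.Request) string {
			return r.URL.Path
		}),
	)
}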
func expandEnvConfig(v *configparser.Parser) { for _, k := range v.AllKeys() { @@ -458,6 +458,12 @@ func expandStringValues(value interface{}) interface{} { nslice = append(nslice, expandStringValues(vint)) } return nslice + case map[string]interface{}: + nmap := make(map[interface{}]interface{}, len(v)) + for k, vint := range v { + nmap[k] = expandStringValues(vint) + } + return nmap case map[interface{}]interface{}: nmap := make(map[interface{}]interface{}, len(v)) for k, vint := range v { diff --git a/internal/otel_collector/config/configparser/parser.go b/internal/otel_collector/config/configparser/parser.go index 92a082a0cb9..481a61ead55 100644 --- a/internal/otel_collector/config/configparser/parser.go +++ b/internal/otel_collector/config/configparser/parser.go @@ -17,31 +17,33 @@ package configparser import ( "fmt" "io" + "io/ioutil" "reflect" - "strings" + "github.com/knadh/koanf" + "github.com/knadh/koanf/parsers/yaml" + "github.com/knadh/koanf/providers/confmap" + "github.com/knadh/koanf/providers/file" + "github.com/knadh/koanf/providers/rawbytes" + "github.com/mitchellh/mapstructure" "github.com/spf13/cast" - "github.com/spf13/viper" ) const ( - // KeyDelimiter is used as the default key delimiter in the default viper instance. + // KeyDelimiter is used as the default key delimiter in the default koanf instance. KeyDelimiter = "::" ) // NewParser creates a new empty Parser instance. func NewParser() *Parser { - return &Parser{ - v: viper.NewWithOptions(viper.KeyDelimiter(KeyDelimiter)), - } + return &Parser{k: koanf.New(KeyDelimiter)} } // NewParserFromFile creates a new Parser by reading the given file. func NewParserFromFile(fileName string) (*Parser, error) { // Read yaml config from file. p := NewParser() - p.v.SetConfigFile(fileName) - if err := p.v.ReadInConfig(); err != nil { + if err := p.k.Load(file.Provider(fileName), yaml.Parser()); err != nil { return nil, fmt.Errorf("unable to read the file %v: %w", fileName, err) } return p, nil @@ -49,71 +51,90 @@ func NewParserFromFile(fileName string) (*Parser, error) { // NewParserFromBuffer creates a new Parser by reading the given yaml buffer. func NewParserFromBuffer(buf io.Reader) (*Parser, error) { + content, err := ioutil.ReadAll(buf) + if err != nil { + return nil, err + } + p := NewParser() - p.v.SetConfigType("yaml") - if err := p.v.ReadConfig(buf); err != nil { + if err := p.k.Load(rawbytes.Provider(content), yaml.Parser()); err != nil { return nil, err } + return p, nil } // NewParserFromStringMap creates a parser from a map[string]interface{}. func NewParserFromStringMap(data map[string]interface{}) *Parser { p := NewParser() - // Cannot return error because the viper instance is empty. - _ = p.v.MergeConfigMap(data) + // Cannot return error because the koanf instance is empty. + _ = p.k.Load(confmap.Provider(data, KeyDelimiter), nil) return p } // Parser loads configuration. type Parser struct { - v *viper.Viper + k *koanf.Koanf } // AllKeys returns all keys holding a value, regardless of where they are set. // Nested keys are returned with a KeyDelimiter separator. func (l *Parser) AllKeys() []string { - return l.v.AllKeys() + return l.k.Keys() } // Unmarshal unmarshals the config into a struct. // Tags on the fields of the structure must be properly set. 
func (l *Parser) Unmarshal(rawVal interface{}) error { - return l.v.Unmarshal(rawVal) + decoder, err := mapstructure.NewDecoder(decoderConfig(rawVal)) + if err != nil { + return err + } + return decoder.Decode(l.ToStringMap()) } // UnmarshalExact unmarshals the config into a struct, erroring if a field is nonexistent. func (l *Parser) UnmarshalExact(intoCfg interface{}) error { - return l.v.UnmarshalExact(intoCfg) + dc := decoderConfig(intoCfg) + dc.ErrorUnused = true + decoder, err := mapstructure.NewDecoder(dc) + if err != nil { + return err + } + return decoder.Decode(l.ToStringMap()) } // Get can retrieve any value given the key to use. func (l *Parser) Get(key string) interface{} { - return l.v.Get(key) + return l.k.Get(key) } // Set sets the value for the key. func (l *Parser) Set(key string, value interface{}) { - l.v.Set(key, value) + // koanf doesn't offer a direct setting mechanism so merging is required. + merged := koanf.New(KeyDelimiter) + merged.Load(confmap.Provider(map[string]interface{}{key: value}, KeyDelimiter), nil) + l.k.Merge(merged) } // IsSet checks to see if the key has been set in any of the data locations. // IsSet is case-insensitive for a key. func (l *Parser) IsSet(key string) bool { - return l.v.IsSet(key) + return l.k.Exists(key) } // MergeStringMap merges the configuration from the given map with the existing config. // Note that the given map may be modified. func (l *Parser) MergeStringMap(cfg map[string]interface{}) error { - return l.v.MergeConfigMap(cfg) + toMerge := koanf.New(KeyDelimiter) + toMerge.Load(confmap.Provider(cfg, KeyDelimiter), nil) + return l.k.Merge(toMerge) } -// Sub returns new Parser instance representing a sub tree of this instance. +// Sub returns new Parser instance representing a sub-config of this instance. +// It returns an error is the sub-config is not a map (use Get()) and an empty Parser if +// none exists. func (l *Parser) Sub(key string) (*Parser, error) { - // Copied from the Viper but changed to use the same delimiter - // and return error if the sub is not a map. - // See https://github.com/spf13/viper/issues/871 data := l.Get(key) if data == nil { return NewParser(), nil @@ -122,60 +143,71 @@ func (l *Parser) Sub(key string) (*Parser, error) { if reflect.TypeOf(data).Kind() == reflect.Map { subParser := NewParser() // Cannot return error because the subv is empty. - _ = subParser.v.MergeConfigMap(cast.ToStringMap(data)) + _ = subParser.MergeStringMap(cast.ToStringMap(data)) return subParser, nil } return nil, fmt.Errorf("unexpected sub-config value kind for key:%s value:%v kind:%v)", key, data, reflect.TypeOf(data).Kind()) } -// deepSearch scans deep maps, following the key indexes listed in the -// sequence "path". -// The last value is expected to be another map, and is returned. -// -// In case intermediate keys do not exist, or map to a non-map value, -// a new map is created and inserted, and the search continues from there: -// the initial map "m" may be modified! -// This function comes from Viper code https://github.com/spf13/viper/blob/5253694/util.go#L201-L230 -// It is used here because of https://github.com/spf13/viper/issues/819 -func deepSearch(m map[string]interface{}, path []string) map[string]interface{} { - for _, k := range path { - m2, ok := m[k] - if !ok { - // Intermediate key does not exist: - // create it and continue from there. 
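// Illustrative sketch (not from the patch itself) of the koanf-backed Parser in use. The
// exampleCfg struct and its fields are assumptions for this example; the "5s" string is
// decoded to a time.Duration by the StringToTimeDuration hook installed in decoderConfig.
package configparserexample

import (
	"time"

	"go.opentelemetry.io/collector/config/configparser"
)

type exampleCfg struct {
	Limit   int           `mapstructure:"limit"`
	Timeout time.Duration `mapstructure:"timeout"`
}

func parseExample() (*exampleCfg, error) {
	p := configparser.NewParserFromStringMap(map[string]interface{}{
		"limit":   10,
		"timeout": "5s",
	})
	cfg := &exampleCfg{}
	// UnmarshalExact errors on keys that have no corresponding struct field.
	if err := p.UnmarshalExact(cfg); err != nil {
		return nil, err
	}
	return cfg, nil
}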
- m3 := make(map[string]interface{}) - m[k] = m3 - m = m3 - continue - } - m3, ok := m2.(map[string]interface{}) - if !ok { - // Intermediate key is a value: - // replace with a new map. - m3 = make(map[string]interface{}) - m[k] = m3 - } - // continue search from here - m = m3 +// ToStringMap creates a map[string]interface{} from a Parser. +func (l *Parser) ToStringMap() map[string]interface{} { + return l.k.Raw() +} + +// decoderConfig returns a default mapstructure.DecoderConfig capable of parsing time.Duration +// and weakly converting config field values to primitive types. It also ensures that maps +// whose values are nil pointer structs resolved to the zero value of the target struct (see +// expandNilStructPointers). A decoder created from this mapstructure.DecoderConfig will decode +// its contents to the result argument. +func decoderConfig(result interface{}) *mapstructure.DecoderConfig { + return &mapstructure.DecoderConfig{ + Result: result, + Metadata: nil, + TagName: "mapstructure", + WeaklyTypedInput: true, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + expandNilStructPointers(), + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + ), } - return m } -// ToStringMap creates a map[string]interface{} from a Parser. -func (l *Parser) ToStringMap() map[string]interface{} { - // This is equivalent to l.v.AllSettings() but it maps nil values. - // We can't use AllSettings here because of https://github.com/spf13/viper/issues/819 - - m := map[string]interface{}{} - // Start from the list of keys, and construct the map one value at a time. - for _, k := range l.v.AllKeys() { - value := l.v.Get(k) - path := strings.Split(k, KeyDelimiter) - lastKey := strings.ToLower(path[len(path)-1]) - deepestMap := deepSearch(m, path[0:len(path)-1]) - // Set innermost value. - deepestMap[lastKey] = value +// In cases where a config has a mapping of something to a struct pointers +// we want nil values to resolve to a pointer to the zero value of the +// underlying struct just as we want nil values of a mapping of something +// to a struct to resolve to the zero value of that struct. +// +// e.g. 
given a config type: +// type Config struct { Thing *SomeStruct `mapstructure:"thing"` } +// +// and yaml of: +// config: +// thing: +// +// we want an unmarshalled Config to be equivalent to +// Config{Thing: &SomeStruct{}} instead of Config{Thing: nil} +func expandNilStructPointers() mapstructure.DecodeHookFunc { + return func(from reflect.Value, to reflect.Value) (interface{}, error) { + // ensure we are dealing with map to map comparison + if from.Kind() == reflect.Map && to.Kind() == reflect.Map { + toElem := to.Type().Elem() + // ensure that map values are pointers to a struct + // (that may be nil and require manual setting w/ zero value) + if toElem.Kind() == reflect.Ptr && toElem.Elem().Kind() == reflect.Struct { + fromRange := from.MapRange() + for fromRange.Next() { + fromKey := fromRange.Key() + fromValue := fromRange.Value() + // ensure that we've run into a nil pointer instance + if fromValue.IsNil() { + newFromValue := reflect.New(toElem.Elem()) + from.SetMapIndex(fromKey, newFromValue) + } + } + } + } + return from.Interface(), nil } - return m } diff --git a/internal/otel_collector/config/configtls/README.md b/internal/otel_collector/config/configtls/README.md index ea87db3c287..60be861dcf0 100644 --- a/internal/otel_collector/config/configtls/README.md +++ b/internal/otel_collector/config/configtls/README.md @@ -34,6 +34,12 @@ won't use TLS at all. - `insecure_skip_verify` (default = false): whether to skip verifying the certificate or not. +Minimum and maximum TLS version can be set: + +- `min_version` (default = "1.0"): Minimum acceptable TLS version. + +- `max_version` (default = "1.3"): Maximum acceptable TLS version. + How TLS/mTLS is configured depends on whether configuring the client or server. See below for examples. @@ -63,6 +69,8 @@ exporters: ca_file: server.crt cert_file: client.crt key_file: client.key + min_version: "1.1" + max_version: "1.2" otlp/insecure: endpoint: myserver.local:55690 insecure: true diff --git a/internal/otel_collector/config/configtls/configtls.go b/internal/otel_collector/config/configtls/configtls.go index 641971f2f7b..7b6c215d3b8 100644 --- a/internal/otel_collector/config/configtls/configtls.go +++ b/internal/otel_collector/config/configtls/configtls.go @@ -30,10 +30,20 @@ type TLSSetting struct { // For a server this verifies client certificates. If empty uses system root CA. // (optional) CAFile string `mapstructure:"ca_file"` + // Path to the TLS cert to use for TLS required connections. (optional) CertFile string `mapstructure:"cert_file"` + // Path to the TLS key to use for TLS required connections. (optional) KeyFile string `mapstructure:"key_file"` + + // MinVersion sets the minimum TLS version that is acceptable. + // If not set, TLS 1.0 is used. (optional) + MinVersion string `mapstructure:"min_version"` + + // MaxVersion sets the maximum TLS version that is acceptable. + // If not set, TLS 1.3 is used. 
(optional) + MaxVersion string `mapstructure:"max_version"` } // TLSClientSetting contains TLS configurations that are specific to client @@ -96,16 +106,28 @@ func (c TLSSetting) loadTLSConfig() (*tls.Config, error) { var certificates []tls.Certificate if c.CertFile != "" && c.KeyFile != "" { - tlsCert, err := tls.LoadX509KeyPair(filepath.Clean(c.CertFile), filepath.Clean(c.KeyFile)) + var tlsCert tls.Certificate + tlsCert, err = tls.LoadX509KeyPair(filepath.Clean(c.CertFile), filepath.Clean(c.KeyFile)) if err != nil { return nil, fmt.Errorf("failed to load TLS cert and key: %w", err) } certificates = append(certificates, tlsCert) } + minTLS, err := convertVersion(c.MinVersion) + if err != nil { + return nil, fmt.Errorf("invalid TLS min_version: %w", err) + } + maxTLS, err := convertVersion(c.MaxVersion) + if err != nil { + return nil, fmt.Errorf("invalid TLS max_version: %w", err) + } + return &tls.Config{ RootCAs: certPool, Certificates: certificates, + MinVersion: minTLS, + MaxVersion: maxTLS, }, nil } @@ -153,3 +175,21 @@ func (c TLSServerSetting) LoadTLSConfig() (*tls.Config, error) { } return tlsCfg, nil } + +func convertVersion(v string) (uint16, error) { + if v == "" { + return 0, nil // default + } + val, ok := tlsVersions[v] + if !ok { + return 0, fmt.Errorf("unsupported TLS version: %q", v) + } + return val, nil +} + +var tlsVersions = map[string]uint16{ + "1.0": tls.VersionTLS10, + "1.1": tls.VersionTLS11, + "1.2": tls.VersionTLS12, + "1.3": tls.VersionTLS13, +} diff --git a/internal/otel_collector/config/internal/configsource/manager.go b/internal/otel_collector/config/internal/configsource/manager.go index 81ab85cfd05..ddcf8bf9909 100644 --- a/internal/otel_collector/config/internal/configsource/manager.go +++ b/internal/otel_collector/config/internal/configsource/manager.go @@ -309,6 +309,16 @@ func (m *Manager) expandStringValues(ctx context.Context, value interface{}) (in nslice = append(nslice, value) } return nslice, nil + case map[string]interface{}: + nmap := make(map[interface{}]interface{}, len(v)) + for k, vint := range v { + value, err := m.expandStringValues(ctx, vint) + if err != nil { + return nil, err + } + nmap[k] = value + } + return nmap, nil case map[interface{}]interface{}: nmap := make(map[interface{}]interface{}, len(v)) for k, vint := range v { diff --git a/internal/otel_collector/consumer/consumer.go b/internal/otel_collector/consumer/consumer.go index bf907473fc0..b079981c07a 100644 --- a/internal/otel_collector/consumer/consumer.go +++ b/internal/otel_collector/consumer/consumer.go @@ -17,7 +17,7 @@ package consumer import ( "context" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) // Capabilities describes the capabilities of a Processor. diff --git a/internal/otel_collector/consumer/consumererror/permanent.go b/internal/otel_collector/consumer/consumererror/permanent.go index 4b2fe49de8d..c4b2c4fc0a9 100644 --- a/internal/otel_collector/consumer/consumererror/permanent.go +++ b/internal/otel_collector/consumer/consumererror/permanent.go @@ -22,7 +22,7 @@ type permanent struct { err error } -// Permanent wraps an error to indicate that it is a permanent error, i.e.: an +// Permanent wraps an error to indicate that it is a permanent error, i.e. an // error that will be always returned if its source receives the same inputs. 
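// Illustrative sketch (not from the patch itself) of the new min_version/max_version fields
// applied through a client TLS setting; the file paths are placeholders echoing the README
// example above.
package configtlsexample

import (
	"crypto/tls"

	"go.opentelemetry.io/collector/config/configtls"
)

func clientTLSConfig() (*tls.Config, error) {
	setting := configtls.TLSClientSetting{
		TLSSetting: configtls.TLSSetting{
			CAFile:     "server.crt",
			CertFile:   "client.crt",
			KeyFile:    "client.key",
			MinVersion: "1.1", // reject anything older than TLS 1.1
			MaxVersion: "1.2", // and anything newer than TLS 1.2
		},
	}
	return setting.LoadTLSConfig()
}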
func Permanent(err error) error { return permanent{err: err} @@ -37,7 +37,7 @@ func (p permanent) Unwrap() error { return p.err } -// IsPermanent checks if an error was wrapped with the Permanent function, that +// IsPermanent checks if an error was wrapped with the Permanent function, which // is used to indicate that a given error will always be returned in the case // that its sources receives the same input. func IsPermanent(err error) bool { diff --git a/internal/otel_collector/consumer/consumererror/signalerrors.go b/internal/otel_collector/consumer/consumererror/signalerrors.go index f57a7281e5a..a1b1f47c130 100644 --- a/internal/otel_collector/consumer/consumererror/signalerrors.go +++ b/internal/otel_collector/consumer/consumererror/signalerrors.go @@ -17,7 +17,7 @@ package consumererror import ( "errors" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) // Traces is an error that may carry associated Trace data for a subset of received data @@ -35,7 +35,7 @@ func NewTraces(err error, failed pdata.Traces) error { } } -// AsTraces finds the first error in err's chain that can be assigned to target. If such an error is found +// AsTraces finds the first error in err's chain that can be assigned to target. If such an error is found, // it is assigned to target and true is returned, otherwise false is returned. func AsTraces(err error, target *Traces) bool { if err == nil { @@ -69,7 +69,7 @@ func NewLogs(err error, failed pdata.Logs) error { } } -// AsLogs finds the first error in err's chain that can be assigned to target. If such an error is found +// AsLogs finds the first error in err's chain that can be assigned to target. If such an error is found, // it is assigned to target and true is returned, otherwise false is returned. func AsLogs(err error, target *Logs) bool { if err == nil { @@ -103,7 +103,7 @@ func NewMetrics(err error, failed pdata.Metrics) error { } } -// AsMetrics finds the first error in err's chain that can be assigned to target. If such an error is found +// AsMetrics finds the first error in err's chain that can be assigned to target. If such an error is found, // it is assigned to target and true is returned, otherwise false is returned. func AsMetrics(err error, target *Metrics) bool { if err == nil { diff --git a/internal/otel_collector/consumer/consumerhelper/common.go b/internal/otel_collector/consumer/consumerhelper/common.go index c314a3f3c71..bce9aba07a7 100644 --- a/internal/otel_collector/consumer/consumerhelper/common.go +++ b/internal/otel_collector/consumer/consumerhelper/common.go @@ -26,10 +26,10 @@ type baseConsumer struct { capabilities consumer.Capabilities } -// Option apply changes to internalOptions. +// Option applies changes to internalOptions. type Option func(*baseConsumer) -// WithCapabilities overrides the default GetCapabilities function for an processor. +// WithCapabilities overrides the default GetCapabilities function for a processor. // The default GetCapabilities function returns mutable capabilities. 
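// Illustrative sketch (not from the patch itself) of marking an error as permanent so retry
// logic can skip it. The export helper and its failure are placeholders for this example.
package consumererrorexample

import (
	"errors"
	"fmt"

	"go.opentelemetry.io/collector/consumer/consumererror"
)

func export() error {
	err := errors.New("bad payload") // placeholder failure that retrying can never fix
	return consumererror.Permanent(fmt.Errorf("dropping data: %w", err))
}

func shouldRetry(err error) bool {
	return !consumererror.IsPermanent(err)
}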
func WithCapabilities(capabilities consumer.Capabilities) Option { return func(o *baseConsumer) { diff --git a/internal/otel_collector/consumer/consumerhelper/logs.go b/internal/otel_collector/consumer/consumerhelper/logs.go index b04b6ad51fc..3b29b3338d5 100644 --- a/internal/otel_collector/consumer/consumerhelper/logs.go +++ b/internal/otel_collector/consumer/consumerhelper/logs.go @@ -18,7 +18,7 @@ import ( "context" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) // ConsumeLogsFunc is a helper function that is similar to ConsumeLogs. diff --git a/internal/otel_collector/consumer/consumerhelper/metrics.go b/internal/otel_collector/consumer/consumerhelper/metrics.go index 4076c999dd7..6db7931807d 100644 --- a/internal/otel_collector/consumer/consumerhelper/metrics.go +++ b/internal/otel_collector/consumer/consumerhelper/metrics.go @@ -18,7 +18,7 @@ import ( "context" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) // ConsumeMetricsFunc is a helper function that is similar to ConsumeMetrics. diff --git a/internal/otel_collector/consumer/consumerhelper/traces.go b/internal/otel_collector/consumer/consumerhelper/traces.go index fd85f3eada6..4756fdfeded 100644 --- a/internal/otel_collector/consumer/consumerhelper/traces.go +++ b/internal/otel_collector/consumer/consumerhelper/traces.go @@ -18,7 +18,7 @@ import ( "context" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) // ConsumeTracesFunc is a helper function that is similar to ConsumeTraces. diff --git a/internal/otel_collector/consumer/consumertest/base_consumer.go b/internal/otel_collector/consumer/consumertest/base_consumer.go index 02b5b827aa6..420b205d919 100644 --- a/internal/otel_collector/consumer/consumertest/base_consumer.go +++ b/internal/otel_collector/consumer/consumertest/base_consumer.go @@ -20,6 +20,7 @@ import ( type nonMutatingConsumer struct{} +// Capabilities returns the base consumer capabilities. func (bc nonMutatingConsumer) Capabilities() consumer.Capabilities { return consumer.Capabilities{MutatesData: false} } diff --git a/internal/otel_collector/consumer/consumertest/consumer.go b/internal/otel_collector/consumer/consumertest/consumer.go index b009a1d41b6..ee5125b4ba5 100644 --- a/internal/otel_collector/consumer/consumertest/consumer.go +++ b/internal/otel_collector/consumer/consumertest/consumer.go @@ -18,13 +18,13 @@ import ( "context" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) // Consumer is a convenience interface that implements all consumer interfaces. -// It has a private function on it to forbid external users to implement it, -// to allow us to add extra functions without breaking compatibility because -// nobody else implements this interface. +// It has a private function on it to forbid external users from implementing it +// and, as a result, to allow us to add extra functions without breaking +// compatibility. type Consumer interface { // Capabilities to implement the base consumer functionality. 
Capabilities() consumer.Capabilities diff --git a/internal/otel_collector/consumer/consumertest/err.go b/internal/otel_collector/consumer/consumertest/err.go index 58d049071b1..21d5a2d5fb0 100644 --- a/internal/otel_collector/consumer/consumertest/err.go +++ b/internal/otel_collector/consumer/consumertest/err.go @@ -17,7 +17,7 @@ package consumertest import ( "context" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) type errConsumer struct { @@ -39,7 +39,7 @@ func (er *errConsumer) ConsumeLogs(context.Context, pdata.Logs) error { return er.err } -// NewErr returns a Consumer that just drops all received data and returns no error. +// NewErr returns a Consumer that just drops all received data and returns the specified error to Consume* callers. func NewErr(err error) Consumer { return &errConsumer{err: err} } diff --git a/internal/otel_collector/consumer/consumertest/nop.go b/internal/otel_collector/consumer/consumertest/nop.go index 9415aeed3c7..106b1c090b3 100644 --- a/internal/otel_collector/consumer/consumertest/nop.go +++ b/internal/otel_collector/consumer/consumertest/nop.go @@ -17,7 +17,7 @@ package consumertest import ( "context" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) var ( diff --git a/internal/otel_collector/consumer/consumertest/sink.go b/internal/otel_collector/consumer/consumertest/sink.go index 06ae77fdb6f..172417d90f0 100644 --- a/internal/otel_collector/consumer/consumertest/sink.go +++ b/internal/otel_collector/consumer/consumertest/sink.go @@ -19,16 +19,16 @@ import ( "sync" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) // TracesSink is a consumer.Traces that acts like a sink that // stores all traces and allows querying them for testing. type TracesSink struct { nonMutatingConsumer - mu sync.Mutex - traces []pdata.Traces - spansCount int + mu sync.Mutex + traces []pdata.Traces + spanCount int } var _ consumer.Traces = (*TracesSink)(nil) @@ -39,7 +39,7 @@ func (ste *TracesSink) ConsumeTraces(_ context.Context, td pdata.Traces) error { defer ste.mu.Unlock() ste.traces = append(ste.traces, td) - ste.spansCount += td.SpanCount() + ste.spanCount += td.SpanCount() return nil } @@ -54,11 +54,11 @@ func (ste *TracesSink) AllTraces() []pdata.Traces { return copyTraces } -// SpansCount return the number of spans sent to this sink. -func (ste *TracesSink) SpansCount() int { +// SpanCount returns the number of spans sent to this sink. +func (ste *TracesSink) SpanCount() int { ste.mu.Lock() defer ste.mu.Unlock() - return ste.spansCount + return ste.spanCount } // Reset deletes any stored data. @@ -67,16 +67,16 @@ func (ste *TracesSink) Reset() { defer ste.mu.Unlock() ste.traces = nil - ste.spansCount = 0 + ste.spanCount = 0 } // MetricsSink is a consumer.Metrics that acts like a sink that // stores all metrics and allows querying them for testing. 
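// Illustrative sketch (not from the patch itself) of TracesSink in a test, using the renamed
// SpanCount accessor. The span name "example-span" is an arbitrary placeholder.
package consumertestexample

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"

	"go.opentelemetry.io/collector/consumer/consumertest"
	"go.opentelemetry.io/collector/model/pdata"
)

func TestSinkCountsSpans(t *testing.T) {
	sink := new(consumertest.TracesSink)

	td := pdata.NewTraces()
	td.ResourceSpans().AppendEmpty().
		InstrumentationLibrarySpans().AppendEmpty().
		Spans().AppendEmpty().SetName("example-span")

	assert.NoError(t, sink.ConsumeTraces(context.Background(), td))
	assert.Equal(t, 1, sink.SpanCount()) // previously SpansCount()
	assert.Len(t, sink.AllTraces(), 1)
}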
type MetricsSink struct { nonMutatingConsumer - mu sync.Mutex - metrics []pdata.Metrics - metricsCount int + mu sync.Mutex + metrics []pdata.Metrics + dataPointCount int } var _ consumer.Metrics = (*MetricsSink)(nil) @@ -87,7 +87,7 @@ func (sme *MetricsSink) ConsumeMetrics(_ context.Context, md pdata.Metrics) erro defer sme.mu.Unlock() sme.metrics = append(sme.metrics, md) - sme.metricsCount += md.MetricCount() + sme.dataPointCount += md.DataPointCount() return nil } @@ -102,11 +102,11 @@ func (sme *MetricsSink) AllMetrics() []pdata.Metrics { return copyMetrics } -// MetricsCount return the number of metrics stored by this sink since last Reset. -func (sme *MetricsSink) MetricsCount() int { +// DataPointCount returns the number of metrics stored by this sink since last Reset. +func (sme *MetricsSink) DataPointCount() int { sme.mu.Lock() defer sme.mu.Unlock() - return sme.metricsCount + return sme.dataPointCount } // Reset deletes any stored data. @@ -115,16 +115,16 @@ func (sme *MetricsSink) Reset() { defer sme.mu.Unlock() sme.metrics = nil - sme.metricsCount = 0 + sme.dataPointCount = 0 } // LogsSink is a consumer.Logs that acts like a sink that // stores all logs and allows querying them for testing. type LogsSink struct { nonMutatingConsumer - mu sync.Mutex - logs []pdata.Logs - logRecordsCount int + mu sync.Mutex + logs []pdata.Logs + logRecordCount int } var _ consumer.Logs = (*LogsSink)(nil) @@ -135,7 +135,7 @@ func (sle *LogsSink) ConsumeLogs(_ context.Context, ld pdata.Logs) error { defer sle.mu.Unlock() sle.logs = append(sle.logs, ld) - sle.logRecordsCount += ld.LogRecordCount() + sle.logRecordCount += ld.LogRecordCount() return nil } @@ -150,11 +150,11 @@ func (sle *LogsSink) AllLogs() []pdata.Logs { return copyLogs } -// LogRecordsCount return the number of log records stored by this sink since last Reset. -func (sle *LogsSink) LogRecordsCount() int { +// LogRecordCount returns the number of log records stored by this sink since last Reset. +func (sle *LogsSink) LogRecordCount() int { sle.mu.Lock() defer sle.mu.Unlock() - return sle.logRecordsCount + return sle.logRecordCount } // Reset deletes any stored data. @@ -163,5 +163,5 @@ func (sle *LogsSink) Reset() { defer sle.mu.Unlock() sle.logs = nil - sle.logRecordsCount = 0 + sle.logRecordCount = 0 } diff --git a/internal/otel_collector/consumer/pdata/common.go b/internal/otel_collector/consumer/pdata/common.go deleted file mode 100644 index 46513dffe6f..00000000000 --- a/internal/otel_collector/consumer/pdata/common.go +++ /dev/null @@ -1,814 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pdata - -// This file contains data structures that are common for all telemetry types, -// such as timestamps, attributes, etc. - -import ( - "sort" - - otlpcommon "go.opentelemetry.io/collector/internal/data/protogen/common/v1" -) - -// AttributeValueType specifies the type of AttributeValue. 
-type AttributeValueType int32 - -const ( - AttributeValueTypeNull AttributeValueType = iota - AttributeValueTypeString - AttributeValueTypeInt - AttributeValueTypeDouble - AttributeValueTypeBool - AttributeValueTypeMap - AttributeValueTypeArray -) - -func (avt AttributeValueType) String() string { - switch avt { - case AttributeValueTypeNull: - return "NULL" - case AttributeValueTypeString: - return "STRING" - case AttributeValueTypeBool: - return "BOOL" - case AttributeValueTypeInt: - return "INT" - case AttributeValueTypeDouble: - return "DOUBLE" - case AttributeValueTypeMap: - return "MAP" - case AttributeValueTypeArray: - return "ARRAY" - } - return "" -} - -// AttributeValue represents a value of an attribute. Typically used in AttributeMap. -// Must use one of NewAttributeValue+ functions below to create new instances. -// -// Intended to be passed by value since internally it is just a pointer to actual -// value representation. For the same reason passing by value and calling setters -// will modify the original, e.g.: -// -// function f1(val AttributeValue) { val.SetIntVal(234) } -// function f2() { -// v := NewAttributeValueString("a string") -// f1(v) -// _ := v.Type() // this will return AttributeValueTypeInt -// } -// -// Important: zero-initialized instance is not valid for use. All AttributeValue functions bellow must -// be called only on instances that are created via NewAttributeValue+ functions. -type AttributeValue struct { - orig *otlpcommon.AnyValue -} - -func newAttributeValue(orig *otlpcommon.AnyValue) AttributeValue { - return AttributeValue{orig} -} - -// NewAttributeValueNull creates a new AttributeValue with a null value. -func NewAttributeValueNull() AttributeValue { - return AttributeValue{orig: &otlpcommon.AnyValue{}} -} - -// NewAttributeValueString creates a new AttributeValue with the given string value. -func NewAttributeValueString(v string) AttributeValue { - return AttributeValue{orig: &otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: v}}} -} - -// NewAttributeValueInt creates a new AttributeValue with the given int64 value. -func NewAttributeValueInt(v int64) AttributeValue { - return AttributeValue{orig: &otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_IntValue{IntValue: v}}} -} - -// NewAttributeValueDouble creates a new AttributeValue with the given float64 value. -func NewAttributeValueDouble(v float64) AttributeValue { - return AttributeValue{orig: &otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_DoubleValue{DoubleValue: v}}} -} - -// NewAttributeValueBool creates a new AttributeValue with the given bool value. -func NewAttributeValueBool(v bool) AttributeValue { - return AttributeValue{orig: &otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_BoolValue{BoolValue: v}}} -} - -// NewAttributeValueMap creates a new AttributeValue of map type. -func NewAttributeValueMap() AttributeValue { - return AttributeValue{orig: &otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_KvlistValue{KvlistValue: &otlpcommon.KeyValueList{}}}} -} - -// NewAttributeValueArray creates a new AttributeValue of array type. -func NewAttributeValueArray() AttributeValue { - return AttributeValue{orig: &otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_ArrayValue{ArrayValue: &otlpcommon.ArrayValue{}}}} -} - -// Type returns the type of the value for this AttributeValue. -// Calling this function on zero-initialized AttributeValue will cause a panic. 
-func (a AttributeValue) Type() AttributeValueType { - if a.orig.Value == nil { - return AttributeValueTypeNull - } - switch a.orig.Value.(type) { - case *otlpcommon.AnyValue_StringValue: - return AttributeValueTypeString - case *otlpcommon.AnyValue_BoolValue: - return AttributeValueTypeBool - case *otlpcommon.AnyValue_IntValue: - return AttributeValueTypeInt - case *otlpcommon.AnyValue_DoubleValue: - return AttributeValueTypeDouble - case *otlpcommon.AnyValue_KvlistValue: - return AttributeValueTypeMap - case *otlpcommon.AnyValue_ArrayValue: - return AttributeValueTypeArray - } - return AttributeValueTypeNull -} - -// StringVal returns the string value associated with this AttributeValue. -// If the Type() is not AttributeValueTypeString then returns empty string. -// Calling this function on zero-initialized AttributeValue will cause a panic. -func (a AttributeValue) StringVal() string { - return a.orig.GetStringValue() -} - -// IntVal returns the int64 value associated with this AttributeValue. -// If the Type() is not AttributeValueTypeInt then returns int64(0). -// Calling this function on zero-initialized AttributeValue will cause a panic. -func (a AttributeValue) IntVal() int64 { - return a.orig.GetIntValue() -} - -// DoubleVal returns the float64 value associated with this AttributeValue. -// If the Type() is not AttributeValueTypeDouble then returns float64(0). -// Calling this function on zero-initialized AttributeValue will cause a panic. -func (a AttributeValue) DoubleVal() float64 { - return a.orig.GetDoubleValue() -} - -// BoolVal returns the bool value associated with this AttributeValue. -// If the Type() is not AttributeValueTypeBool then returns false. -// Calling this function on zero-initialized AttributeValue will cause a panic. -func (a AttributeValue) BoolVal() bool { - return a.orig.GetBoolValue() -} - -// MapVal returns the map value associated with this AttributeValue. -// If the Type() is not AttributeValueTypeMap then returns an empty map. Note that modifying -// such empty map has no effect on this AttributeValue. -// -// Calling this function on zero-initialized AttributeValue will cause a panic. -func (a AttributeValue) MapVal() AttributeMap { - kvlist := a.orig.GetKvlistValue() - if kvlist == nil { - return NewAttributeMap() - } - return newAttributeMap(&kvlist.Values) -} - -// ArrayVal returns the array value associated with this AttributeValue. -// If the Type() is not AttributeValueTypeArray then returns an empty array. Note that modifying -// such empty array has no effect on this AttributeValue. -// -// Calling this function on zero-initialized AttributeValue will cause a panic. -func (a AttributeValue) ArrayVal() AnyValueArray { - arr := a.orig.GetArrayValue() - if arr == nil { - return NewAnyValueArray() - } - return newAnyValueArray(&arr.Values) -} - -// SetStringVal replaces the string value associated with this AttributeValue, -// it also changes the type to be AttributeValueTypeString. -// Calling this function on zero-initialized AttributeValue will cause a panic. -func (a AttributeValue) SetStringVal(v string) { - a.orig.Value = &otlpcommon.AnyValue_StringValue{StringValue: v} -} - -// SetIntVal replaces the int64 value associated with this AttributeValue, -// it also changes the type to be AttributeValueTypeInt. -// Calling this function on zero-initialized AttributeValue will cause a panic. 
-func (a AttributeValue) SetIntVal(v int64) { - a.orig.Value = &otlpcommon.AnyValue_IntValue{IntValue: v} -} - -// SetDoubleVal replaces the float64 value associated with this AttributeValue, -// it also changes the type to be AttributeValueTypeDouble. -// Calling this function on zero-initialized AttributeValue will cause a panic. -func (a AttributeValue) SetDoubleVal(v float64) { - a.orig.Value = &otlpcommon.AnyValue_DoubleValue{DoubleValue: v} -} - -// SetBoolVal replaces the bool value associated with this AttributeValue, -// it also changes the type to be AttributeValueTypeBool. -// Calling this function on zero-initialized AttributeValue will cause a panic. -func (a AttributeValue) SetBoolVal(v bool) { - a.orig.Value = &otlpcommon.AnyValue_BoolValue{BoolValue: v} -} - -// copyTo copies the value to AnyValue. Will panic if dest is nil. -func (a AttributeValue) copyTo(dest *otlpcommon.AnyValue) { - switch v := a.orig.Value.(type) { - case *otlpcommon.AnyValue_KvlistValue: - kv, ok := dest.Value.(*otlpcommon.AnyValue_KvlistValue) - if !ok { - kv = &otlpcommon.AnyValue_KvlistValue{KvlistValue: &otlpcommon.KeyValueList{}} - dest.Value = kv - } - if v.KvlistValue == nil { - kv.KvlistValue = nil - return - } - // Deep copy to dest. - newAttributeMap(&v.KvlistValue.Values).CopyTo(newAttributeMap(&kv.KvlistValue.Values)) - case *otlpcommon.AnyValue_ArrayValue: - av, ok := dest.Value.(*otlpcommon.AnyValue_ArrayValue) - if !ok { - av = &otlpcommon.AnyValue_ArrayValue{ArrayValue: &otlpcommon.ArrayValue{}} - dest.Value = av - } - if v.ArrayValue == nil { - av.ArrayValue = nil - return - } - // Deep copy to dest. - newAnyValueArray(&v.ArrayValue.Values).CopyTo(newAnyValueArray(&av.ArrayValue.Values)) - default: - // Primitive immutable type, no need for deep copy. - dest.Value = a.orig.Value - } -} - -// CopyTo copies the attribute to a destination. -func (a AttributeValue) CopyTo(dest AttributeValue) { - a.copyTo(dest.orig) -} - -// Equal checks for equality, it returns true if the objects are equal otherwise false. -func (a AttributeValue) Equal(av AttributeValue) bool { - if a.orig == av.orig { - return true - } - - if a.orig.Value == nil || av.orig.Value == nil { - return a.orig.Value == av.orig.Value - } - - switch v := a.orig.Value.(type) { - case *otlpcommon.AnyValue_StringValue: - return v.StringValue == av.orig.GetStringValue() - case *otlpcommon.AnyValue_BoolValue: - return v.BoolValue == av.orig.GetBoolValue() - case *otlpcommon.AnyValue_IntValue: - return v.IntValue == av.orig.GetIntValue() - case *otlpcommon.AnyValue_DoubleValue: - return v.DoubleValue == av.orig.GetDoubleValue() - case *otlpcommon.AnyValue_ArrayValue: - vv := v.ArrayValue.GetValues() - avv := av.orig.GetArrayValue().GetValues() - if len(vv) != len(avv) { - return false - } - - for i, val := range avv { - val := val - av := newAttributeValue(&vv[i]) - - // According to the specification, array values must be scalar. 
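The removed AttributeValue doc comment describes the reference semantics with a pseudo-example (f1/f2). A concrete sketch of that behaviour is below, using only constructors and setters shown in the file above; the model/pdata import path is an assumption for v0.30.0.

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/model/pdata" // assumed v0.30.0 location of pdata
)

// overwrite receives the value by value, but AttributeValue is a reference type:
// the setter mutates the underlying data shared with the caller.
func overwrite(v pdata.AttributeValue) {
	v.SetIntVal(234)
}

func main() {
	v := pdata.NewAttributeValueString("a string")
	overwrite(v)

	// The original now reports INT, exactly as the removed doc comment describes.
	fmt.Println(v.Type() == pdata.AttributeValueTypeInt) // true
	fmt.Println(v.IntVal())                              // 234
}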
- if avType := av.Type(); avType == AttributeValueTypeArray || avType == AttributeValueTypeMap { - return false - } - - if !av.Equal(newAttributeValue(&val)) { - return false - } - } - return true - } - - // TODO: handle MAP data type - return false -} - -func newAttributeKeyValueString(k string, v string) otlpcommon.KeyValue { - orig := otlpcommon.KeyValue{Key: k} - akv := AttributeValue{&orig.Value} - akv.SetStringVal(v) - return orig -} - -func newAttributeKeyValueInt(k string, v int64) otlpcommon.KeyValue { - orig := otlpcommon.KeyValue{Key: k} - akv := AttributeValue{&orig.Value} - akv.SetIntVal(v) - return orig -} - -func newAttributeKeyValueDouble(k string, v float64) otlpcommon.KeyValue { - orig := otlpcommon.KeyValue{Key: k} - akv := AttributeValue{&orig.Value} - akv.SetDoubleVal(v) - return orig -} - -func newAttributeKeyValueBool(k string, v bool) otlpcommon.KeyValue { - orig := otlpcommon.KeyValue{Key: k} - akv := AttributeValue{&orig.Value} - akv.SetBoolVal(v) - return orig -} - -func newAttributeKeyValueNull(k string) otlpcommon.KeyValue { - orig := otlpcommon.KeyValue{Key: k} - return orig -} - -func newAttributeKeyValue(k string, av AttributeValue) otlpcommon.KeyValue { - orig := otlpcommon.KeyValue{Key: k} - av.copyTo(&orig.Value) - return orig -} - -// AttributeMap stores a map of attribute keys to values. -type AttributeMap struct { - orig *[]otlpcommon.KeyValue -} - -// NewAttributeMap creates a AttributeMap with 0 elements. -func NewAttributeMap() AttributeMap { - orig := []otlpcommon.KeyValue(nil) - return AttributeMap{&orig} -} - -func newAttributeMap(orig *[]otlpcommon.KeyValue) AttributeMap { - return AttributeMap{orig} -} - -// InitFromMap overwrites the entire AttributeMap and reconstructs the AttributeMap -// with values from the given map[string]string. -// -// Returns the same instance to allow nicer code like: -// assert.EqualValues(t, NewAttributeMap().InitFromMap(map[string]AttributeValue{...}), actual) -func (am AttributeMap) InitFromMap(attrMap map[string]AttributeValue) AttributeMap { - if len(attrMap) == 0 { - *am.orig = []otlpcommon.KeyValue(nil) - return am - } - origs := make([]otlpcommon.KeyValue, len(attrMap)) - ix := 0 - for k, v := range attrMap { - origs[ix].Key = k - v.copyTo(&origs[ix].Value) - ix++ - } - *am.orig = origs - return am -} - -// Clear erases any existing entries in this AttributeMap instance. -func (am AttributeMap) Clear() { - *am.orig = nil -} - -// EnsureCapacity increases the capacity of this AttributeMap instance, if necessary, -// to ensure that it can hold at least the number of elements specified by the capacity argument. -func (am AttributeMap) EnsureCapacity(capacity int) { - if capacity <= cap(*am.orig) { - return - } - oldOrig := *am.orig - *am.orig = make([]otlpcommon.KeyValue, 0, capacity) - copy(*am.orig, oldOrig) -} - -// Get returns the AttributeValue associated with the key and true. Returned -// AttributeValue is not a copy, it is a reference to the value stored in this map. -// It is allowed to modify the returned value using AttributeValue.Set* functions. -// Such modification will be applied to the value stored in this map. -// -// If the key does not exist returns an invalid instance of the KeyValue and false. -// Calling any functions on the returned invalid instance will cause a panic. 
-func (am AttributeMap) Get(key string) (AttributeValue, bool) { - for i := range *am.orig { - akv := &(*am.orig)[i] - if akv.Key == key { - return AttributeValue{&akv.Value}, true - } - } - return AttributeValue{nil}, false -} - -// Delete deletes the entry associated with the key and returns true if the key -// was present in the map, otherwise returns false. -func (am AttributeMap) Delete(key string) bool { - for i := range *am.orig { - akv := &(*am.orig)[i] - if akv.Key == key { - *akv = (*am.orig)[len(*am.orig)-1] - *am.orig = (*am.orig)[:len(*am.orig)-1] - return true - } - } - return false -} - -// Insert adds the AttributeValue to the map when the key does not exist. -// No action is applied to the map where the key already exists. -// -// Calling this function with a zero-initialized AttributeValue struct will cause a panic. -// -// Important: this function should not be used if the caller has access to -// the raw value to avoid an extra allocation. -func (am AttributeMap) Insert(k string, v AttributeValue) { - if _, existing := am.Get(k); !existing { - *am.orig = append(*am.orig, newAttributeKeyValue(k, v)) - } -} - -// InsertNull adds a null Value to the map when the key does not exist. -// No action is applied to the map where the key already exists. -func (am AttributeMap) InsertNull(k string) { - if _, existing := am.Get(k); !existing { - *am.orig = append(*am.orig, newAttributeKeyValueNull(k)) - } -} - -// InsertString adds the string Value to the map when the key does not exist. -// No action is applied to the map where the key already exists. -func (am AttributeMap) InsertString(k string, v string) { - if _, existing := am.Get(k); !existing { - *am.orig = append(*am.orig, newAttributeKeyValueString(k, v)) - } -} - -// InsertInt adds the int Value to the map when the key does not exist. -// No action is applied to the map where the key already exists. -func (am AttributeMap) InsertInt(k string, v int64) { - if _, existing := am.Get(k); !existing { - *am.orig = append(*am.orig, newAttributeKeyValueInt(k, v)) - } -} - -// InsertDouble adds the double Value to the map when the key does not exist. -// No action is applied to the map where the key already exists. -func (am AttributeMap) InsertDouble(k string, v float64) { - if _, existing := am.Get(k); !existing { - *am.orig = append(*am.orig, newAttributeKeyValueDouble(k, v)) - } -} - -// InsertBool adds the bool Value to the map when the key does not exist. -// No action is applied to the map where the key already exists. -func (am AttributeMap) InsertBool(k string, v bool) { - if _, existing := am.Get(k); !existing { - *am.orig = append(*am.orig, newAttributeKeyValueBool(k, v)) - } -} - -// Update updates an existing AttributeValue with a value. -// No action is applied to the map where the key does not exist. -// -// Calling this function with a zero-initialized AttributeValue struct will cause a panic. -// -// Important: this function should not be used if the caller has access to -// the raw value to avoid an extra allocation. -func (am AttributeMap) Update(k string, v AttributeValue) { - if av, existing := am.Get(k); existing { - v.copyTo(av.orig) - } -} - -// UpdateString updates an existing string Value with a value. -// No action is applied to the map where the key does not exist. -func (am AttributeMap) UpdateString(k string, v string) { - if av, existing := am.Get(k); existing { - av.SetStringVal(v) - } -} - -// UpdateInt updates an existing int Value with a value. 
-// No action is applied to the map where the key does not exist. -func (am AttributeMap) UpdateInt(k string, v int64) { - if av, existing := am.Get(k); existing { - av.SetIntVal(v) - } -} - -// UpdateDouble updates an existing double Value with a value. -// No action is applied to the map where the key does not exist. -func (am AttributeMap) UpdateDouble(k string, v float64) { - if av, existing := am.Get(k); existing { - av.SetDoubleVal(v) - } -} - -// UpdateBool updates an existing bool Value with a value. -// No action is applied to the map where the key does not exist. -func (am AttributeMap) UpdateBool(k string, v bool) { - if av, existing := am.Get(k); existing { - av.SetBoolVal(v) - } -} - -// Upsert performs the Insert or Update action. The AttributeValue is -// insert to the map that did not originally have the key. The key/value is -// updated to the map where the key already existed. -// -// Calling this function with a zero-initialized AttributeValue struct will cause a panic. -// -// Important: this function should not be used if the caller has access to -// the raw value to avoid an extra allocation. -func (am AttributeMap) Upsert(k string, v AttributeValue) { - if av, existing := am.Get(k); existing { - v.copyTo(av.orig) - } else { - *am.orig = append(*am.orig, newAttributeKeyValue(k, v)) - } -} - -// UpsertString performs the Insert or Update action. The AttributeValue is -// insert to the map that did not originally have the key. The key/value is -// updated to the map where the key already existed. -func (am AttributeMap) UpsertString(k string, v string) { - if av, existing := am.Get(k); existing { - av.SetStringVal(v) - } else { - *am.orig = append(*am.orig, newAttributeKeyValueString(k, v)) - } -} - -// UpsertInt performs the Insert or Update action. The int Value is -// insert to the map that did not originally have the key. The key/value is -// updated to the map where the key already existed. -func (am AttributeMap) UpsertInt(k string, v int64) { - if av, existing := am.Get(k); existing { - av.SetIntVal(v) - } else { - *am.orig = append(*am.orig, newAttributeKeyValueInt(k, v)) - } -} - -// UpsertDouble performs the Insert or Update action. The double Value is -// insert to the map that did not originally have the key. The key/value is -// updated to the map where the key already existed. -func (am AttributeMap) UpsertDouble(k string, v float64) { - if av, existing := am.Get(k); existing { - av.SetDoubleVal(v) - } else { - *am.orig = append(*am.orig, newAttributeKeyValueDouble(k, v)) - } -} - -// UpsertBool performs the Insert or Update action. The bool Value is -// insert to the map that did not originally have the key. The key/value is -// updated to the map where the key already existed. -func (am AttributeMap) UpsertBool(k string, v bool) { - if av, existing := am.Get(k); existing { - av.SetBoolVal(v) - } else { - *am.orig = append(*am.orig, newAttributeKeyValueBool(k, v)) - } -} - -// Sort sorts the entries in the AttributeMap so two instances can be compared. -// Returns the same instance to allow nicer code like: -// assert.EqualValues(t, expected.Sort(), actual.Sort()) -func (am AttributeMap) Sort() AttributeMap { - // Intention is to move the nil values at the end. - sort.SliceStable(*am.orig, func(i, j int) bool { - return (*am.orig)[i].Key < (*am.orig)[j].Key - }) - return am -} - -// Len returns the length of this map. 
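The Insert/Update/Upsert families documented above differ only in how they treat an existing key: Insert is a no-op when the key exists, Update is a no-op when it does not, Upsert always writes. A short sketch of those semantics, using the accessors shown in the removed file (model/pdata path assumed for v0.30.0):

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/model/pdata" // assumed v0.30.0 location of pdata
)

func main() {
	m := pdata.NewAttributeMap()

	m.InsertString("host", "a")  // key absent: added
	m.InsertString("host", "b")  // key present: Insert is a no-op
	m.UpdateInt("port", 8080)    // key absent: Update is a no-op
	m.UpsertString("host", "c")  // key present: Upsert overwrites
	m.UpsertDouble("ratio", 0.5) // key absent: Upsert adds

	if v, ok := m.Get("host"); ok {
		fmt.Println(v.StringVal()) // "c"
	}

	m.Range(func(k string, v pdata.AttributeValue) bool {
		fmt.Println(k, v.Type())
		return true // keep iterating
	})
}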
-// -// Because the AttributeMap is represented internally by a slice of pointers, and the data are comping from the wire, -// it is possible that when iterating using "Range" to get access to fewer elements because nil elements are skipped. -func (am AttributeMap) Len() int { - return len(*am.orig) -} - -// Range calls f sequentially for each key and value present in the map. If f returns false, range stops the iteration. -// -// Example: -// -// it := sm.Range(func(k string, v AttributeValue) { -// ... -// }) -func (am AttributeMap) Range(f func(k string, v AttributeValue) bool) { - for i := range *am.orig { - kv := &(*am.orig)[i] - if !f(kv.Key, AttributeValue{&kv.Value}) { - break - } - } -} - -// CopyTo copies all elements from the current map to the dest. -func (am AttributeMap) CopyTo(dest AttributeMap) { - newLen := len(*am.orig) - oldCap := cap(*dest.orig) - if newLen <= oldCap { - // New slice fits in existing slice, no need to reallocate. - *dest.orig = (*dest.orig)[:newLen:oldCap] - for i := range *am.orig { - akv := &(*am.orig)[i] - destAkv := &(*dest.orig)[i] - destAkv.Key = akv.Key - AttributeValue{&akv.Value}.copyTo(&destAkv.Value) - } - return - } - - // New slice is bigger than exist slice. Allocate new space. - origs := make([]otlpcommon.KeyValue, len(*am.orig)) - for i := range *am.orig { - akv := &(*am.orig)[i] - origs[i].Key = akv.Key - AttributeValue{&akv.Value}.copyTo(&origs[i].Value) - } - *dest.orig = origs -} - -// StringMap stores a map of attribute keys to values. -type StringMap struct { - orig *[]otlpcommon.StringKeyValue -} - -// NewStringMap creates a StringMap with 0 elements. -func NewStringMap() StringMap { - orig := []otlpcommon.StringKeyValue(nil) - return StringMap{&orig} -} - -func newStringMap(orig *[]otlpcommon.StringKeyValue) StringMap { - return StringMap{orig} -} - -// InitFromMap overwrites the entire StringMap and reconstructs the StringMap -// with values from the given map[string]string. -// -// Returns the same instance to allow nicer code like: -// assert.EqualValues(t, NewStringMap().InitFromMap(map[string]string{...}), actual) -func (sm StringMap) InitFromMap(attrMap map[string]string) StringMap { - if len(attrMap) == 0 { - *sm.orig = []otlpcommon.StringKeyValue(nil) - return sm - } - origs := make([]otlpcommon.StringKeyValue, len(attrMap)) - ix := 0 - for k, v := range attrMap { - origs[ix].Key = k - origs[ix].Value = v - ix++ - } - *sm.orig = origs - return sm -} - -// Clear erases any existing entries in this StringMap instance. -func (sm StringMap) Clear() { - *sm.orig = nil -} - -// EnsureCapacity increases the capacity of this StringMap instance, if necessary, -// to ensure that it can hold at least the number of elements specified by the capacity argument. -func (sm StringMap) EnsureCapacity(capacity int) { - if capacity <= cap(*sm.orig) { - return - } - oldOrig := *sm.orig - *sm.orig = make([]otlpcommon.StringKeyValue, 0, capacity) - copy(*sm.orig, oldOrig) -} - -// Get returns the StringValue associated with the key and true, -// otherwise an invalid instance of the StringKeyValue and false. -// Calling any functions on the returned invalid instance will cause a panic. -func (sm StringMap) Get(k string) (string, bool) { - skv, found := sm.get(k) - // GetValue handles the case where skv is nil. - return skv.GetValue(), found -} - -// Delete deletes the entry associated with the key and returns true if the key -// was present in the map, otherwise returns false. 
-func (sm StringMap) Delete(k string) bool { - for i := range *sm.orig { - skv := &(*sm.orig)[i] - if skv.Key == k { - (*sm.orig)[i] = (*sm.orig)[len(*sm.orig)-1] - *sm.orig = (*sm.orig)[:len(*sm.orig)-1] - return true - } - } - return false -} - -// Insert adds the string value to the map when the key does not exist. -// No action is applied to the map where the key already exists. -func (sm StringMap) Insert(k, v string) { - if _, existing := sm.Get(k); !existing { - *sm.orig = append(*sm.orig, newStringKeyValue(k, v)) - } -} - -// Update updates an existing string value with a value. -// No action is applied to the map where the key does not exist. -func (sm StringMap) Update(k, v string) { - if skv, existing := sm.get(k); existing { - skv.Value = v - } -} - -// Upsert performs the Insert or Update action. The string value is -// insert to the map that did not originally have the key. The key/value is -// updated to the map where the key already existed. -func (sm StringMap) Upsert(k, v string) { - if skv, existing := sm.get(k); existing { - skv.Value = v - } else { - *sm.orig = append(*sm.orig, newStringKeyValue(k, v)) - } -} - -// Len returns the length of this map. -// -// Because the AttributeMap is represented internally by a slice of pointers, and the data are comping from the wire, -// it is possible that when iterating using "Range" to get access to fewer elements because nil elements are skipped. -func (sm StringMap) Len() int { - return len(*sm.orig) -} - -// Range calls f sequentially for each key and value present in the map. If f returns false, range stops the iteration. -// -// Example: -// -// it := sm.Range(func(k string, v StringValue) { -// ... -// }) -func (sm StringMap) Range(f func(k string, v string) bool) { - for i := range *sm.orig { - skv := &(*sm.orig)[i] - if !f(skv.Key, skv.Value) { - break - } - } -} - -// CopyTo copies all elements from the current map to the dest. -func (sm StringMap) CopyTo(dest StringMap) { - newLen := len(*sm.orig) - oldCap := cap(*dest.orig) - if newLen <= oldCap { - *dest.orig = (*dest.orig)[:newLen:oldCap] - } else { - *dest.orig = make([]otlpcommon.StringKeyValue, newLen) - } - - for i := range *sm.orig { - skv := &(*sm.orig)[i] - (*dest.orig)[i].Key = skv.Key - (*dest.orig)[i].Value = skv.Value - } -} - -func (sm StringMap) get(k string) (*otlpcommon.StringKeyValue, bool) { - for i := range *sm.orig { - skv := &(*sm.orig)[i] - if skv.Key == k { - return skv, true - } - } - return nil, false -} - -// Sort sorts the entries in the StringMap so two instances can be compared. -// Returns the same instance to allow nicer code like: -// assert.EqualValues(t, expected.Sort(), actual.Sort()) -func (sm StringMap) Sort() StringMap { - sort.SliceStable(*sm.orig, func(i, j int) bool { - // Intention is to move the nil values at the end. - return (*sm.orig)[i].Key < (*sm.orig)[j].Key - }) - return sm -} - -func newStringKeyValue(k, v string) otlpcommon.StringKeyValue { - return otlpcommon.StringKeyValue{Key: k, Value: v} -} diff --git a/internal/otel_collector/consumer/pdata/doc.go b/internal/otel_collector/consumer/pdata/doc.go deleted file mode 100644 index be88c2a78ea..00000000000 --- a/internal/otel_collector/consumer/pdata/doc.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package pdata (pipeline data) implements data structures that represent telemetry data in-memory. -// All data received is converted into this format and travels through the pipeline -// in this format and that is converted from this format by exporters when sending. -// -// Current implementation primarily uses OTLP ProtoBuf structs as the underlying data -// structures for many of of the declared structs. We keep a pointer to OTLP protobuf -// in the "orig" member field. This allows efficient translation to/from OTLP wire -// protocol. Note that the underlying data structure is kept private so that in the -// future we are free to make changes to it to make more optimal. -// -// Most of the internal data structures must be created via New* functions. Zero-initialized -// structures in most cases are not valid (read comments for each struct to know if it -// is the case). This is a slight deviation from idiomatic Go to avoid unnecessary -// pointer checks in dozens of functions which assume the invariant that "orig" member -// is non-nil. Several structures also provide New*Slice functions that allows to create -// more than one instance of the struct more efficiently instead of calling New* -// repeatedly. Use it where appropriate. -package pdata diff --git a/internal/otel_collector/consumer/pdata/generated_common.go b/internal/otel_collector/consumer/pdata/generated_common.go deleted file mode 100644 index a726420ccc0..00000000000 --- a/internal/otel_collector/consumer/pdata/generated_common.go +++ /dev/null @@ -1,207 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by "cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "go run cmd/pdatagen/main.go". - -package pdata - -import ( - otlpcommon "go.opentelemetry.io/collector/internal/data/protogen/common/v1" -) - -// InstrumentationLibrary is a message representing the instrumentation library information. -// -// This is a reference type, if passed by value and callee modifies it the -// caller will see the modification. -// -// Must use NewInstrumentationLibrary function to create new instances. -// Important: zero-initialized instance is not valid for use. -type InstrumentationLibrary struct { - orig *otlpcommon.InstrumentationLibrary -} - -func newInstrumentationLibrary(orig *otlpcommon.InstrumentationLibrary) InstrumentationLibrary { - return InstrumentationLibrary{orig: orig} -} - -// NewInstrumentationLibrary creates a new empty InstrumentationLibrary. 
-// -// This must be used only in testing code since no "Set" method available. -func NewInstrumentationLibrary() InstrumentationLibrary { - return newInstrumentationLibrary(&otlpcommon.InstrumentationLibrary{}) -} - -// Name returns the name associated with this InstrumentationLibrary. -func (ms InstrumentationLibrary) Name() string { - return (*ms.orig).Name -} - -// SetName replaces the name associated with this InstrumentationLibrary. -func (ms InstrumentationLibrary) SetName(v string) { - (*ms.orig).Name = v -} - -// Version returns the version associated with this InstrumentationLibrary. -func (ms InstrumentationLibrary) Version() string { - return (*ms.orig).Version -} - -// SetVersion replaces the version associated with this InstrumentationLibrary. -func (ms InstrumentationLibrary) SetVersion(v string) { - (*ms.orig).Version = v -} - -// CopyTo copies all properties from the current struct to the dest. -func (ms InstrumentationLibrary) CopyTo(dest InstrumentationLibrary) { - dest.SetName(ms.Name()) - dest.SetVersion(ms.Version()) -} - -// AnyValueArray logically represents a slice of AttributeValue. -// -// This is a reference type. If passed by value and callee modifies it, the -// caller will see the modification. -// -// Must use NewAnyValueArray function to create new instances. -// Important: zero-initialized instance is not valid for use. -type AnyValueArray struct { - // orig points to the slice otlpcommon.AnyValue field contained somewhere else. - // We use pointer-to-slice to be able to modify it in functions like Resize. - orig *[]otlpcommon.AnyValue -} - -func newAnyValueArray(orig *[]otlpcommon.AnyValue) AnyValueArray { - return AnyValueArray{orig} -} - -// NewAnyValueArray creates a AnyValueArray with 0 elements. -// Can use "Resize" to initialize with a given length. -func NewAnyValueArray() AnyValueArray { - orig := []otlpcommon.AnyValue(nil) - return AnyValueArray{&orig} -} - -// Len returns the number of elements in the slice. -// -// Returns "0" for a newly instance created with "NewAnyValueArray()". -func (es AnyValueArray) Len() int { - return len(*es.orig) -} - -// At returns the element at the given index. -// -// This function is used mostly for iterating over all the values in the slice: -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// ... // Do something with the element -// } -func (es AnyValueArray) At(ix int) AttributeValue { - return newAttributeValue(&(*es.orig)[ix]) -} - -// CopyTo copies all elements from the current slice to the dest. -func (es AnyValueArray) CopyTo(dest AnyValueArray) { - srcLen := es.Len() - destCap := cap(*dest.orig) - if srcLen <= destCap { - (*dest.orig) = (*dest.orig)[:srcLen:destCap] - } else { - (*dest.orig) = make([]otlpcommon.AnyValue, srcLen) - } - - for i := range *es.orig { - newAttributeValue(&(*es.orig)[i]).CopyTo(newAttributeValue(&(*dest.orig)[i])) - } -} - -// Resize is an operation that resizes the slice: -// 1. If the newLen <= len then equivalent with slice[0:newLen:cap]. -// 2. If the newLen > len then (newLen - cap) empty elements will be appended to the slice. -// -// Here is how a new AnyValueArray can be initialized: -// es := NewAnyValueArray() -// es.Resize(4) -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// // Here should set all the values for e. 
-// } -func (es AnyValueArray) Resize(newLen int) { - oldLen := len(*es.orig) - oldCap := cap(*es.orig) - if newLen <= oldLen { - *es.orig = (*es.orig)[:newLen:oldCap] - return - } - - if newLen > oldCap { - newOrig := make([]otlpcommon.AnyValue, oldLen, newLen) - copy(newOrig, *es.orig) - *es.orig = newOrig - } - - // Add extra empty elements to the array. - empty := otlpcommon.AnyValue{} - for i := oldLen; i < newLen; i++ { - *es.orig = append(*es.orig, empty) - } -} - -// Append will increase the length of the AnyValueArray by one and set the -// given AttributeValue at that new position. The original AttributeValue -// could still be referenced so do not reuse it after passing it to this -// method. -// Deprecated: Use AppendEmpty. -func (es AnyValueArray) Append(e AttributeValue) { - *es.orig = append(*es.orig, *e.orig) -} - -// AppendEmpty will append to the end of the slice an empty AttributeValue. -// It returns the newly added AttributeValue. -func (es AnyValueArray) AppendEmpty() AttributeValue { - *es.orig = append(*es.orig, otlpcommon.AnyValue{}) - return es.At(es.Len() - 1) -} - -// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. -// The current slice will be cleared. -func (es AnyValueArray) MoveAndAppendTo(dest AnyValueArray) { - if *dest.orig == nil { - // We can simply move the entire vector and avoid any allocations. - *dest.orig = *es.orig - } else { - *dest.orig = append(*dest.orig, *es.orig...) - } - *es.orig = nil -} - -// RemoveIf calls f sequentially for each element present in the slice. -// If f returns true, the element is removed from the slice. -func (es AnyValueArray) RemoveIf(f func(AttributeValue) bool) { - newLen := 0 - for i := 0; i < len(*es.orig); i++ { - if f(es.At(i)) { - continue - } - if newLen == i { - // Nothing to move, element is at the right place. - newLen++ - continue - } - (*es.orig)[newLen] = (*es.orig)[i] - newLen++ - } - // TODO: Prevent memory leak by erasing truncated values. - *es.orig = (*es.orig)[:newLen] -} diff --git a/internal/otel_collector/consumer/pdata/generated_log.go b/internal/otel_collector/consumer/pdata/generated_log.go deleted file mode 100644 index 7c1ed7e1544..00000000000 --- a/internal/otel_collector/consumer/pdata/generated_log.go +++ /dev/null @@ -1,651 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by "cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "go run cmd/pdatagen/main.go". - -package pdata - -import ( - otlplogs "go.opentelemetry.io/collector/internal/data/protogen/logs/v1" -) - -// ResourceLogsSlice logically represents a slice of ResourceLogs. -// -// This is a reference type. If passed by value and callee modifies it, the -// caller will see the modification. -// -// Must use NewResourceLogsSlice function to create new instances. -// Important: zero-initialized instance is not valid for use. 
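The generated slice types above mark Append as deprecated in favour of AppendEmpty, which grows the slice and returns the new element for in-place initialization. A minimal sketch of that pattern on AnyValueArray, together with RemoveIf, is below (model/pdata path assumed for v0.30.0):

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/model/pdata" // assumed v0.30.0 location of pdata
)

func main() {
	arr := pdata.NewAnyValueArray()

	// AppendEmpty replaces the deprecated Append: append an empty element,
	// then set it through the returned handle.
	arr.AppendEmpty().SetStringVal("first")
	arr.AppendEmpty().SetIntVal(2)

	for i := 0; i < arr.Len(); i++ {
		fmt.Println(arr.At(i).Type())
	}

	// RemoveIf prunes in place; here everything that is not a string.
	arr.RemoveIf(func(v pdata.AttributeValue) bool {
		return v.Type() != pdata.AttributeValueTypeString
	})
	fmt.Println(arr.Len()) // 1
}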
-type ResourceLogsSlice struct { - // orig points to the slice otlplogs.ResourceLogs field contained somewhere else. - // We use pointer-to-slice to be able to modify it in functions like Resize. - orig *[]*otlplogs.ResourceLogs -} - -func newResourceLogsSlice(orig *[]*otlplogs.ResourceLogs) ResourceLogsSlice { - return ResourceLogsSlice{orig} -} - -// NewResourceLogsSlice creates a ResourceLogsSlice with 0 elements. -// Can use "Resize" to initialize with a given length. -func NewResourceLogsSlice() ResourceLogsSlice { - orig := []*otlplogs.ResourceLogs(nil) - return ResourceLogsSlice{&orig} -} - -// Len returns the number of elements in the slice. -// -// Returns "0" for a newly instance created with "NewResourceLogsSlice()". -func (es ResourceLogsSlice) Len() int { - return len(*es.orig) -} - -// At returns the element at the given index. -// -// This function is used mostly for iterating over all the values in the slice: -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// ... // Do something with the element -// } -func (es ResourceLogsSlice) At(ix int) ResourceLogs { - return newResourceLogs((*es.orig)[ix]) -} - -// CopyTo copies all elements from the current slice to the dest. -func (es ResourceLogsSlice) CopyTo(dest ResourceLogsSlice) { - srcLen := es.Len() - destCap := cap(*dest.orig) - if srcLen <= destCap { - (*dest.orig) = (*dest.orig)[:srcLen:destCap] - for i := range *es.orig { - newResourceLogs((*es.orig)[i]).CopyTo(newResourceLogs((*dest.orig)[i])) - } - return - } - origs := make([]otlplogs.ResourceLogs, srcLen) - wrappers := make([]*otlplogs.ResourceLogs, srcLen) - for i := range *es.orig { - wrappers[i] = &origs[i] - newResourceLogs((*es.orig)[i]).CopyTo(newResourceLogs(wrappers[i])) - } - *dest.orig = wrappers -} - -// Resize is an operation that resizes the slice: -// 1. If the newLen <= len then equivalent with slice[0:newLen:cap]. -// 2. If the newLen > len then (newLen - cap) empty elements will be appended to the slice. -// -// Here is how a new ResourceLogsSlice can be initialized: -// es := NewResourceLogsSlice() -// es.Resize(4) -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// // Here should set all the values for e. -// } -func (es ResourceLogsSlice) Resize(newLen int) { - oldLen := len(*es.orig) - oldCap := cap(*es.orig) - if newLen <= oldLen { - *es.orig = (*es.orig)[:newLen:oldCap] - return - } - - if newLen > oldCap { - newOrig := make([]*otlplogs.ResourceLogs, oldLen, newLen) - copy(newOrig, *es.orig) - *es.orig = newOrig - } - - // Add extra empty elements to the array. - extraOrigs := make([]otlplogs.ResourceLogs, newLen-oldLen) - for i := range extraOrigs { - *es.orig = append(*es.orig, &extraOrigs[i]) - } -} - -// Append will increase the length of the ResourceLogsSlice by one and set the -// given ResourceLogs at that new position. The original ResourceLogs -// could still be referenced so do not reuse it after passing it to this -// method. -// Deprecated: Use AppendEmpty. -func (es ResourceLogsSlice) Append(e ResourceLogs) { - *es.orig = append(*es.orig, e.orig) -} - -// AppendEmpty will append to the end of the slice an empty ResourceLogs. -// It returns the newly added ResourceLogs. -func (es ResourceLogsSlice) AppendEmpty() ResourceLogs { - *es.orig = append(*es.orig, &otlplogs.ResourceLogs{}) - return es.At(es.Len() - 1) -} - -// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. -// The current slice will be cleared. 
-func (es ResourceLogsSlice) MoveAndAppendTo(dest ResourceLogsSlice) { - if *dest.orig == nil { - // We can simply move the entire vector and avoid any allocations. - *dest.orig = *es.orig - } else { - *dest.orig = append(*dest.orig, *es.orig...) - } - *es.orig = nil -} - -// RemoveIf calls f sequentially for each element present in the slice. -// If f returns true, the element is removed from the slice. -func (es ResourceLogsSlice) RemoveIf(f func(ResourceLogs) bool) { - newLen := 0 - for i := 0; i < len(*es.orig); i++ { - if f(es.At(i)) { - continue - } - if newLen == i { - // Nothing to move, element is at the right place. - newLen++ - continue - } - (*es.orig)[newLen] = (*es.orig)[i] - newLen++ - } - // TODO: Prevent memory leak by erasing truncated values. - *es.orig = (*es.orig)[:newLen] -} - -// ResourceLogs is a collection of logs from a Resource. -// -// This is a reference type, if passed by value and callee modifies it the -// caller will see the modification. -// -// Must use NewResourceLogs function to create new instances. -// Important: zero-initialized instance is not valid for use. -type ResourceLogs struct { - orig *otlplogs.ResourceLogs -} - -func newResourceLogs(orig *otlplogs.ResourceLogs) ResourceLogs { - return ResourceLogs{orig: orig} -} - -// NewResourceLogs creates a new empty ResourceLogs. -// -// This must be used only in testing code since no "Set" method available. -func NewResourceLogs() ResourceLogs { - return newResourceLogs(&otlplogs.ResourceLogs{}) -} - -// Resource returns the resource associated with this ResourceLogs. -func (ms ResourceLogs) Resource() Resource { - return newResource(&(*ms.orig).Resource) -} - -// InstrumentationLibraryLogs returns the InstrumentationLibraryLogs associated with this ResourceLogs. -func (ms ResourceLogs) InstrumentationLibraryLogs() InstrumentationLibraryLogsSlice { - return newInstrumentationLibraryLogsSlice(&(*ms.orig).InstrumentationLibraryLogs) -} - -// CopyTo copies all properties from the current struct to the dest. -func (ms ResourceLogs) CopyTo(dest ResourceLogs) { - ms.Resource().CopyTo(dest.Resource()) - ms.InstrumentationLibraryLogs().CopyTo(dest.InstrumentationLibraryLogs()) -} - -// InstrumentationLibraryLogsSlice logically represents a slice of InstrumentationLibraryLogs. -// -// This is a reference type. If passed by value and callee modifies it, the -// caller will see the modification. -// -// Must use NewInstrumentationLibraryLogsSlice function to create new instances. -// Important: zero-initialized instance is not valid for use. -type InstrumentationLibraryLogsSlice struct { - // orig points to the slice otlplogs.InstrumentationLibraryLogs field contained somewhere else. - // We use pointer-to-slice to be able to modify it in functions like Resize. - orig *[]*otlplogs.InstrumentationLibraryLogs -} - -func newInstrumentationLibraryLogsSlice(orig *[]*otlplogs.InstrumentationLibraryLogs) InstrumentationLibraryLogsSlice { - return InstrumentationLibraryLogsSlice{orig} -} - -// NewInstrumentationLibraryLogsSlice creates a InstrumentationLibraryLogsSlice with 0 elements. -// Can use "Resize" to initialize with a given length. -func NewInstrumentationLibraryLogsSlice() InstrumentationLibraryLogsSlice { - orig := []*otlplogs.InstrumentationLibraryLogs(nil) - return InstrumentationLibraryLogsSlice{&orig} -} - -// Len returns the number of elements in the slice. -// -// Returns "0" for a newly instance created with "NewInstrumentationLibraryLogsSlice()". 
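MoveAndAppendTo, shown above for ResourceLogsSlice, transfers elements instead of copying them and clears the source slice afterwards, which is why the generated comment mentions avoiding allocations when the destination is empty. A sketch of using it to merge two Logs payloads (model/pdata path assumed for v0.30.0):

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/model/pdata" // assumed v0.30.0 location of pdata
)

// mergeLogs drains src into dst using MoveAndAppendTo, which moves the
// underlying elements and leaves src empty.
func mergeLogs(dst, src pdata.Logs) {
	src.ResourceLogs().MoveAndAppendTo(dst.ResourceLogs())
}

func main() {
	a, b := pdata.NewLogs(), pdata.NewLogs()
	a.ResourceLogs().AppendEmpty()
	b.ResourceLogs().AppendEmpty()

	mergeLogs(a, b)
	fmt.Println(a.ResourceLogs().Len(), b.ResourceLogs().Len()) // 2 0
}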
-func (es InstrumentationLibraryLogsSlice) Len() int { - return len(*es.orig) -} - -// At returns the element at the given index. -// -// This function is used mostly for iterating over all the values in the slice: -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// ... // Do something with the element -// } -func (es InstrumentationLibraryLogsSlice) At(ix int) InstrumentationLibraryLogs { - return newInstrumentationLibraryLogs((*es.orig)[ix]) -} - -// CopyTo copies all elements from the current slice to the dest. -func (es InstrumentationLibraryLogsSlice) CopyTo(dest InstrumentationLibraryLogsSlice) { - srcLen := es.Len() - destCap := cap(*dest.orig) - if srcLen <= destCap { - (*dest.orig) = (*dest.orig)[:srcLen:destCap] - for i := range *es.orig { - newInstrumentationLibraryLogs((*es.orig)[i]).CopyTo(newInstrumentationLibraryLogs((*dest.orig)[i])) - } - return - } - origs := make([]otlplogs.InstrumentationLibraryLogs, srcLen) - wrappers := make([]*otlplogs.InstrumentationLibraryLogs, srcLen) - for i := range *es.orig { - wrappers[i] = &origs[i] - newInstrumentationLibraryLogs((*es.orig)[i]).CopyTo(newInstrumentationLibraryLogs(wrappers[i])) - } - *dest.orig = wrappers -} - -// Resize is an operation that resizes the slice: -// 1. If the newLen <= len then equivalent with slice[0:newLen:cap]. -// 2. If the newLen > len then (newLen - cap) empty elements will be appended to the slice. -// -// Here is how a new InstrumentationLibraryLogsSlice can be initialized: -// es := NewInstrumentationLibraryLogsSlice() -// es.Resize(4) -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// // Here should set all the values for e. -// } -func (es InstrumentationLibraryLogsSlice) Resize(newLen int) { - oldLen := len(*es.orig) - oldCap := cap(*es.orig) - if newLen <= oldLen { - *es.orig = (*es.orig)[:newLen:oldCap] - return - } - - if newLen > oldCap { - newOrig := make([]*otlplogs.InstrumentationLibraryLogs, oldLen, newLen) - copy(newOrig, *es.orig) - *es.orig = newOrig - } - - // Add extra empty elements to the array. - extraOrigs := make([]otlplogs.InstrumentationLibraryLogs, newLen-oldLen) - for i := range extraOrigs { - *es.orig = append(*es.orig, &extraOrigs[i]) - } -} - -// Append will increase the length of the InstrumentationLibraryLogsSlice by one and set the -// given InstrumentationLibraryLogs at that new position. The original InstrumentationLibraryLogs -// could still be referenced so do not reuse it after passing it to this -// method. -// Deprecated: Use AppendEmpty. -func (es InstrumentationLibraryLogsSlice) Append(e InstrumentationLibraryLogs) { - *es.orig = append(*es.orig, e.orig) -} - -// AppendEmpty will append to the end of the slice an empty InstrumentationLibraryLogs. -// It returns the newly added InstrumentationLibraryLogs. -func (es InstrumentationLibraryLogsSlice) AppendEmpty() InstrumentationLibraryLogs { - *es.orig = append(*es.orig, &otlplogs.InstrumentationLibraryLogs{}) - return es.At(es.Len() - 1) -} - -// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. -// The current slice will be cleared. -func (es InstrumentationLibraryLogsSlice) MoveAndAppendTo(dest InstrumentationLibraryLogsSlice) { - if *dest.orig == nil { - // We can simply move the entire vector and avoid any allocations. - *dest.orig = *es.orig - } else { - *dest.orig = append(*dest.orig, *es.orig...) - } - *es.orig = nil -} - -// RemoveIf calls f sequentially for each element present in the slice. -// If f returns true, the element is removed from the slice. 
-func (es InstrumentationLibraryLogsSlice) RemoveIf(f func(InstrumentationLibraryLogs) bool) { - newLen := 0 - for i := 0; i < len(*es.orig); i++ { - if f(es.At(i)) { - continue - } - if newLen == i { - // Nothing to move, element is at the right place. - newLen++ - continue - } - (*es.orig)[newLen] = (*es.orig)[i] - newLen++ - } - // TODO: Prevent memory leak by erasing truncated values. - *es.orig = (*es.orig)[:newLen] -} - -// InstrumentationLibraryLogs is a collection of logs from a LibraryInstrumentation. -// -// This is a reference type, if passed by value and callee modifies it the -// caller will see the modification. -// -// Must use NewInstrumentationLibraryLogs function to create new instances. -// Important: zero-initialized instance is not valid for use. -type InstrumentationLibraryLogs struct { - orig *otlplogs.InstrumentationLibraryLogs -} - -func newInstrumentationLibraryLogs(orig *otlplogs.InstrumentationLibraryLogs) InstrumentationLibraryLogs { - return InstrumentationLibraryLogs{orig: orig} -} - -// NewInstrumentationLibraryLogs creates a new empty InstrumentationLibraryLogs. -// -// This must be used only in testing code since no "Set" method available. -func NewInstrumentationLibraryLogs() InstrumentationLibraryLogs { - return newInstrumentationLibraryLogs(&otlplogs.InstrumentationLibraryLogs{}) -} - -// InstrumentationLibrary returns the instrumentationlibrary associated with this InstrumentationLibraryLogs. -func (ms InstrumentationLibraryLogs) InstrumentationLibrary() InstrumentationLibrary { - return newInstrumentationLibrary(&(*ms.orig).InstrumentationLibrary) -} - -// Logs returns the Logs associated with this InstrumentationLibraryLogs. -func (ms InstrumentationLibraryLogs) Logs() LogSlice { - return newLogSlice(&(*ms.orig).Logs) -} - -// CopyTo copies all properties from the current struct to the dest. -func (ms InstrumentationLibraryLogs) CopyTo(dest InstrumentationLibraryLogs) { - ms.InstrumentationLibrary().CopyTo(dest.InstrumentationLibrary()) - ms.Logs().CopyTo(dest.Logs()) -} - -// LogSlice logically represents a slice of LogRecord. -// -// This is a reference type. If passed by value and callee modifies it, the -// caller will see the modification. -// -// Must use NewLogSlice function to create new instances. -// Important: zero-initialized instance is not valid for use. -type LogSlice struct { - // orig points to the slice otlplogs.LogRecord field contained somewhere else. - // We use pointer-to-slice to be able to modify it in functions like Resize. - orig *[]*otlplogs.LogRecord -} - -func newLogSlice(orig *[]*otlplogs.LogRecord) LogSlice { - return LogSlice{orig} -} - -// NewLogSlice creates a LogSlice with 0 elements. -// Can use "Resize" to initialize with a given length. -func NewLogSlice() LogSlice { - orig := []*otlplogs.LogRecord(nil) - return LogSlice{&orig} -} - -// Len returns the number of elements in the slice. -// -// Returns "0" for a newly instance created with "NewLogSlice()". -func (es LogSlice) Len() int { - return len(*es.orig) -} - -// At returns the element at the given index. -// -// This function is used mostly for iterating over all the values in the slice: -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// ... // Do something with the element -// } -func (es LogSlice) At(ix int) LogRecord { - return newLogRecord((*es.orig)[ix]) -} - -// CopyTo copies all elements from the current slice to the dest. 
-func (es LogSlice) CopyTo(dest LogSlice) { - srcLen := es.Len() - destCap := cap(*dest.orig) - if srcLen <= destCap { - (*dest.orig) = (*dest.orig)[:srcLen:destCap] - for i := range *es.orig { - newLogRecord((*es.orig)[i]).CopyTo(newLogRecord((*dest.orig)[i])) - } - return - } - origs := make([]otlplogs.LogRecord, srcLen) - wrappers := make([]*otlplogs.LogRecord, srcLen) - for i := range *es.orig { - wrappers[i] = &origs[i] - newLogRecord((*es.orig)[i]).CopyTo(newLogRecord(wrappers[i])) - } - *dest.orig = wrappers -} - -// Resize is an operation that resizes the slice: -// 1. If the newLen <= len then equivalent with slice[0:newLen:cap]. -// 2. If the newLen > len then (newLen - cap) empty elements will be appended to the slice. -// -// Here is how a new LogSlice can be initialized: -// es := NewLogSlice() -// es.Resize(4) -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// // Here should set all the values for e. -// } -func (es LogSlice) Resize(newLen int) { - oldLen := len(*es.orig) - oldCap := cap(*es.orig) - if newLen <= oldLen { - *es.orig = (*es.orig)[:newLen:oldCap] - return - } - - if newLen > oldCap { - newOrig := make([]*otlplogs.LogRecord, oldLen, newLen) - copy(newOrig, *es.orig) - *es.orig = newOrig - } - - // Add extra empty elements to the array. - extraOrigs := make([]otlplogs.LogRecord, newLen-oldLen) - for i := range extraOrigs { - *es.orig = append(*es.orig, &extraOrigs[i]) - } -} - -// Append will increase the length of the LogSlice by one and set the -// given LogRecord at that new position. The original LogRecord -// could still be referenced so do not reuse it after passing it to this -// method. -// Deprecated: Use AppendEmpty. -func (es LogSlice) Append(e LogRecord) { - *es.orig = append(*es.orig, e.orig) -} - -// AppendEmpty will append to the end of the slice an empty LogRecord. -// It returns the newly added LogRecord. -func (es LogSlice) AppendEmpty() LogRecord { - *es.orig = append(*es.orig, &otlplogs.LogRecord{}) - return es.At(es.Len() - 1) -} - -// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. -// The current slice will be cleared. -func (es LogSlice) MoveAndAppendTo(dest LogSlice) { - if *dest.orig == nil { - // We can simply move the entire vector and avoid any allocations. - *dest.orig = *es.orig - } else { - *dest.orig = append(*dest.orig, *es.orig...) - } - *es.orig = nil -} - -// RemoveIf calls f sequentially for each element present in the slice. -// If f returns true, the element is removed from the slice. -func (es LogSlice) RemoveIf(f func(LogRecord) bool) { - newLen := 0 - for i := 0; i < len(*es.orig); i++ { - if f(es.At(i)) { - continue - } - if newLen == i { - // Nothing to move, element is at the right place. - newLen++ - continue - } - (*es.orig)[newLen] = (*es.orig)[i] - newLen++ - } - // TODO: Prevent memory leak by erasing truncated values. - *es.orig = (*es.orig)[:newLen] -} - -// LogRecord are experimental implementation of OpenTelemetry Log Data Model. - -// -// This is a reference type, if passed by value and callee modifies it the -// caller will see the modification. -// -// Must use NewLogRecord function to create new instances. -// Important: zero-initialized instance is not valid for use. -type LogRecord struct { - orig *otlplogs.LogRecord -} - -func newLogRecord(orig *otlplogs.LogRecord) LogRecord { - return LogRecord{orig: orig} -} - -// NewLogRecord creates a new empty LogRecord. -// -// This must be used only in testing code since no "Set" method available. 
-func NewLogRecord() LogRecord { - return newLogRecord(&otlplogs.LogRecord{}) -} - -// Timestamp returns the timestamp associated with this LogRecord. -func (ms LogRecord) Timestamp() Timestamp { - return Timestamp((*ms.orig).TimeUnixNano) -} - -// SetTimestamp replaces the timestamp associated with this LogRecord. -func (ms LogRecord) SetTimestamp(v Timestamp) { - (*ms.orig).TimeUnixNano = uint64(v) -} - -// TraceID returns the traceid associated with this LogRecord. -func (ms LogRecord) TraceID() TraceID { - return TraceID{orig: ((*ms.orig).TraceId)} -} - -// SetTraceID replaces the traceid associated with this LogRecord. -func (ms LogRecord) SetTraceID(v TraceID) { - (*ms.orig).TraceId = v.orig -} - -// SpanID returns the spanid associated with this LogRecord. -func (ms LogRecord) SpanID() SpanID { - return SpanID{orig: ((*ms.orig).SpanId)} -} - -// SetSpanID replaces the spanid associated with this LogRecord. -func (ms LogRecord) SetSpanID(v SpanID) { - (*ms.orig).SpanId = v.orig -} - -// Flags returns the flags associated with this LogRecord. -func (ms LogRecord) Flags() uint32 { - return uint32((*ms.orig).Flags) -} - -// SetFlags replaces the flags associated with this LogRecord. -func (ms LogRecord) SetFlags(v uint32) { - (*ms.orig).Flags = uint32(v) -} - -// SeverityText returns the severitytext associated with this LogRecord. -func (ms LogRecord) SeverityText() string { - return (*ms.orig).SeverityText -} - -// SetSeverityText replaces the severitytext associated with this LogRecord. -func (ms LogRecord) SetSeverityText(v string) { - (*ms.orig).SeverityText = v -} - -// SeverityNumber returns the severitynumber associated with this LogRecord. -func (ms LogRecord) SeverityNumber() SeverityNumber { - return SeverityNumber((*ms.orig).SeverityNumber) -} - -// SetSeverityNumber replaces the severitynumber associated with this LogRecord. -func (ms LogRecord) SetSeverityNumber(v SeverityNumber) { - (*ms.orig).SeverityNumber = otlplogs.SeverityNumber(v) -} - -// Name returns the name associated with this LogRecord. -func (ms LogRecord) Name() string { - return (*ms.orig).Name -} - -// SetName replaces the name associated with this LogRecord. -func (ms LogRecord) SetName(v string) { - (*ms.orig).Name = v -} - -// Body returns the body associated with this LogRecord. -func (ms LogRecord) Body() AttributeValue { - return newAttributeValue(&(*ms.orig).Body) -} - -// Attributes returns the Attributes associated with this LogRecord. -func (ms LogRecord) Attributes() AttributeMap { - return newAttributeMap(&(*ms.orig).Attributes) -} - -// DroppedAttributesCount returns the droppedattributescount associated with this LogRecord. -func (ms LogRecord) DroppedAttributesCount() uint32 { - return (*ms.orig).DroppedAttributesCount -} - -// SetDroppedAttributesCount replaces the droppedattributescount associated with this LogRecord. -func (ms LogRecord) SetDroppedAttributesCount(v uint32) { - (*ms.orig).DroppedAttributesCount = v -} - -// CopyTo copies all properties from the current struct to the dest. 
-func (ms LogRecord) CopyTo(dest LogRecord) { - dest.SetTimestamp(ms.Timestamp()) - dest.SetTraceID(ms.TraceID()) - dest.SetSpanID(ms.SpanID()) - dest.SetFlags(ms.Flags()) - dest.SetSeverityText(ms.SeverityText()) - dest.SetSeverityNumber(ms.SeverityNumber()) - dest.SetName(ms.Name()) - ms.Body().CopyTo(dest.Body()) - ms.Attributes().CopyTo(dest.Attributes()) - dest.SetDroppedAttributesCount(ms.DroppedAttributesCount()) -} diff --git a/internal/otel_collector/consumer/pdata/generated_metrics.go b/internal/otel_collector/consumer/pdata/generated_metrics.go deleted file mode 100644 index 51cebc96931..00000000000 --- a/internal/otel_collector/consumer/pdata/generated_metrics.go +++ /dev/null @@ -1,2595 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by "cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "go run cmd/pdatagen/main.go". - -package pdata - -import ( - otlpmetrics "go.opentelemetry.io/collector/internal/data/protogen/metrics/v1" -) - -// ResourceMetricsSlice logically represents a slice of ResourceMetrics. -// -// This is a reference type. If passed by value and callee modifies it, the -// caller will see the modification. -// -// Must use NewResourceMetricsSlice function to create new instances. -// Important: zero-initialized instance is not valid for use. -type ResourceMetricsSlice struct { - // orig points to the slice otlpmetrics.ResourceMetrics field contained somewhere else. - // We use pointer-to-slice to be able to modify it in functions like Resize. - orig *[]*otlpmetrics.ResourceMetrics -} - -func newResourceMetricsSlice(orig *[]*otlpmetrics.ResourceMetrics) ResourceMetricsSlice { - return ResourceMetricsSlice{orig} -} - -// NewResourceMetricsSlice creates a ResourceMetricsSlice with 0 elements. -// Can use "Resize" to initialize with a given length. -func NewResourceMetricsSlice() ResourceMetricsSlice { - orig := []*otlpmetrics.ResourceMetrics(nil) - return ResourceMetricsSlice{&orig} -} - -// Len returns the number of elements in the slice. -// -// Returns "0" for a newly instance created with "NewResourceMetricsSlice()". -func (es ResourceMetricsSlice) Len() int { - return len(*es.orig) -} - -// At returns the element at the given index. -// -// This function is used mostly for iterating over all the values in the slice: -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// ... // Do something with the element -// } -func (es ResourceMetricsSlice) At(ix int) ResourceMetrics { - return newResourceMetrics((*es.orig)[ix]) -} - -// CopyTo copies all elements from the current slice to the dest. 
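Putting the log accessors above together, a record is built by walking Logs -> ResourceLogs -> InstrumentationLibraryLogs -> LogRecord via AppendEmpty and then using the setters. The sketch below uses only accessors shown in the removed generated_log.go; the model/pdata import path and the accessor names being unchanged at v0.30.0 are assumptions.

package example

import (
	"time"

	"go.opentelemetry.io/collector/model/pdata" // assumed v0.30.0 location of pdata
)

// buildLogs shows the AppendEmpty-based construction path through the
// generated log types.
func buildLogs() pdata.Logs {
	ld := pdata.NewLogs()

	rl := ld.ResourceLogs().AppendEmpty()
	ill := rl.InstrumentationLibraryLogs().AppendEmpty()
	ill.InstrumentationLibrary().SetName("example-library")

	lr := ill.Logs().AppendEmpty()
	lr.SetTimestamp(pdata.Timestamp(uint64(time.Now().UnixNano())))
	lr.SetSeverityText("INFO")
	lr.SetName("example.event")
	lr.Body().SetStringVal("hello from the generated pdata API")
	lr.Attributes().InsertString("component", "demo")

	return ld
}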
-func (es ResourceMetricsSlice) CopyTo(dest ResourceMetricsSlice) { - srcLen := es.Len() - destCap := cap(*dest.orig) - if srcLen <= destCap { - (*dest.orig) = (*dest.orig)[:srcLen:destCap] - for i := range *es.orig { - newResourceMetrics((*es.orig)[i]).CopyTo(newResourceMetrics((*dest.orig)[i])) - } - return - } - origs := make([]otlpmetrics.ResourceMetrics, srcLen) - wrappers := make([]*otlpmetrics.ResourceMetrics, srcLen) - for i := range *es.orig { - wrappers[i] = &origs[i] - newResourceMetrics((*es.orig)[i]).CopyTo(newResourceMetrics(wrappers[i])) - } - *dest.orig = wrappers -} - -// Resize is an operation that resizes the slice: -// 1. If the newLen <= len then equivalent with slice[0:newLen:cap]. -// 2. If the newLen > len then (newLen - cap) empty elements will be appended to the slice. -// -// Here is how a new ResourceMetricsSlice can be initialized: -// es := NewResourceMetricsSlice() -// es.Resize(4) -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// // Here should set all the values for e. -// } -func (es ResourceMetricsSlice) Resize(newLen int) { - oldLen := len(*es.orig) - oldCap := cap(*es.orig) - if newLen <= oldLen { - *es.orig = (*es.orig)[:newLen:oldCap] - return - } - - if newLen > oldCap { - newOrig := make([]*otlpmetrics.ResourceMetrics, oldLen, newLen) - copy(newOrig, *es.orig) - *es.orig = newOrig - } - - // Add extra empty elements to the array. - extraOrigs := make([]otlpmetrics.ResourceMetrics, newLen-oldLen) - for i := range extraOrigs { - *es.orig = append(*es.orig, &extraOrigs[i]) - } -} - -// Append will increase the length of the ResourceMetricsSlice by one and set the -// given ResourceMetrics at that new position. The original ResourceMetrics -// could still be referenced so do not reuse it after passing it to this -// method. -// Deprecated: Use AppendEmpty. -func (es ResourceMetricsSlice) Append(e ResourceMetrics) { - *es.orig = append(*es.orig, e.orig) -} - -// AppendEmpty will append to the end of the slice an empty ResourceMetrics. -// It returns the newly added ResourceMetrics. -func (es ResourceMetricsSlice) AppendEmpty() ResourceMetrics { - *es.orig = append(*es.orig, &otlpmetrics.ResourceMetrics{}) - return es.At(es.Len() - 1) -} - -// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. -// The current slice will be cleared. -func (es ResourceMetricsSlice) MoveAndAppendTo(dest ResourceMetricsSlice) { - if *dest.orig == nil { - // We can simply move the entire vector and avoid any allocations. - *dest.orig = *es.orig - } else { - *dest.orig = append(*dest.orig, *es.orig...) - } - *es.orig = nil -} - -// RemoveIf calls f sequentially for each element present in the slice. -// If f returns true, the element is removed from the slice. -func (es ResourceMetricsSlice) RemoveIf(f func(ResourceMetrics) bool) { - newLen := 0 - for i := 0; i < len(*es.orig); i++ { - if f(es.At(i)) { - continue - } - if newLen == i { - // Nothing to move, element is at the right place. - newLen++ - continue - } - (*es.orig)[newLen] = (*es.orig)[i] - newLen++ - } - // TODO: Prevent memory leak by erasing truncated values. - *es.orig = (*es.orig)[:newLen] -} - -// ResourceMetrics is a collection of metrics from a Resource. -// -// This is a reference type, if passed by value and callee modifies it the -// caller will see the modification. -// -// Must use NewResourceMetrics function to create new instances. -// Important: zero-initialized instance is not valid for use. 
-type ResourceMetrics struct { - orig *otlpmetrics.ResourceMetrics -} - -func newResourceMetrics(orig *otlpmetrics.ResourceMetrics) ResourceMetrics { - return ResourceMetrics{orig: orig} -} - -// NewResourceMetrics creates a new empty ResourceMetrics. -// -// This must be used only in testing code since no "Set" method available. -func NewResourceMetrics() ResourceMetrics { - return newResourceMetrics(&otlpmetrics.ResourceMetrics{}) -} - -// Resource returns the resource associated with this ResourceMetrics. -func (ms ResourceMetrics) Resource() Resource { - return newResource(&(*ms.orig).Resource) -} - -// InstrumentationLibraryMetrics returns the InstrumentationLibraryMetrics associated with this ResourceMetrics. -func (ms ResourceMetrics) InstrumentationLibraryMetrics() InstrumentationLibraryMetricsSlice { - return newInstrumentationLibraryMetricsSlice(&(*ms.orig).InstrumentationLibraryMetrics) -} - -// CopyTo copies all properties from the current struct to the dest. -func (ms ResourceMetrics) CopyTo(dest ResourceMetrics) { - ms.Resource().CopyTo(dest.Resource()) - ms.InstrumentationLibraryMetrics().CopyTo(dest.InstrumentationLibraryMetrics()) -} - -// InstrumentationLibraryMetricsSlice logically represents a slice of InstrumentationLibraryMetrics. -// -// This is a reference type. If passed by value and callee modifies it, the -// caller will see the modification. -// -// Must use NewInstrumentationLibraryMetricsSlice function to create new instances. -// Important: zero-initialized instance is not valid for use. -type InstrumentationLibraryMetricsSlice struct { - // orig points to the slice otlpmetrics.InstrumentationLibraryMetrics field contained somewhere else. - // We use pointer-to-slice to be able to modify it in functions like Resize. - orig *[]*otlpmetrics.InstrumentationLibraryMetrics -} - -func newInstrumentationLibraryMetricsSlice(orig *[]*otlpmetrics.InstrumentationLibraryMetrics) InstrumentationLibraryMetricsSlice { - return InstrumentationLibraryMetricsSlice{orig} -} - -// NewInstrumentationLibraryMetricsSlice creates a InstrumentationLibraryMetricsSlice with 0 elements. -// Can use "Resize" to initialize with a given length. -func NewInstrumentationLibraryMetricsSlice() InstrumentationLibraryMetricsSlice { - orig := []*otlpmetrics.InstrumentationLibraryMetrics(nil) - return InstrumentationLibraryMetricsSlice{&orig} -} - -// Len returns the number of elements in the slice. -// -// Returns "0" for a newly instance created with "NewInstrumentationLibraryMetricsSlice()". -func (es InstrumentationLibraryMetricsSlice) Len() int { - return len(*es.orig) -} - -// At returns the element at the given index. -// -// This function is used mostly for iterating over all the values in the slice: -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// ... // Do something with the element -// } -func (es InstrumentationLibraryMetricsSlice) At(ix int) InstrumentationLibraryMetrics { - return newInstrumentationLibraryMetrics((*es.orig)[ix]) -} - -// CopyTo copies all elements from the current slice to the dest. 
-func (es InstrumentationLibraryMetricsSlice) CopyTo(dest InstrumentationLibraryMetricsSlice) { - srcLen := es.Len() - destCap := cap(*dest.orig) - if srcLen <= destCap { - (*dest.orig) = (*dest.orig)[:srcLen:destCap] - for i := range *es.orig { - newInstrumentationLibraryMetrics((*es.orig)[i]).CopyTo(newInstrumentationLibraryMetrics((*dest.orig)[i])) - } - return - } - origs := make([]otlpmetrics.InstrumentationLibraryMetrics, srcLen) - wrappers := make([]*otlpmetrics.InstrumentationLibraryMetrics, srcLen) - for i := range *es.orig { - wrappers[i] = &origs[i] - newInstrumentationLibraryMetrics((*es.orig)[i]).CopyTo(newInstrumentationLibraryMetrics(wrappers[i])) - } - *dest.orig = wrappers -} - -// Resize is an operation that resizes the slice: -// 1. If the newLen <= len then equivalent with slice[0:newLen:cap]. -// 2. If the newLen > len then (newLen - cap) empty elements will be appended to the slice. -// -// Here is how a new InstrumentationLibraryMetricsSlice can be initialized: -// es := NewInstrumentationLibraryMetricsSlice() -// es.Resize(4) -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// // Here should set all the values for e. -// } -func (es InstrumentationLibraryMetricsSlice) Resize(newLen int) { - oldLen := len(*es.orig) - oldCap := cap(*es.orig) - if newLen <= oldLen { - *es.orig = (*es.orig)[:newLen:oldCap] - return - } - - if newLen > oldCap { - newOrig := make([]*otlpmetrics.InstrumentationLibraryMetrics, oldLen, newLen) - copy(newOrig, *es.orig) - *es.orig = newOrig - } - - // Add extra empty elements to the array. - extraOrigs := make([]otlpmetrics.InstrumentationLibraryMetrics, newLen-oldLen) - for i := range extraOrigs { - *es.orig = append(*es.orig, &extraOrigs[i]) - } -} - -// Append will increase the length of the InstrumentationLibraryMetricsSlice by one and set the -// given InstrumentationLibraryMetrics at that new position. The original InstrumentationLibraryMetrics -// could still be referenced so do not reuse it after passing it to this -// method. -// Deprecated: Use AppendEmpty. -func (es InstrumentationLibraryMetricsSlice) Append(e InstrumentationLibraryMetrics) { - *es.orig = append(*es.orig, e.orig) -} - -// AppendEmpty will append to the end of the slice an empty InstrumentationLibraryMetrics. -// It returns the newly added InstrumentationLibraryMetrics. -func (es InstrumentationLibraryMetricsSlice) AppendEmpty() InstrumentationLibraryMetrics { - *es.orig = append(*es.orig, &otlpmetrics.InstrumentationLibraryMetrics{}) - return es.At(es.Len() - 1) -} - -// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. -// The current slice will be cleared. -func (es InstrumentationLibraryMetricsSlice) MoveAndAppendTo(dest InstrumentationLibraryMetricsSlice) { - if *dest.orig == nil { - // We can simply move the entire vector and avoid any allocations. - *dest.orig = *es.orig - } else { - *dest.orig = append(*dest.orig, *es.orig...) - } - *es.orig = nil -} - -// RemoveIf calls f sequentially for each element present in the slice. -// If f returns true, the element is removed from the slice. -func (es InstrumentationLibraryMetricsSlice) RemoveIf(f func(InstrumentationLibraryMetrics) bool) { - newLen := 0 - for i := 0; i < len(*es.orig); i++ { - if f(es.At(i)) { - continue - } - if newLen == i { - // Nothing to move, element is at the right place. - newLen++ - continue - } - (*es.orig)[newLen] = (*es.orig)[i] - newLen++ - } - // TODO: Prevent memory leak by erasing truncated values. 
- *es.orig = (*es.orig)[:newLen] -} - -// InstrumentationLibraryMetrics is a collection of metrics from a LibraryInstrumentation. -// -// This is a reference type, if passed by value and callee modifies it the -// caller will see the modification. -// -// Must use NewInstrumentationLibraryMetrics function to create new instances. -// Important: zero-initialized instance is not valid for use. -type InstrumentationLibraryMetrics struct { - orig *otlpmetrics.InstrumentationLibraryMetrics -} - -func newInstrumentationLibraryMetrics(orig *otlpmetrics.InstrumentationLibraryMetrics) InstrumentationLibraryMetrics { - return InstrumentationLibraryMetrics{orig: orig} -} - -// NewInstrumentationLibraryMetrics creates a new empty InstrumentationLibraryMetrics. -// -// This must be used only in testing code since no "Set" method available. -func NewInstrumentationLibraryMetrics() InstrumentationLibraryMetrics { - return newInstrumentationLibraryMetrics(&otlpmetrics.InstrumentationLibraryMetrics{}) -} - -// InstrumentationLibrary returns the instrumentationlibrary associated with this InstrumentationLibraryMetrics. -func (ms InstrumentationLibraryMetrics) InstrumentationLibrary() InstrumentationLibrary { - return newInstrumentationLibrary(&(*ms.orig).InstrumentationLibrary) -} - -// Metrics returns the Metrics associated with this InstrumentationLibraryMetrics. -func (ms InstrumentationLibraryMetrics) Metrics() MetricSlice { - return newMetricSlice(&(*ms.orig).Metrics) -} - -// CopyTo copies all properties from the current struct to the dest. -func (ms InstrumentationLibraryMetrics) CopyTo(dest InstrumentationLibraryMetrics) { - ms.InstrumentationLibrary().CopyTo(dest.InstrumentationLibrary()) - ms.Metrics().CopyTo(dest.Metrics()) -} - -// MetricSlice logically represents a slice of Metric. -// -// This is a reference type. If passed by value and callee modifies it, the -// caller will see the modification. -// -// Must use NewMetricSlice function to create new instances. -// Important: zero-initialized instance is not valid for use. -type MetricSlice struct { - // orig points to the slice otlpmetrics.Metric field contained somewhere else. - // We use pointer-to-slice to be able to modify it in functions like Resize. - orig *[]*otlpmetrics.Metric -} - -func newMetricSlice(orig *[]*otlpmetrics.Metric) MetricSlice { - return MetricSlice{orig} -} - -// NewMetricSlice creates a MetricSlice with 0 elements. -// Can use "Resize" to initialize with a given length. -func NewMetricSlice() MetricSlice { - orig := []*otlpmetrics.Metric(nil) - return MetricSlice{&orig} -} - -// Len returns the number of elements in the slice. -// -// Returns "0" for a newly instance created with "NewMetricSlice()". -func (es MetricSlice) Len() int { - return len(*es.orig) -} - -// At returns the element at the given index. -// -// This function is used mostly for iterating over all the values in the slice: -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// ... // Do something with the element -// } -func (es MetricSlice) At(ix int) Metric { - return newMetric((*es.orig)[ix]) -} - -// CopyTo copies all elements from the current slice to the dest. 
-func (es MetricSlice) CopyTo(dest MetricSlice) { - srcLen := es.Len() - destCap := cap(*dest.orig) - if srcLen <= destCap { - (*dest.orig) = (*dest.orig)[:srcLen:destCap] - for i := range *es.orig { - newMetric((*es.orig)[i]).CopyTo(newMetric((*dest.orig)[i])) - } - return - } - origs := make([]otlpmetrics.Metric, srcLen) - wrappers := make([]*otlpmetrics.Metric, srcLen) - for i := range *es.orig { - wrappers[i] = &origs[i] - newMetric((*es.orig)[i]).CopyTo(newMetric(wrappers[i])) - } - *dest.orig = wrappers -} - -// Resize is an operation that resizes the slice: -// 1. If the newLen <= len then equivalent with slice[0:newLen:cap]. -// 2. If the newLen > len then (newLen - cap) empty elements will be appended to the slice. -// -// Here is how a new MetricSlice can be initialized: -// es := NewMetricSlice() -// es.Resize(4) -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// // Here should set all the values for e. -// } -func (es MetricSlice) Resize(newLen int) { - oldLen := len(*es.orig) - oldCap := cap(*es.orig) - if newLen <= oldLen { - *es.orig = (*es.orig)[:newLen:oldCap] - return - } - - if newLen > oldCap { - newOrig := make([]*otlpmetrics.Metric, oldLen, newLen) - copy(newOrig, *es.orig) - *es.orig = newOrig - } - - // Add extra empty elements to the array. - extraOrigs := make([]otlpmetrics.Metric, newLen-oldLen) - for i := range extraOrigs { - *es.orig = append(*es.orig, &extraOrigs[i]) - } -} - -// Append will increase the length of the MetricSlice by one and set the -// given Metric at that new position. The original Metric -// could still be referenced so do not reuse it after passing it to this -// method. -// Deprecated: Use AppendEmpty. -func (es MetricSlice) Append(e Metric) { - *es.orig = append(*es.orig, e.orig) -} - -// AppendEmpty will append to the end of the slice an empty Metric. -// It returns the newly added Metric. -func (es MetricSlice) AppendEmpty() Metric { - *es.orig = append(*es.orig, &otlpmetrics.Metric{}) - return es.At(es.Len() - 1) -} - -// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. -// The current slice will be cleared. -func (es MetricSlice) MoveAndAppendTo(dest MetricSlice) { - if *dest.orig == nil { - // We can simply move the entire vector and avoid any allocations. - *dest.orig = *es.orig - } else { - *dest.orig = append(*dest.orig, *es.orig...) - } - *es.orig = nil -} - -// RemoveIf calls f sequentially for each element present in the slice. -// If f returns true, the element is removed from the slice. -func (es MetricSlice) RemoveIf(f func(Metric) bool) { - newLen := 0 - for i := 0; i < len(*es.orig); i++ { - if f(es.At(i)) { - continue - } - if newLen == i { - // Nothing to move, element is at the right place. - newLen++ - continue - } - (*es.orig)[newLen] = (*es.orig)[i] - newLen++ - } - // TODO: Prevent memory leak by erasing truncated values. - *es.orig = (*es.orig)[:newLen] -} - -// Metric represents one metric as a collection of datapoints. -// See Metric definition in OTLP: https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/metrics/v1/metrics.proto -// -// This is a reference type, if passed by value and callee modifies it the -// caller will see the modification. -// -// Must use NewMetric function to create new instances. -// Important: zero-initialized instance is not valid for use. 
-type Metric struct { - orig *otlpmetrics.Metric -} - -func newMetric(orig *otlpmetrics.Metric) Metric { - return Metric{orig: orig} -} - -// NewMetric creates a new empty Metric. -// -// This must be used only in testing code since no "Set" method available. -func NewMetric() Metric { - return newMetric(&otlpmetrics.Metric{}) -} - -// Name returns the name associated with this Metric. -func (ms Metric) Name() string { - return (*ms.orig).Name -} - -// SetName replaces the name associated with this Metric. -func (ms Metric) SetName(v string) { - (*ms.orig).Name = v -} - -// Description returns the description associated with this Metric. -func (ms Metric) Description() string { - return (*ms.orig).Description -} - -// SetDescription replaces the description associated with this Metric. -func (ms Metric) SetDescription(v string) { - (*ms.orig).Description = v -} - -// Unit returns the unit associated with this Metric. -func (ms Metric) Unit() string { - return (*ms.orig).Unit -} - -// SetUnit replaces the unit associated with this Metric. -func (ms Metric) SetUnit(v string) { - (*ms.orig).Unit = v -} - -// CopyTo copies all properties from the current struct to the dest. -func (ms Metric) CopyTo(dest Metric) { - dest.SetName(ms.Name()) - dest.SetDescription(ms.Description()) - dest.SetUnit(ms.Unit()) - copyData(ms.orig, dest.orig) -} - -// IntGauge represents the type of a int scalar metric that always exports the "current value" for every data point. -// -// This is a reference type, if passed by value and callee modifies it the -// caller will see the modification. -// -// Must use NewIntGauge function to create new instances. -// Important: zero-initialized instance is not valid for use. -type IntGauge struct { - orig *otlpmetrics.IntGauge -} - -func newIntGauge(orig *otlpmetrics.IntGauge) IntGauge { - return IntGauge{orig: orig} -} - -// NewIntGauge creates a new empty IntGauge. -// -// This must be used only in testing code since no "Set" method available. -func NewIntGauge() IntGauge { - return newIntGauge(&otlpmetrics.IntGauge{}) -} - -// DataPoints returns the DataPoints associated with this IntGauge. -func (ms IntGauge) DataPoints() IntDataPointSlice { - return newIntDataPointSlice(&(*ms.orig).DataPoints) -} - -// CopyTo copies all properties from the current struct to the dest. -func (ms IntGauge) CopyTo(dest IntGauge) { - ms.DataPoints().CopyTo(dest.DataPoints()) -} - -// DoubleGauge represents the type of a double scalar metric that always exports the "current value" for every data point. -// -// This is a reference type, if passed by value and callee modifies it the -// caller will see the modification. -// -// Must use NewDoubleGauge function to create new instances. -// Important: zero-initialized instance is not valid for use. -type DoubleGauge struct { - orig *otlpmetrics.DoubleGauge -} - -func newDoubleGauge(orig *otlpmetrics.DoubleGauge) DoubleGauge { - return DoubleGauge{orig: orig} -} - -// NewDoubleGauge creates a new empty DoubleGauge. -// -// This must be used only in testing code since no "Set" method available. -func NewDoubleGauge() DoubleGauge { - return newDoubleGauge(&otlpmetrics.DoubleGauge{}) -} - -// DataPoints returns the DataPoints associated with this DoubleGauge. -func (ms DoubleGauge) DataPoints() DoubleDataPointSlice { - return newDoubleDataPointSlice(&(*ms.orig).DataPoints) -} - -// CopyTo copies all properties from the current struct to the dest. 
-func (ms DoubleGauge) CopyTo(dest DoubleGauge) { - ms.DataPoints().CopyTo(dest.DataPoints()) -} - -// IntSum represents the type of a numeric int scalar metric that is calculated as a sum of all reported measurements over a time interval. -// -// This is a reference type, if passed by value and callee modifies it the -// caller will see the modification. -// -// Must use NewIntSum function to create new instances. -// Important: zero-initialized instance is not valid for use. -type IntSum struct { - orig *otlpmetrics.IntSum -} - -func newIntSum(orig *otlpmetrics.IntSum) IntSum { - return IntSum{orig: orig} -} - -// NewIntSum creates a new empty IntSum. -// -// This must be used only in testing code since no "Set" method available. -func NewIntSum() IntSum { - return newIntSum(&otlpmetrics.IntSum{}) -} - -// AggregationTemporality returns the aggregationtemporality associated with this IntSum. -func (ms IntSum) AggregationTemporality() AggregationTemporality { - return AggregationTemporality((*ms.orig).AggregationTemporality) -} - -// SetAggregationTemporality replaces the aggregationtemporality associated with this IntSum. -func (ms IntSum) SetAggregationTemporality(v AggregationTemporality) { - (*ms.orig).AggregationTemporality = otlpmetrics.AggregationTemporality(v) -} - -// IsMonotonic returns the ismonotonic associated with this IntSum. -func (ms IntSum) IsMonotonic() bool { - return (*ms.orig).IsMonotonic -} - -// SetIsMonotonic replaces the ismonotonic associated with this IntSum. -func (ms IntSum) SetIsMonotonic(v bool) { - (*ms.orig).IsMonotonic = v -} - -// DataPoints returns the DataPoints associated with this IntSum. -func (ms IntSum) DataPoints() IntDataPointSlice { - return newIntDataPointSlice(&(*ms.orig).DataPoints) -} - -// CopyTo copies all properties from the current struct to the dest. -func (ms IntSum) CopyTo(dest IntSum) { - dest.SetAggregationTemporality(ms.AggregationTemporality()) - dest.SetIsMonotonic(ms.IsMonotonic()) - ms.DataPoints().CopyTo(dest.DataPoints()) -} - -// DoubleSum represents the type of a numeric double scalar metric that is calculated as a sum of all reported measurements over a time interval. -// -// This is a reference type, if passed by value and callee modifies it the -// caller will see the modification. -// -// Must use NewDoubleSum function to create new instances. -// Important: zero-initialized instance is not valid for use. -type DoubleSum struct { - orig *otlpmetrics.DoubleSum -} - -func newDoubleSum(orig *otlpmetrics.DoubleSum) DoubleSum { - return DoubleSum{orig: orig} -} - -// NewDoubleSum creates a new empty DoubleSum. -// -// This must be used only in testing code since no "Set" method available. -func NewDoubleSum() DoubleSum { - return newDoubleSum(&otlpmetrics.DoubleSum{}) -} - -// AggregationTemporality returns the aggregationtemporality associated with this DoubleSum. -func (ms DoubleSum) AggregationTemporality() AggregationTemporality { - return AggregationTemporality((*ms.orig).AggregationTemporality) -} - -// SetAggregationTemporality replaces the aggregationtemporality associated with this DoubleSum. -func (ms DoubleSum) SetAggregationTemporality(v AggregationTemporality) { - (*ms.orig).AggregationTemporality = otlpmetrics.AggregationTemporality(v) -} - -// IsMonotonic returns the ismonotonic associated with this DoubleSum. -func (ms DoubleSum) IsMonotonic() bool { - return (*ms.orig).IsMonotonic -} - -// SetIsMonotonic replaces the ismonotonic associated with this DoubleSum. 
-func (ms DoubleSum) SetIsMonotonic(v bool) { - (*ms.orig).IsMonotonic = v -} - -// DataPoints returns the DataPoints associated with this DoubleSum. -func (ms DoubleSum) DataPoints() DoubleDataPointSlice { - return newDoubleDataPointSlice(&(*ms.orig).DataPoints) -} - -// CopyTo copies all properties from the current struct to the dest. -func (ms DoubleSum) CopyTo(dest DoubleSum) { - dest.SetAggregationTemporality(ms.AggregationTemporality()) - dest.SetIsMonotonic(ms.IsMonotonic()) - ms.DataPoints().CopyTo(dest.DataPoints()) -} - -// IntHistogram represents the type of a metric that is calculated by aggregating as a Histogram of all reported double measurements over a time interval. -// -// This is a reference type, if passed by value and callee modifies it the -// caller will see the modification. -// -// Must use NewIntHistogram function to create new instances. -// Important: zero-initialized instance is not valid for use. -type IntHistogram struct { - orig *otlpmetrics.IntHistogram -} - -func newIntHistogram(orig *otlpmetrics.IntHistogram) IntHistogram { - return IntHistogram{orig: orig} -} - -// NewIntHistogram creates a new empty IntHistogram. -// -// This must be used only in testing code since no "Set" method available. -func NewIntHistogram() IntHistogram { - return newIntHistogram(&otlpmetrics.IntHistogram{}) -} - -// AggregationTemporality returns the aggregationtemporality associated with this IntHistogram. -func (ms IntHistogram) AggregationTemporality() AggregationTemporality { - return AggregationTemporality((*ms.orig).AggregationTemporality) -} - -// SetAggregationTemporality replaces the aggregationtemporality associated with this IntHistogram. -func (ms IntHistogram) SetAggregationTemporality(v AggregationTemporality) { - (*ms.orig).AggregationTemporality = otlpmetrics.AggregationTemporality(v) -} - -// DataPoints returns the DataPoints associated with this IntHistogram. -func (ms IntHistogram) DataPoints() IntHistogramDataPointSlice { - return newIntHistogramDataPointSlice(&(*ms.orig).DataPoints) -} - -// CopyTo copies all properties from the current struct to the dest. -func (ms IntHistogram) CopyTo(dest IntHistogram) { - dest.SetAggregationTemporality(ms.AggregationTemporality()) - ms.DataPoints().CopyTo(dest.DataPoints()) -} - -// Histogram represents the type of a metric that is calculated by aggregating as a Histogram of all reported measurements over a time interval. -// -// This is a reference type, if passed by value and callee modifies it the -// caller will see the modification. -// -// Must use NewHistogram function to create new instances. -// Important: zero-initialized instance is not valid for use. -type Histogram struct { - orig *otlpmetrics.DoubleHistogram -} - -func newHistogram(orig *otlpmetrics.DoubleHistogram) Histogram { - return Histogram{orig: orig} -} - -// NewHistogram creates a new empty Histogram. -// -// This must be used only in testing code since no "Set" method available. -func NewHistogram() Histogram { - return newHistogram(&otlpmetrics.DoubleHistogram{}) -} - -// AggregationTemporality returns the aggregationtemporality associated with this Histogram. -func (ms Histogram) AggregationTemporality() AggregationTemporality { - return AggregationTemporality((*ms.orig).AggregationTemporality) -} - -// SetAggregationTemporality replaces the aggregationtemporality associated with this Histogram. 
-func (ms Histogram) SetAggregationTemporality(v AggregationTemporality) { - (*ms.orig).AggregationTemporality = otlpmetrics.AggregationTemporality(v) -} - -// DataPoints returns the DataPoints associated with this Histogram. -func (ms Histogram) DataPoints() HistogramDataPointSlice { - return newHistogramDataPointSlice(&(*ms.orig).DataPoints) -} - -// CopyTo copies all properties from the current struct to the dest. -func (ms Histogram) CopyTo(dest Histogram) { - dest.SetAggregationTemporality(ms.AggregationTemporality()) - ms.DataPoints().CopyTo(dest.DataPoints()) -} - -// Summary represents the type of a metric that is calculated by aggregating as a Summary of all reported double measurements over a time interval. -// -// This is a reference type, if passed by value and callee modifies it the -// caller will see the modification. -// -// Must use NewSummary function to create new instances. -// Important: zero-initialized instance is not valid for use. -type Summary struct { - orig *otlpmetrics.DoubleSummary -} - -func newSummary(orig *otlpmetrics.DoubleSummary) Summary { - return Summary{orig: orig} -} - -// NewSummary creates a new empty Summary. -// -// This must be used only in testing code since no "Set" method available. -func NewSummary() Summary { - return newSummary(&otlpmetrics.DoubleSummary{}) -} - -// DataPoints returns the DataPoints associated with this Summary. -func (ms Summary) DataPoints() SummaryDataPointSlice { - return newSummaryDataPointSlice(&(*ms.orig).DataPoints) -} - -// CopyTo copies all properties from the current struct to the dest. -func (ms Summary) CopyTo(dest Summary) { - ms.DataPoints().CopyTo(dest.DataPoints()) -} - -// IntDataPointSlice logically represents a slice of IntDataPoint. -// -// This is a reference type. If passed by value and callee modifies it, the -// caller will see the modification. -// -// Must use NewIntDataPointSlice function to create new instances. -// Important: zero-initialized instance is not valid for use. -type IntDataPointSlice struct { - // orig points to the slice otlpmetrics.IntDataPoint field contained somewhere else. - // We use pointer-to-slice to be able to modify it in functions like Resize. - orig *[]*otlpmetrics.IntDataPoint -} - -func newIntDataPointSlice(orig *[]*otlpmetrics.IntDataPoint) IntDataPointSlice { - return IntDataPointSlice{orig} -} - -// NewIntDataPointSlice creates a IntDataPointSlice with 0 elements. -// Can use "Resize" to initialize with a given length. -func NewIntDataPointSlice() IntDataPointSlice { - orig := []*otlpmetrics.IntDataPoint(nil) - return IntDataPointSlice{&orig} -} - -// Len returns the number of elements in the slice. -// -// Returns "0" for a newly instance created with "NewIntDataPointSlice()". -func (es IntDataPointSlice) Len() int { - return len(*es.orig) -} - -// At returns the element at the given index. -// -// This function is used mostly for iterating over all the values in the slice: -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// ... // Do something with the element -// } -func (es IntDataPointSlice) At(ix int) IntDataPoint { - return newIntDataPoint((*es.orig)[ix]) -} - -// CopyTo copies all elements from the current slice to the dest. 
-func (es IntDataPointSlice) CopyTo(dest IntDataPointSlice) { - srcLen := es.Len() - destCap := cap(*dest.orig) - if srcLen <= destCap { - (*dest.orig) = (*dest.orig)[:srcLen:destCap] - for i := range *es.orig { - newIntDataPoint((*es.orig)[i]).CopyTo(newIntDataPoint((*dest.orig)[i])) - } - return - } - origs := make([]otlpmetrics.IntDataPoint, srcLen) - wrappers := make([]*otlpmetrics.IntDataPoint, srcLen) - for i := range *es.orig { - wrappers[i] = &origs[i] - newIntDataPoint((*es.orig)[i]).CopyTo(newIntDataPoint(wrappers[i])) - } - *dest.orig = wrappers -} - -// Resize is an operation that resizes the slice: -// 1. If the newLen <= len then equivalent with slice[0:newLen:cap]. -// 2. If the newLen > len then (newLen - cap) empty elements will be appended to the slice. -// -// Here is how a new IntDataPointSlice can be initialized: -// es := NewIntDataPointSlice() -// es.Resize(4) -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// // Here should set all the values for e. -// } -func (es IntDataPointSlice) Resize(newLen int) { - oldLen := len(*es.orig) - oldCap := cap(*es.orig) - if newLen <= oldLen { - *es.orig = (*es.orig)[:newLen:oldCap] - return - } - - if newLen > oldCap { - newOrig := make([]*otlpmetrics.IntDataPoint, oldLen, newLen) - copy(newOrig, *es.orig) - *es.orig = newOrig - } - - // Add extra empty elements to the array. - extraOrigs := make([]otlpmetrics.IntDataPoint, newLen-oldLen) - for i := range extraOrigs { - *es.orig = append(*es.orig, &extraOrigs[i]) - } -} - -// Append will increase the length of the IntDataPointSlice by one and set the -// given IntDataPoint at that new position. The original IntDataPoint -// could still be referenced so do not reuse it after passing it to this -// method. -// Deprecated: Use AppendEmpty. -func (es IntDataPointSlice) Append(e IntDataPoint) { - *es.orig = append(*es.orig, e.orig) -} - -// AppendEmpty will append to the end of the slice an empty IntDataPoint. -// It returns the newly added IntDataPoint. -func (es IntDataPointSlice) AppendEmpty() IntDataPoint { - *es.orig = append(*es.orig, &otlpmetrics.IntDataPoint{}) - return es.At(es.Len() - 1) -} - -// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. -// The current slice will be cleared. -func (es IntDataPointSlice) MoveAndAppendTo(dest IntDataPointSlice) { - if *dest.orig == nil { - // We can simply move the entire vector and avoid any allocations. - *dest.orig = *es.orig - } else { - *dest.orig = append(*dest.orig, *es.orig...) - } - *es.orig = nil -} - -// RemoveIf calls f sequentially for each element present in the slice. -// If f returns true, the element is removed from the slice. -func (es IntDataPointSlice) RemoveIf(f func(IntDataPoint) bool) { - newLen := 0 - for i := 0; i < len(*es.orig); i++ { - if f(es.At(i)) { - continue - } - if newLen == i { - // Nothing to move, element is at the right place. - newLen++ - continue - } - (*es.orig)[newLen] = (*es.orig)[i] - newLen++ - } - // TODO: Prevent memory leak by erasing truncated values. - *es.orig = (*es.orig)[:newLen] -} - -// IntDataPoint is a single data point in a timeseries that describes the time-varying values of a scalar int metric. -// -// This is a reference type, if passed by value and callee modifies it the -// caller will see the modification. -// -// Must use NewIntDataPoint function to create new instances. -// Important: zero-initialized instance is not valid for use. 
-type IntDataPoint struct { - orig *otlpmetrics.IntDataPoint -} - -func newIntDataPoint(orig *otlpmetrics.IntDataPoint) IntDataPoint { - return IntDataPoint{orig: orig} -} - -// NewIntDataPoint creates a new empty IntDataPoint. -// -// This must be used only in testing code since no "Set" method available. -func NewIntDataPoint() IntDataPoint { - return newIntDataPoint(&otlpmetrics.IntDataPoint{}) -} - -// LabelsMap returns the Labels associated with this IntDataPoint. -func (ms IntDataPoint) LabelsMap() StringMap { - return newStringMap(&(*ms.orig).Labels) -} - -// StartTimestamp returns the starttimestamp associated with this IntDataPoint. -func (ms IntDataPoint) StartTimestamp() Timestamp { - return Timestamp((*ms.orig).StartTimeUnixNano) -} - -// SetStartTimestamp replaces the starttimestamp associated with this IntDataPoint. -func (ms IntDataPoint) SetStartTimestamp(v Timestamp) { - (*ms.orig).StartTimeUnixNano = uint64(v) -} - -// Timestamp returns the timestamp associated with this IntDataPoint. -func (ms IntDataPoint) Timestamp() Timestamp { - return Timestamp((*ms.orig).TimeUnixNano) -} - -// SetTimestamp replaces the timestamp associated with this IntDataPoint. -func (ms IntDataPoint) SetTimestamp(v Timestamp) { - (*ms.orig).TimeUnixNano = uint64(v) -} - -// Value returns the value associated with this IntDataPoint. -func (ms IntDataPoint) Value() int64 { - return (*ms.orig).Value -} - -// SetValue replaces the value associated with this IntDataPoint. -func (ms IntDataPoint) SetValue(v int64) { - (*ms.orig).Value = v -} - -// Exemplars returns the Exemplars associated with this IntDataPoint. -func (ms IntDataPoint) Exemplars() IntExemplarSlice { - return newIntExemplarSlice(&(*ms.orig).Exemplars) -} - -// CopyTo copies all properties from the current struct to the dest. -func (ms IntDataPoint) CopyTo(dest IntDataPoint) { - ms.LabelsMap().CopyTo(dest.LabelsMap()) - dest.SetStartTimestamp(ms.StartTimestamp()) - dest.SetTimestamp(ms.Timestamp()) - dest.SetValue(ms.Value()) - ms.Exemplars().CopyTo(dest.Exemplars()) -} - -// DoubleDataPointSlice logically represents a slice of DoubleDataPoint. -// -// This is a reference type. If passed by value and callee modifies it, the -// caller will see the modification. -// -// Must use NewDoubleDataPointSlice function to create new instances. -// Important: zero-initialized instance is not valid for use. -type DoubleDataPointSlice struct { - // orig points to the slice otlpmetrics.DoubleDataPoint field contained somewhere else. - // We use pointer-to-slice to be able to modify it in functions like Resize. - orig *[]*otlpmetrics.DoubleDataPoint -} - -func newDoubleDataPointSlice(orig *[]*otlpmetrics.DoubleDataPoint) DoubleDataPointSlice { - return DoubleDataPointSlice{orig} -} - -// NewDoubleDataPointSlice creates a DoubleDataPointSlice with 0 elements. -// Can use "Resize" to initialize with a given length. -func NewDoubleDataPointSlice() DoubleDataPointSlice { - orig := []*otlpmetrics.DoubleDataPoint(nil) - return DoubleDataPointSlice{&orig} -} - -// Len returns the number of elements in the slice. -// -// Returns "0" for a newly instance created with "NewDoubleDataPointSlice()". -func (es DoubleDataPointSlice) Len() int { - return len(*es.orig) -} - -// At returns the element at the given index. -// -// This function is used mostly for iterating over all the values in the slice: -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// ... 
// Do something with the element -// } -func (es DoubleDataPointSlice) At(ix int) DoubleDataPoint { - return newDoubleDataPoint((*es.orig)[ix]) -} - -// CopyTo copies all elements from the current slice to the dest. -func (es DoubleDataPointSlice) CopyTo(dest DoubleDataPointSlice) { - srcLen := es.Len() - destCap := cap(*dest.orig) - if srcLen <= destCap { - (*dest.orig) = (*dest.orig)[:srcLen:destCap] - for i := range *es.orig { - newDoubleDataPoint((*es.orig)[i]).CopyTo(newDoubleDataPoint((*dest.orig)[i])) - } - return - } - origs := make([]otlpmetrics.DoubleDataPoint, srcLen) - wrappers := make([]*otlpmetrics.DoubleDataPoint, srcLen) - for i := range *es.orig { - wrappers[i] = &origs[i] - newDoubleDataPoint((*es.orig)[i]).CopyTo(newDoubleDataPoint(wrappers[i])) - } - *dest.orig = wrappers -} - -// Resize is an operation that resizes the slice: -// 1. If the newLen <= len then equivalent with slice[0:newLen:cap]. -// 2. If the newLen > len then (newLen - cap) empty elements will be appended to the slice. -// -// Here is how a new DoubleDataPointSlice can be initialized: -// es := NewDoubleDataPointSlice() -// es.Resize(4) -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// // Here should set all the values for e. -// } -func (es DoubleDataPointSlice) Resize(newLen int) { - oldLen := len(*es.orig) - oldCap := cap(*es.orig) - if newLen <= oldLen { - *es.orig = (*es.orig)[:newLen:oldCap] - return - } - - if newLen > oldCap { - newOrig := make([]*otlpmetrics.DoubleDataPoint, oldLen, newLen) - copy(newOrig, *es.orig) - *es.orig = newOrig - } - - // Add extra empty elements to the array. - extraOrigs := make([]otlpmetrics.DoubleDataPoint, newLen-oldLen) - for i := range extraOrigs { - *es.orig = append(*es.orig, &extraOrigs[i]) - } -} - -// Append will increase the length of the DoubleDataPointSlice by one and set the -// given DoubleDataPoint at that new position. The original DoubleDataPoint -// could still be referenced so do not reuse it after passing it to this -// method. -// Deprecated: Use AppendEmpty. -func (es DoubleDataPointSlice) Append(e DoubleDataPoint) { - *es.orig = append(*es.orig, e.orig) -} - -// AppendEmpty will append to the end of the slice an empty DoubleDataPoint. -// It returns the newly added DoubleDataPoint. -func (es DoubleDataPointSlice) AppendEmpty() DoubleDataPoint { - *es.orig = append(*es.orig, &otlpmetrics.DoubleDataPoint{}) - return es.At(es.Len() - 1) -} - -// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. -// The current slice will be cleared. -func (es DoubleDataPointSlice) MoveAndAppendTo(dest DoubleDataPointSlice) { - if *dest.orig == nil { - // We can simply move the entire vector and avoid any allocations. - *dest.orig = *es.orig - } else { - *dest.orig = append(*dest.orig, *es.orig...) - } - *es.orig = nil -} - -// RemoveIf calls f sequentially for each element present in the slice. -// If f returns true, the element is removed from the slice. -func (es DoubleDataPointSlice) RemoveIf(f func(DoubleDataPoint) bool) { - newLen := 0 - for i := 0; i < len(*es.orig); i++ { - if f(es.At(i)) { - continue - } - if newLen == i { - // Nothing to move, element is at the right place. - newLen++ - continue - } - (*es.orig)[newLen] = (*es.orig)[i] - newLen++ - } - // TODO: Prevent memory leak by erasing truncated values. - *es.orig = (*es.orig)[:newLen] -} - -// DoubleDataPoint is a single data point in a timeseries that describes the time-varying value of a double metric. 
-// -// This is a reference type, if passed by value and callee modifies it the -// caller will see the modification. -// -// Must use NewDoubleDataPoint function to create new instances. -// Important: zero-initialized instance is not valid for use. -type DoubleDataPoint struct { - orig *otlpmetrics.DoubleDataPoint -} - -func newDoubleDataPoint(orig *otlpmetrics.DoubleDataPoint) DoubleDataPoint { - return DoubleDataPoint{orig: orig} -} - -// NewDoubleDataPoint creates a new empty DoubleDataPoint. -// -// This must be used only in testing code since no "Set" method available. -func NewDoubleDataPoint() DoubleDataPoint { - return newDoubleDataPoint(&otlpmetrics.DoubleDataPoint{}) -} - -// LabelsMap returns the Labels associated with this DoubleDataPoint. -func (ms DoubleDataPoint) LabelsMap() StringMap { - return newStringMap(&(*ms.orig).Labels) -} - -// StartTimestamp returns the starttimestamp associated with this DoubleDataPoint. -func (ms DoubleDataPoint) StartTimestamp() Timestamp { - return Timestamp((*ms.orig).StartTimeUnixNano) -} - -// SetStartTimestamp replaces the starttimestamp associated with this DoubleDataPoint. -func (ms DoubleDataPoint) SetStartTimestamp(v Timestamp) { - (*ms.orig).StartTimeUnixNano = uint64(v) -} - -// Timestamp returns the timestamp associated with this DoubleDataPoint. -func (ms DoubleDataPoint) Timestamp() Timestamp { - return Timestamp((*ms.orig).TimeUnixNano) -} - -// SetTimestamp replaces the timestamp associated with this DoubleDataPoint. -func (ms DoubleDataPoint) SetTimestamp(v Timestamp) { - (*ms.orig).TimeUnixNano = uint64(v) -} - -// Value returns the value associated with this DoubleDataPoint. -func (ms DoubleDataPoint) Value() float64 { - return (*ms.orig).Value -} - -// SetValue replaces the value associated with this DoubleDataPoint. -func (ms DoubleDataPoint) SetValue(v float64) { - (*ms.orig).Value = v -} - -// Exemplars returns the Exemplars associated with this DoubleDataPoint. -func (ms DoubleDataPoint) Exemplars() ExemplarSlice { - return newExemplarSlice(&(*ms.orig).Exemplars) -} - -// CopyTo copies all properties from the current struct to the dest. -func (ms DoubleDataPoint) CopyTo(dest DoubleDataPoint) { - ms.LabelsMap().CopyTo(dest.LabelsMap()) - dest.SetStartTimestamp(ms.StartTimestamp()) - dest.SetTimestamp(ms.Timestamp()) - dest.SetValue(ms.Value()) - ms.Exemplars().CopyTo(dest.Exemplars()) -} - -// IntHistogramDataPointSlice logically represents a slice of IntHistogramDataPoint. -// -// This is a reference type. If passed by value and callee modifies it, the -// caller will see the modification. -// -// Must use NewIntHistogramDataPointSlice function to create new instances. -// Important: zero-initialized instance is not valid for use. -type IntHistogramDataPointSlice struct { - // orig points to the slice otlpmetrics.IntHistogramDataPoint field contained somewhere else. - // We use pointer-to-slice to be able to modify it in functions like Resize. - orig *[]*otlpmetrics.IntHistogramDataPoint -} - -func newIntHistogramDataPointSlice(orig *[]*otlpmetrics.IntHistogramDataPoint) IntHistogramDataPointSlice { - return IntHistogramDataPointSlice{orig} -} - -// NewIntHistogramDataPointSlice creates a IntHistogramDataPointSlice with 0 elements. -// Can use "Resize" to initialize with a given length. -func NewIntHistogramDataPointSlice() IntHistogramDataPointSlice { - orig := []*otlpmetrics.IntHistogramDataPoint(nil) - return IntHistogramDataPointSlice{&orig} -} - -// Len returns the number of elements in the slice. 
-// -// Returns "0" for a newly instance created with "NewIntHistogramDataPointSlice()". -func (es IntHistogramDataPointSlice) Len() int { - return len(*es.orig) -} - -// At returns the element at the given index. -// -// This function is used mostly for iterating over all the values in the slice: -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// ... // Do something with the element -// } -func (es IntHistogramDataPointSlice) At(ix int) IntHistogramDataPoint { - return newIntHistogramDataPoint((*es.orig)[ix]) -} - -// CopyTo copies all elements from the current slice to the dest. -func (es IntHistogramDataPointSlice) CopyTo(dest IntHistogramDataPointSlice) { - srcLen := es.Len() - destCap := cap(*dest.orig) - if srcLen <= destCap { - (*dest.orig) = (*dest.orig)[:srcLen:destCap] - for i := range *es.orig { - newIntHistogramDataPoint((*es.orig)[i]).CopyTo(newIntHistogramDataPoint((*dest.orig)[i])) - } - return - } - origs := make([]otlpmetrics.IntHistogramDataPoint, srcLen) - wrappers := make([]*otlpmetrics.IntHistogramDataPoint, srcLen) - for i := range *es.orig { - wrappers[i] = &origs[i] - newIntHistogramDataPoint((*es.orig)[i]).CopyTo(newIntHistogramDataPoint(wrappers[i])) - } - *dest.orig = wrappers -} - -// Resize is an operation that resizes the slice: -// 1. If the newLen <= len then equivalent with slice[0:newLen:cap]. -// 2. If the newLen > len then (newLen - cap) empty elements will be appended to the slice. -// -// Here is how a new IntHistogramDataPointSlice can be initialized: -// es := NewIntHistogramDataPointSlice() -// es.Resize(4) -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// // Here should set all the values for e. -// } -func (es IntHistogramDataPointSlice) Resize(newLen int) { - oldLen := len(*es.orig) - oldCap := cap(*es.orig) - if newLen <= oldLen { - *es.orig = (*es.orig)[:newLen:oldCap] - return - } - - if newLen > oldCap { - newOrig := make([]*otlpmetrics.IntHistogramDataPoint, oldLen, newLen) - copy(newOrig, *es.orig) - *es.orig = newOrig - } - - // Add extra empty elements to the array. - extraOrigs := make([]otlpmetrics.IntHistogramDataPoint, newLen-oldLen) - for i := range extraOrigs { - *es.orig = append(*es.orig, &extraOrigs[i]) - } -} - -// Append will increase the length of the IntHistogramDataPointSlice by one and set the -// given IntHistogramDataPoint at that new position. The original IntHistogramDataPoint -// could still be referenced so do not reuse it after passing it to this -// method. -// Deprecated: Use AppendEmpty. -func (es IntHistogramDataPointSlice) Append(e IntHistogramDataPoint) { - *es.orig = append(*es.orig, e.orig) -} - -// AppendEmpty will append to the end of the slice an empty IntHistogramDataPoint. -// It returns the newly added IntHistogramDataPoint. -func (es IntHistogramDataPointSlice) AppendEmpty() IntHistogramDataPoint { - *es.orig = append(*es.orig, &otlpmetrics.IntHistogramDataPoint{}) - return es.At(es.Len() - 1) -} - -// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. -// The current slice will be cleared. -func (es IntHistogramDataPointSlice) MoveAndAppendTo(dest IntHistogramDataPointSlice) { - if *dest.orig == nil { - // We can simply move the entire vector and avoid any allocations. - *dest.orig = *es.orig - } else { - *dest.orig = append(*dest.orig, *es.orig...) - } - *es.orig = nil -} - -// RemoveIf calls f sequentially for each element present in the slice. -// If f returns true, the element is removed from the slice. 
-func (es IntHistogramDataPointSlice) RemoveIf(f func(IntHistogramDataPoint) bool) { - newLen := 0 - for i := 0; i < len(*es.orig); i++ { - if f(es.At(i)) { - continue - } - if newLen == i { - // Nothing to move, element is at the right place. - newLen++ - continue - } - (*es.orig)[newLen] = (*es.orig)[i] - newLen++ - } - // TODO: Prevent memory leak by erasing truncated values. - *es.orig = (*es.orig)[:newLen] -} - -// IntHistogramDataPoint is a single data point in a timeseries that describes the time-varying values of a Histogram of int values. -// -// This is a reference type, if passed by value and callee modifies it the -// caller will see the modification. -// -// Must use NewIntHistogramDataPoint function to create new instances. -// Important: zero-initialized instance is not valid for use. -type IntHistogramDataPoint struct { - orig *otlpmetrics.IntHistogramDataPoint -} - -func newIntHistogramDataPoint(orig *otlpmetrics.IntHistogramDataPoint) IntHistogramDataPoint { - return IntHistogramDataPoint{orig: orig} -} - -// NewIntHistogramDataPoint creates a new empty IntHistogramDataPoint. -// -// This must be used only in testing code since no "Set" method available. -func NewIntHistogramDataPoint() IntHistogramDataPoint { - return newIntHistogramDataPoint(&otlpmetrics.IntHistogramDataPoint{}) -} - -// LabelsMap returns the Labels associated with this IntHistogramDataPoint. -func (ms IntHistogramDataPoint) LabelsMap() StringMap { - return newStringMap(&(*ms.orig).Labels) -} - -// StartTimestamp returns the starttimestamp associated with this IntHistogramDataPoint. -func (ms IntHistogramDataPoint) StartTimestamp() Timestamp { - return Timestamp((*ms.orig).StartTimeUnixNano) -} - -// SetStartTimestamp replaces the starttimestamp associated with this IntHistogramDataPoint. -func (ms IntHistogramDataPoint) SetStartTimestamp(v Timestamp) { - (*ms.orig).StartTimeUnixNano = uint64(v) -} - -// Timestamp returns the timestamp associated with this IntHistogramDataPoint. -func (ms IntHistogramDataPoint) Timestamp() Timestamp { - return Timestamp((*ms.orig).TimeUnixNano) -} - -// SetTimestamp replaces the timestamp associated with this IntHistogramDataPoint. -func (ms IntHistogramDataPoint) SetTimestamp(v Timestamp) { - (*ms.orig).TimeUnixNano = uint64(v) -} - -// Count returns the count associated with this IntHistogramDataPoint. -func (ms IntHistogramDataPoint) Count() uint64 { - return (*ms.orig).Count -} - -// SetCount replaces the count associated with this IntHistogramDataPoint. -func (ms IntHistogramDataPoint) SetCount(v uint64) { - (*ms.orig).Count = v -} - -// Sum returns the sum associated with this IntHistogramDataPoint. -func (ms IntHistogramDataPoint) Sum() int64 { - return (*ms.orig).Sum -} - -// SetSum replaces the sum associated with this IntHistogramDataPoint. -func (ms IntHistogramDataPoint) SetSum(v int64) { - (*ms.orig).Sum = v -} - -// BucketCounts returns the bucketcounts associated with this IntHistogramDataPoint. -func (ms IntHistogramDataPoint) BucketCounts() []uint64 { - return (*ms.orig).BucketCounts -} - -// SetBucketCounts replaces the bucketcounts associated with this IntHistogramDataPoint. -func (ms IntHistogramDataPoint) SetBucketCounts(v []uint64) { - (*ms.orig).BucketCounts = v -} - -// ExplicitBounds returns the explicitbounds associated with this IntHistogramDataPoint. -func (ms IntHistogramDataPoint) ExplicitBounds() []float64 { - return (*ms.orig).ExplicitBounds -} - -// SetExplicitBounds replaces the explicitbounds associated with this IntHistogramDataPoint. 
-func (ms IntHistogramDataPoint) SetExplicitBounds(v []float64) { - (*ms.orig).ExplicitBounds = v -} - -// Exemplars returns the Exemplars associated with this IntHistogramDataPoint. -func (ms IntHistogramDataPoint) Exemplars() IntExemplarSlice { - return newIntExemplarSlice(&(*ms.orig).Exemplars) -} - -// CopyTo copies all properties from the current struct to the dest. -func (ms IntHistogramDataPoint) CopyTo(dest IntHistogramDataPoint) { - ms.LabelsMap().CopyTo(dest.LabelsMap()) - dest.SetStartTimestamp(ms.StartTimestamp()) - dest.SetTimestamp(ms.Timestamp()) - dest.SetCount(ms.Count()) - dest.SetSum(ms.Sum()) - dest.SetBucketCounts(ms.BucketCounts()) - dest.SetExplicitBounds(ms.ExplicitBounds()) - ms.Exemplars().CopyTo(dest.Exemplars()) -} - -// HistogramDataPointSlice logically represents a slice of HistogramDataPoint. -// -// This is a reference type. If passed by value and callee modifies it, the -// caller will see the modification. -// -// Must use NewHistogramDataPointSlice function to create new instances. -// Important: zero-initialized instance is not valid for use. -type HistogramDataPointSlice struct { - // orig points to the slice otlpmetrics.DoubleHistogramDataPoint field contained somewhere else. - // We use pointer-to-slice to be able to modify it in functions like Resize. - orig *[]*otlpmetrics.DoubleHistogramDataPoint -} - -func newHistogramDataPointSlice(orig *[]*otlpmetrics.DoubleHistogramDataPoint) HistogramDataPointSlice { - return HistogramDataPointSlice{orig} -} - -// NewHistogramDataPointSlice creates a HistogramDataPointSlice with 0 elements. -// Can use "Resize" to initialize with a given length. -func NewHistogramDataPointSlice() HistogramDataPointSlice { - orig := []*otlpmetrics.DoubleHistogramDataPoint(nil) - return HistogramDataPointSlice{&orig} -} - -// Len returns the number of elements in the slice. -// -// Returns "0" for a newly instance created with "NewHistogramDataPointSlice()". -func (es HistogramDataPointSlice) Len() int { - return len(*es.orig) -} - -// At returns the element at the given index. -// -// This function is used mostly for iterating over all the values in the slice: -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// ... // Do something with the element -// } -func (es HistogramDataPointSlice) At(ix int) HistogramDataPoint { - return newHistogramDataPoint((*es.orig)[ix]) -} - -// CopyTo copies all elements from the current slice to the dest. -func (es HistogramDataPointSlice) CopyTo(dest HistogramDataPointSlice) { - srcLen := es.Len() - destCap := cap(*dest.orig) - if srcLen <= destCap { - (*dest.orig) = (*dest.orig)[:srcLen:destCap] - for i := range *es.orig { - newHistogramDataPoint((*es.orig)[i]).CopyTo(newHistogramDataPoint((*dest.orig)[i])) - } - return - } - origs := make([]otlpmetrics.DoubleHistogramDataPoint, srcLen) - wrappers := make([]*otlpmetrics.DoubleHistogramDataPoint, srcLen) - for i := range *es.orig { - wrappers[i] = &origs[i] - newHistogramDataPoint((*es.orig)[i]).CopyTo(newHistogramDataPoint(wrappers[i])) - } - *dest.orig = wrappers -} - -// Resize is an operation that resizes the slice: -// 1. If the newLen <= len then equivalent with slice[0:newLen:cap]. -// 2. If the newLen > len then (newLen - cap) empty elements will be appended to the slice. -// -// Here is how a new HistogramDataPointSlice can be initialized: -// es := NewHistogramDataPointSlice() -// es.Resize(4) -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// // Here should set all the values for e. 
-// } -func (es HistogramDataPointSlice) Resize(newLen int) { - oldLen := len(*es.orig) - oldCap := cap(*es.orig) - if newLen <= oldLen { - *es.orig = (*es.orig)[:newLen:oldCap] - return - } - - if newLen > oldCap { - newOrig := make([]*otlpmetrics.DoubleHistogramDataPoint, oldLen, newLen) - copy(newOrig, *es.orig) - *es.orig = newOrig - } - - // Add extra empty elements to the array. - extraOrigs := make([]otlpmetrics.DoubleHistogramDataPoint, newLen-oldLen) - for i := range extraOrigs { - *es.orig = append(*es.orig, &extraOrigs[i]) - } -} - -// Append will increase the length of the HistogramDataPointSlice by one and set the -// given HistogramDataPoint at that new position. The original HistogramDataPoint -// could still be referenced so do not reuse it after passing it to this -// method. -// Deprecated: Use AppendEmpty. -func (es HistogramDataPointSlice) Append(e HistogramDataPoint) { - *es.orig = append(*es.orig, e.orig) -} - -// AppendEmpty will append to the end of the slice an empty HistogramDataPoint. -// It returns the newly added HistogramDataPoint. -func (es HistogramDataPointSlice) AppendEmpty() HistogramDataPoint { - *es.orig = append(*es.orig, &otlpmetrics.DoubleHistogramDataPoint{}) - return es.At(es.Len() - 1) -} - -// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. -// The current slice will be cleared. -func (es HistogramDataPointSlice) MoveAndAppendTo(dest HistogramDataPointSlice) { - if *dest.orig == nil { - // We can simply move the entire vector and avoid any allocations. - *dest.orig = *es.orig - } else { - *dest.orig = append(*dest.orig, *es.orig...) - } - *es.orig = nil -} - -// RemoveIf calls f sequentially for each element present in the slice. -// If f returns true, the element is removed from the slice. -func (es HistogramDataPointSlice) RemoveIf(f func(HistogramDataPoint) bool) { - newLen := 0 - for i := 0; i < len(*es.orig); i++ { - if f(es.At(i)) { - continue - } - if newLen == i { - // Nothing to move, element is at the right place. - newLen++ - continue - } - (*es.orig)[newLen] = (*es.orig)[i] - newLen++ - } - // TODO: Prevent memory leak by erasing truncated values. - *es.orig = (*es.orig)[:newLen] -} - -// HistogramDataPoint is a single data point in a timeseries that describes the time-varying values of a Histogram of values. -// -// This is a reference type, if passed by value and callee modifies it the -// caller will see the modification. -// -// Must use NewHistogramDataPoint function to create new instances. -// Important: zero-initialized instance is not valid for use. -type HistogramDataPoint struct { - orig *otlpmetrics.DoubleHistogramDataPoint -} - -func newHistogramDataPoint(orig *otlpmetrics.DoubleHistogramDataPoint) HistogramDataPoint { - return HistogramDataPoint{orig: orig} -} - -// NewHistogramDataPoint creates a new empty HistogramDataPoint. -// -// This must be used only in testing code since no "Set" method available. -func NewHistogramDataPoint() HistogramDataPoint { - return newHistogramDataPoint(&otlpmetrics.DoubleHistogramDataPoint{}) -} - -// LabelsMap returns the Labels associated with this HistogramDataPoint. -func (ms HistogramDataPoint) LabelsMap() StringMap { - return newStringMap(&(*ms.orig).Labels) -} - -// StartTimestamp returns the starttimestamp associated with this HistogramDataPoint. 
-func (ms HistogramDataPoint) StartTimestamp() Timestamp { - return Timestamp((*ms.orig).StartTimeUnixNano) -} - -// SetStartTimestamp replaces the starttimestamp associated with this HistogramDataPoint. -func (ms HistogramDataPoint) SetStartTimestamp(v Timestamp) { - (*ms.orig).StartTimeUnixNano = uint64(v) -} - -// Timestamp returns the timestamp associated with this HistogramDataPoint. -func (ms HistogramDataPoint) Timestamp() Timestamp { - return Timestamp((*ms.orig).TimeUnixNano) -} - -// SetTimestamp replaces the timestamp associated with this HistogramDataPoint. -func (ms HistogramDataPoint) SetTimestamp(v Timestamp) { - (*ms.orig).TimeUnixNano = uint64(v) -} - -// Count returns the count associated with this HistogramDataPoint. -func (ms HistogramDataPoint) Count() uint64 { - return (*ms.orig).Count -} - -// SetCount replaces the count associated with this HistogramDataPoint. -func (ms HistogramDataPoint) SetCount(v uint64) { - (*ms.orig).Count = v -} - -// Sum returns the sum associated with this HistogramDataPoint. -func (ms HistogramDataPoint) Sum() float64 { - return (*ms.orig).Sum -} - -// SetSum replaces the sum associated with this HistogramDataPoint. -func (ms HistogramDataPoint) SetSum(v float64) { - (*ms.orig).Sum = v -} - -// BucketCounts returns the bucketcounts associated with this HistogramDataPoint. -func (ms HistogramDataPoint) BucketCounts() []uint64 { - return (*ms.orig).BucketCounts -} - -// SetBucketCounts replaces the bucketcounts associated with this HistogramDataPoint. -func (ms HistogramDataPoint) SetBucketCounts(v []uint64) { - (*ms.orig).BucketCounts = v -} - -// ExplicitBounds returns the explicitbounds associated with this HistogramDataPoint. -func (ms HistogramDataPoint) ExplicitBounds() []float64 { - return (*ms.orig).ExplicitBounds -} - -// SetExplicitBounds replaces the explicitbounds associated with this HistogramDataPoint. -func (ms HistogramDataPoint) SetExplicitBounds(v []float64) { - (*ms.orig).ExplicitBounds = v -} - -// Exemplars returns the Exemplars associated with this HistogramDataPoint. -func (ms HistogramDataPoint) Exemplars() ExemplarSlice { - return newExemplarSlice(&(*ms.orig).Exemplars) -} - -// CopyTo copies all properties from the current struct to the dest. -func (ms HistogramDataPoint) CopyTo(dest HistogramDataPoint) { - ms.LabelsMap().CopyTo(dest.LabelsMap()) - dest.SetStartTimestamp(ms.StartTimestamp()) - dest.SetTimestamp(ms.Timestamp()) - dest.SetCount(ms.Count()) - dest.SetSum(ms.Sum()) - dest.SetBucketCounts(ms.BucketCounts()) - dest.SetExplicitBounds(ms.ExplicitBounds()) - ms.Exemplars().CopyTo(dest.Exemplars()) -} - -// SummaryDataPointSlice logically represents a slice of SummaryDataPoint. -// -// This is a reference type. If passed by value and callee modifies it, the -// caller will see the modification. -// -// Must use NewSummaryDataPointSlice function to create new instances. -// Important: zero-initialized instance is not valid for use. -type SummaryDataPointSlice struct { - // orig points to the slice otlpmetrics.DoubleSummaryDataPoint field contained somewhere else. - // We use pointer-to-slice to be able to modify it in functions like Resize. - orig *[]*otlpmetrics.DoubleSummaryDataPoint -} - -func newSummaryDataPointSlice(orig *[]*otlpmetrics.DoubleSummaryDataPoint) SummaryDataPointSlice { - return SummaryDataPointSlice{orig} -} - -// NewSummaryDataPointSlice creates a SummaryDataPointSlice with 0 elements. -// Can use "Resize" to initialize with a given length. 
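The generated CopyTo methods above copy every field into an existing destination wrapper rather than aliasing it; a minimal sketch of that behaviour, under the same import assumption as the previous example:

    func copyHistogramPoint() {
        src := pdata.NewHistogramDataPoint() // test-only constructor, per its doc comment
        src.SetCount(3)
        src.SetSum(1.5)
        dst := pdata.NewHistogramDataPoint()
        src.CopyTo(dst)  // deep copy into dst
        src.SetCount(99) // does not affect dst; dst.Count() is still 3
    }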
-func NewSummaryDataPointSlice() SummaryDataPointSlice { - orig := []*otlpmetrics.DoubleSummaryDataPoint(nil) - return SummaryDataPointSlice{&orig} -} - -// Len returns the number of elements in the slice. -// -// Returns "0" for a newly instance created with "NewSummaryDataPointSlice()". -func (es SummaryDataPointSlice) Len() int { - return len(*es.orig) -} - -// At returns the element at the given index. -// -// This function is used mostly for iterating over all the values in the slice: -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// ... // Do something with the element -// } -func (es SummaryDataPointSlice) At(ix int) SummaryDataPoint { - return newSummaryDataPoint((*es.orig)[ix]) -} - -// CopyTo copies all elements from the current slice to the dest. -func (es SummaryDataPointSlice) CopyTo(dest SummaryDataPointSlice) { - srcLen := es.Len() - destCap := cap(*dest.orig) - if srcLen <= destCap { - (*dest.orig) = (*dest.orig)[:srcLen:destCap] - for i := range *es.orig { - newSummaryDataPoint((*es.orig)[i]).CopyTo(newSummaryDataPoint((*dest.orig)[i])) - } - return - } - origs := make([]otlpmetrics.DoubleSummaryDataPoint, srcLen) - wrappers := make([]*otlpmetrics.DoubleSummaryDataPoint, srcLen) - for i := range *es.orig { - wrappers[i] = &origs[i] - newSummaryDataPoint((*es.orig)[i]).CopyTo(newSummaryDataPoint(wrappers[i])) - } - *dest.orig = wrappers -} - -// Resize is an operation that resizes the slice: -// 1. If the newLen <= len then equivalent with slice[0:newLen:cap]. -// 2. If the newLen > len then (newLen - cap) empty elements will be appended to the slice. -// -// Here is how a new SummaryDataPointSlice can be initialized: -// es := NewSummaryDataPointSlice() -// es.Resize(4) -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// // Here should set all the values for e. -// } -func (es SummaryDataPointSlice) Resize(newLen int) { - oldLen := len(*es.orig) - oldCap := cap(*es.orig) - if newLen <= oldLen { - *es.orig = (*es.orig)[:newLen:oldCap] - return - } - - if newLen > oldCap { - newOrig := make([]*otlpmetrics.DoubleSummaryDataPoint, oldLen, newLen) - copy(newOrig, *es.orig) - *es.orig = newOrig - } - - // Add extra empty elements to the array. - extraOrigs := make([]otlpmetrics.DoubleSummaryDataPoint, newLen-oldLen) - for i := range extraOrigs { - *es.orig = append(*es.orig, &extraOrigs[i]) - } -} - -// Append will increase the length of the SummaryDataPointSlice by one and set the -// given SummaryDataPoint at that new position. The original SummaryDataPoint -// could still be referenced so do not reuse it after passing it to this -// method. -// Deprecated: Use AppendEmpty. -func (es SummaryDataPointSlice) Append(e SummaryDataPoint) { - *es.orig = append(*es.orig, e.orig) -} - -// AppendEmpty will append to the end of the slice an empty SummaryDataPoint. -// It returns the newly added SummaryDataPoint. -func (es SummaryDataPointSlice) AppendEmpty() SummaryDataPoint { - *es.orig = append(*es.orig, &otlpmetrics.DoubleSummaryDataPoint{}) - return es.At(es.Len() - 1) -} - -// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. -// The current slice will be cleared. -func (es SummaryDataPointSlice) MoveAndAppendTo(dest SummaryDataPointSlice) { - if *dest.orig == nil { - // We can simply move the entire vector and avoid any allocations. - *dest.orig = *es.orig - } else { - *dest.orig = append(*dest.orig, *es.orig...) - } - *es.orig = nil -} - -// RemoveIf calls f sequentially for each element present in the slice. 
-// If f returns true, the element is removed from the slice. -func (es SummaryDataPointSlice) RemoveIf(f func(SummaryDataPoint) bool) { - newLen := 0 - for i := 0; i < len(*es.orig); i++ { - if f(es.At(i)) { - continue - } - if newLen == i { - // Nothing to move, element is at the right place. - newLen++ - continue - } - (*es.orig)[newLen] = (*es.orig)[i] - newLen++ - } - // TODO: Prevent memory leak by erasing truncated values. - *es.orig = (*es.orig)[:newLen] -} - -// SummaryDataPoint is a single data point in a timeseries that describes the time-varying values of a Summary of double values. -// -// This is a reference type, if passed by value and callee modifies it the -// caller will see the modification. -// -// Must use NewSummaryDataPoint function to create new instances. -// Important: zero-initialized instance is not valid for use. -type SummaryDataPoint struct { - orig *otlpmetrics.DoubleSummaryDataPoint -} - -func newSummaryDataPoint(orig *otlpmetrics.DoubleSummaryDataPoint) SummaryDataPoint { - return SummaryDataPoint{orig: orig} -} - -// NewSummaryDataPoint creates a new empty SummaryDataPoint. -// -// This must be used only in testing code since no "Set" method available. -func NewSummaryDataPoint() SummaryDataPoint { - return newSummaryDataPoint(&otlpmetrics.DoubleSummaryDataPoint{}) -} - -// LabelsMap returns the Labels associated with this SummaryDataPoint. -func (ms SummaryDataPoint) LabelsMap() StringMap { - return newStringMap(&(*ms.orig).Labels) -} - -// StartTimestamp returns the starttimestamp associated with this SummaryDataPoint. -func (ms SummaryDataPoint) StartTimestamp() Timestamp { - return Timestamp((*ms.orig).StartTimeUnixNano) -} - -// SetStartTimestamp replaces the starttimestamp associated with this SummaryDataPoint. -func (ms SummaryDataPoint) SetStartTimestamp(v Timestamp) { - (*ms.orig).StartTimeUnixNano = uint64(v) -} - -// Timestamp returns the timestamp associated with this SummaryDataPoint. -func (ms SummaryDataPoint) Timestamp() Timestamp { - return Timestamp((*ms.orig).TimeUnixNano) -} - -// SetTimestamp replaces the timestamp associated with this SummaryDataPoint. -func (ms SummaryDataPoint) SetTimestamp(v Timestamp) { - (*ms.orig).TimeUnixNano = uint64(v) -} - -// Count returns the count associated with this SummaryDataPoint. -func (ms SummaryDataPoint) Count() uint64 { - return (*ms.orig).Count -} - -// SetCount replaces the count associated with this SummaryDataPoint. -func (ms SummaryDataPoint) SetCount(v uint64) { - (*ms.orig).Count = v -} - -// Sum returns the sum associated with this SummaryDataPoint. -func (ms SummaryDataPoint) Sum() float64 { - return (*ms.orig).Sum -} - -// SetSum replaces the sum associated with this SummaryDataPoint. -func (ms SummaryDataPoint) SetSum(v float64) { - (*ms.orig).Sum = v -} - -// QuantileValues returns the QuantileValues associated with this SummaryDataPoint. -func (ms SummaryDataPoint) QuantileValues() ValueAtQuantileSlice { - return newValueAtQuantileSlice(&(*ms.orig).QuantileValues) -} - -// CopyTo copies all properties from the current struct to the dest. -func (ms SummaryDataPoint) CopyTo(dest SummaryDataPoint) { - ms.LabelsMap().CopyTo(dest.LabelsMap()) - dest.SetStartTimestamp(ms.StartTimestamp()) - dest.SetTimestamp(ms.Timestamp()) - dest.SetCount(ms.Count()) - dest.SetSum(ms.Sum()) - ms.QuantileValues().CopyTo(dest.QuantileValues()) -} - -// ValueAtQuantileSlice logically represents a slice of ValueAtQuantile. -// -// This is a reference type. 
If passed by value and callee modifies it, the -// caller will see the modification. -// -// Must use NewValueAtQuantileSlice function to create new instances. -// Important: zero-initialized instance is not valid for use. -type ValueAtQuantileSlice struct { - // orig points to the slice otlpmetrics.DoubleSummaryDataPoint_ValueAtQuantile field contained somewhere else. - // We use pointer-to-slice to be able to modify it in functions like Resize. - orig *[]*otlpmetrics.DoubleSummaryDataPoint_ValueAtQuantile -} - -func newValueAtQuantileSlice(orig *[]*otlpmetrics.DoubleSummaryDataPoint_ValueAtQuantile) ValueAtQuantileSlice { - return ValueAtQuantileSlice{orig} -} - -// NewValueAtQuantileSlice creates a ValueAtQuantileSlice with 0 elements. -// Can use "Resize" to initialize with a given length. -func NewValueAtQuantileSlice() ValueAtQuantileSlice { - orig := []*otlpmetrics.DoubleSummaryDataPoint_ValueAtQuantile(nil) - return ValueAtQuantileSlice{&orig} -} - -// Len returns the number of elements in the slice. -// -// Returns "0" for a newly instance created with "NewValueAtQuantileSlice()". -func (es ValueAtQuantileSlice) Len() int { - return len(*es.orig) -} - -// At returns the element at the given index. -// -// This function is used mostly for iterating over all the values in the slice: -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// ... // Do something with the element -// } -func (es ValueAtQuantileSlice) At(ix int) ValueAtQuantile { - return newValueAtQuantile((*es.orig)[ix]) -} - -// CopyTo copies all elements from the current slice to the dest. -func (es ValueAtQuantileSlice) CopyTo(dest ValueAtQuantileSlice) { - srcLen := es.Len() - destCap := cap(*dest.orig) - if srcLen <= destCap { - (*dest.orig) = (*dest.orig)[:srcLen:destCap] - for i := range *es.orig { - newValueAtQuantile((*es.orig)[i]).CopyTo(newValueAtQuantile((*dest.orig)[i])) - } - return - } - origs := make([]otlpmetrics.DoubleSummaryDataPoint_ValueAtQuantile, srcLen) - wrappers := make([]*otlpmetrics.DoubleSummaryDataPoint_ValueAtQuantile, srcLen) - for i := range *es.orig { - wrappers[i] = &origs[i] - newValueAtQuantile((*es.orig)[i]).CopyTo(newValueAtQuantile(wrappers[i])) - } - *dest.orig = wrappers -} - -// Resize is an operation that resizes the slice: -// 1. If the newLen <= len then equivalent with slice[0:newLen:cap]. -// 2. If the newLen > len then (newLen - cap) empty elements will be appended to the slice. -// -// Here is how a new ValueAtQuantileSlice can be initialized: -// es := NewValueAtQuantileSlice() -// es.Resize(4) -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// // Here should set all the values for e. -// } -func (es ValueAtQuantileSlice) Resize(newLen int) { - oldLen := len(*es.orig) - oldCap := cap(*es.orig) - if newLen <= oldLen { - *es.orig = (*es.orig)[:newLen:oldCap] - return - } - - if newLen > oldCap { - newOrig := make([]*otlpmetrics.DoubleSummaryDataPoint_ValueAtQuantile, oldLen, newLen) - copy(newOrig, *es.orig) - *es.orig = newOrig - } - - // Add extra empty elements to the array. - extraOrigs := make([]otlpmetrics.DoubleSummaryDataPoint_ValueAtQuantile, newLen-oldLen) - for i := range extraOrigs { - *es.orig = append(*es.orig, &extraOrigs[i]) - } -} - -// Append will increase the length of the ValueAtQuantileSlice by one and set the -// given ValueAtQuantile at that new position. The original ValueAtQuantile -// could still be referenced so do not reuse it after passing it to this -// method. -// Deprecated: Use AppendEmpty. 
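Because Append is deprecated throughout this file, callers were expected to grow slices with AppendEmpty and then fill in the returned element, e.g. for a summary quantile (same import assumption as above):

    func addQuantile(sdp pdata.SummaryDataPoint) {
        q := sdp.QuantileValues().AppendEmpty() // replaces the old Resize/Append pattern
        q.SetQuantile(0.99)
        q.SetValue(250.0)
    }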
-func (es ValueAtQuantileSlice) Append(e ValueAtQuantile) { - *es.orig = append(*es.orig, e.orig) -} - -// AppendEmpty will append to the end of the slice an empty ValueAtQuantile. -// It returns the newly added ValueAtQuantile. -func (es ValueAtQuantileSlice) AppendEmpty() ValueAtQuantile { - *es.orig = append(*es.orig, &otlpmetrics.DoubleSummaryDataPoint_ValueAtQuantile{}) - return es.At(es.Len() - 1) -} - -// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. -// The current slice will be cleared. -func (es ValueAtQuantileSlice) MoveAndAppendTo(dest ValueAtQuantileSlice) { - if *dest.orig == nil { - // We can simply move the entire vector and avoid any allocations. - *dest.orig = *es.orig - } else { - *dest.orig = append(*dest.orig, *es.orig...) - } - *es.orig = nil -} - -// RemoveIf calls f sequentially for each element present in the slice. -// If f returns true, the element is removed from the slice. -func (es ValueAtQuantileSlice) RemoveIf(f func(ValueAtQuantile) bool) { - newLen := 0 - for i := 0; i < len(*es.orig); i++ { - if f(es.At(i)) { - continue - } - if newLen == i { - // Nothing to move, element is at the right place. - newLen++ - continue - } - (*es.orig)[newLen] = (*es.orig)[i] - newLen++ - } - // TODO: Prevent memory leak by erasing truncated values. - *es.orig = (*es.orig)[:newLen] -} - -// ValueAtQuantile is a quantile value within a Summary data point. -// -// This is a reference type, if passed by value and callee modifies it the -// caller will see the modification. -// -// Must use NewValueAtQuantile function to create new instances. -// Important: zero-initialized instance is not valid for use. -type ValueAtQuantile struct { - orig *otlpmetrics.DoubleSummaryDataPoint_ValueAtQuantile -} - -func newValueAtQuantile(orig *otlpmetrics.DoubleSummaryDataPoint_ValueAtQuantile) ValueAtQuantile { - return ValueAtQuantile{orig: orig} -} - -// NewValueAtQuantile creates a new empty ValueAtQuantile. -// -// This must be used only in testing code since no "Set" method available. -func NewValueAtQuantile() ValueAtQuantile { - return newValueAtQuantile(&otlpmetrics.DoubleSummaryDataPoint_ValueAtQuantile{}) -} - -// Quantile returns the quantile associated with this ValueAtQuantile. -func (ms ValueAtQuantile) Quantile() float64 { - return (*ms.orig).Quantile -} - -// SetQuantile replaces the quantile associated with this ValueAtQuantile. -func (ms ValueAtQuantile) SetQuantile(v float64) { - (*ms.orig).Quantile = v -} - -// Value returns the value associated with this ValueAtQuantile. -func (ms ValueAtQuantile) Value() float64 { - return (*ms.orig).Value -} - -// SetValue replaces the value associated with this ValueAtQuantile. -func (ms ValueAtQuantile) SetValue(v float64) { - (*ms.orig).Value = v -} - -// CopyTo copies all properties from the current struct to the dest. -func (ms ValueAtQuantile) CopyTo(dest ValueAtQuantile) { - dest.SetQuantile(ms.Quantile()) - dest.SetValue(ms.Value()) -} - -// IntExemplarSlice logically represents a slice of IntExemplar. -// -// This is a reference type. If passed by value and callee modifies it, the -// caller will see the modification. -// -// Must use NewIntExemplarSlice function to create new instances. -// Important: zero-initialized instance is not valid for use. -type IntExemplarSlice struct { - // orig points to the slice otlpmetrics.IntExemplar field contained somewhere else. - // We use pointer-to-slice to be able to modify it in functions like Resize. 
- orig *[]otlpmetrics.IntExemplar -} - -func newIntExemplarSlice(orig *[]otlpmetrics.IntExemplar) IntExemplarSlice { - return IntExemplarSlice{orig} -} - -// NewIntExemplarSlice creates a IntExemplarSlice with 0 elements. -// Can use "Resize" to initialize with a given length. -func NewIntExemplarSlice() IntExemplarSlice { - orig := []otlpmetrics.IntExemplar(nil) - return IntExemplarSlice{&orig} -} - -// Len returns the number of elements in the slice. -// -// Returns "0" for a newly instance created with "NewIntExemplarSlice()". -func (es IntExemplarSlice) Len() int { - return len(*es.orig) -} - -// At returns the element at the given index. -// -// This function is used mostly for iterating over all the values in the slice: -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// ... // Do something with the element -// } -func (es IntExemplarSlice) At(ix int) IntExemplar { - return newIntExemplar(&(*es.orig)[ix]) -} - -// CopyTo copies all elements from the current slice to the dest. -func (es IntExemplarSlice) CopyTo(dest IntExemplarSlice) { - srcLen := es.Len() - destCap := cap(*dest.orig) - if srcLen <= destCap { - (*dest.orig) = (*dest.orig)[:srcLen:destCap] - } else { - (*dest.orig) = make([]otlpmetrics.IntExemplar, srcLen) - } - - for i := range *es.orig { - newIntExemplar(&(*es.orig)[i]).CopyTo(newIntExemplar(&(*dest.orig)[i])) - } -} - -// Resize is an operation that resizes the slice: -// 1. If the newLen <= len then equivalent with slice[0:newLen:cap]. -// 2. If the newLen > len then (newLen - cap) empty elements will be appended to the slice. -// -// Here is how a new IntExemplarSlice can be initialized: -// es := NewIntExemplarSlice() -// es.Resize(4) -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// // Here should set all the values for e. -// } -func (es IntExemplarSlice) Resize(newLen int) { - oldLen := len(*es.orig) - oldCap := cap(*es.orig) - if newLen <= oldLen { - *es.orig = (*es.orig)[:newLen:oldCap] - return - } - - if newLen > oldCap { - newOrig := make([]otlpmetrics.IntExemplar, oldLen, newLen) - copy(newOrig, *es.orig) - *es.orig = newOrig - } - - // Add extra empty elements to the array. - empty := otlpmetrics.IntExemplar{} - for i := oldLen; i < newLen; i++ { - *es.orig = append(*es.orig, empty) - } -} - -// Append will increase the length of the IntExemplarSlice by one and set the -// given IntExemplar at that new position. The original IntExemplar -// could still be referenced so do not reuse it after passing it to this -// method. -// Deprecated: Use AppendEmpty. -func (es IntExemplarSlice) Append(e IntExemplar) { - *es.orig = append(*es.orig, *e.orig) -} - -// AppendEmpty will append to the end of the slice an empty IntExemplar. -// It returns the newly added IntExemplar. -func (es IntExemplarSlice) AppendEmpty() IntExemplar { - *es.orig = append(*es.orig, otlpmetrics.IntExemplar{}) - return es.At(es.Len() - 1) -} - -// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. -// The current slice will be cleared. -func (es IntExemplarSlice) MoveAndAppendTo(dest IntExemplarSlice) { - if *dest.orig == nil { - // We can simply move the entire vector and avoid any allocations. - *dest.orig = *es.orig - } else { - *dest.orig = append(*dest.orig, *es.orig...) - } - *es.orig = nil -} - -// RemoveIf calls f sequentially for each element present in the slice. -// If f returns true, the element is removed from the slice. 
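RemoveIf gives in-place filtering without reallocating the backing array; a minimal sketch (same import assumption as above):

    func dropNegativeExemplars(es pdata.IntExemplarSlice) {
        es.RemoveIf(func(e pdata.IntExemplar) bool {
            return e.Value() < 0 // returning true removes the element
        })
    }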
-func (es IntExemplarSlice) RemoveIf(f func(IntExemplar) bool) { - newLen := 0 - for i := 0; i < len(*es.orig); i++ { - if f(es.At(i)) { - continue - } - if newLen == i { - // Nothing to move, element is at the right place. - newLen++ - continue - } - (*es.orig)[newLen] = (*es.orig)[i] - newLen++ - } - // TODO: Prevent memory leak by erasing truncated values. - *es.orig = (*es.orig)[:newLen] -} - -// IntExemplar is a sample input int measurement. -// -// Exemplars also hold information about the environment when the measurement was recorded, -// for example the span and trace ID of the active span when the exemplar was recorded. -// -// This is a reference type, if passed by value and callee modifies it the -// caller will see the modification. -// -// Must use NewIntExemplar function to create new instances. -// Important: zero-initialized instance is not valid for use. -type IntExemplar struct { - orig *otlpmetrics.IntExemplar -} - -func newIntExemplar(orig *otlpmetrics.IntExemplar) IntExemplar { - return IntExemplar{orig: orig} -} - -// NewIntExemplar creates a new empty IntExemplar. -// -// This must be used only in testing code since no "Set" method available. -func NewIntExemplar() IntExemplar { - return newIntExemplar(&otlpmetrics.IntExemplar{}) -} - -// Timestamp returns the timestamp associated with this IntExemplar. -func (ms IntExemplar) Timestamp() Timestamp { - return Timestamp((*ms.orig).TimeUnixNano) -} - -// SetTimestamp replaces the timestamp associated with this IntExemplar. -func (ms IntExemplar) SetTimestamp(v Timestamp) { - (*ms.orig).TimeUnixNano = uint64(v) -} - -// Value returns the value associated with this IntExemplar. -func (ms IntExemplar) Value() int64 { - return (*ms.orig).Value -} - -// SetValue replaces the value associated with this IntExemplar. -func (ms IntExemplar) SetValue(v int64) { - (*ms.orig).Value = v -} - -// FilteredLabels returns the FilteredLabels associated with this IntExemplar. -func (ms IntExemplar) FilteredLabels() StringMap { - return newStringMap(&(*ms.orig).FilteredLabels) -} - -// CopyTo copies all properties from the current struct to the dest. -func (ms IntExemplar) CopyTo(dest IntExemplar) { - dest.SetTimestamp(ms.Timestamp()) - dest.SetValue(ms.Value()) - ms.FilteredLabels().CopyTo(dest.FilteredLabels()) -} - -// ExemplarSlice logically represents a slice of Exemplar. -// -// This is a reference type. If passed by value and callee modifies it, the -// caller will see the modification. -// -// Must use NewExemplarSlice function to create new instances. -// Important: zero-initialized instance is not valid for use. -type ExemplarSlice struct { - // orig points to the slice otlpmetrics.DoubleExemplar field contained somewhere else. - // We use pointer-to-slice to be able to modify it in functions like Resize. - orig *[]otlpmetrics.DoubleExemplar -} - -func newExemplarSlice(orig *[]otlpmetrics.DoubleExemplar) ExemplarSlice { - return ExemplarSlice{orig} -} - -// NewExemplarSlice creates a ExemplarSlice with 0 elements. -// Can use "Resize" to initialize with a given length. -func NewExemplarSlice() ExemplarSlice { - orig := []otlpmetrics.DoubleExemplar(nil) - return ExemplarSlice{&orig} -} - -// Len returns the number of elements in the slice. -// -// Returns "0" for a newly instance created with "NewExemplarSlice()". -func (es ExemplarSlice) Len() int { - return len(*es.orig) -} - -// At returns the element at the given index. 
-// -// This function is used mostly for iterating over all the values in the slice: -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// ... // Do something with the element -// } -func (es ExemplarSlice) At(ix int) Exemplar { - return newExemplar(&(*es.orig)[ix]) -} - -// CopyTo copies all elements from the current slice to the dest. -func (es ExemplarSlice) CopyTo(dest ExemplarSlice) { - srcLen := es.Len() - destCap := cap(*dest.orig) - if srcLen <= destCap { - (*dest.orig) = (*dest.orig)[:srcLen:destCap] - } else { - (*dest.orig) = make([]otlpmetrics.DoubleExemplar, srcLen) - } - - for i := range *es.orig { - newExemplar(&(*es.orig)[i]).CopyTo(newExemplar(&(*dest.orig)[i])) - } -} - -// Resize is an operation that resizes the slice: -// 1. If the newLen <= len then equivalent with slice[0:newLen:cap]. -// 2. If the newLen > len then (newLen - cap) empty elements will be appended to the slice. -// -// Here is how a new ExemplarSlice can be initialized: -// es := NewExemplarSlice() -// es.Resize(4) -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// // Here should set all the values for e. -// } -func (es ExemplarSlice) Resize(newLen int) { - oldLen := len(*es.orig) - oldCap := cap(*es.orig) - if newLen <= oldLen { - *es.orig = (*es.orig)[:newLen:oldCap] - return - } - - if newLen > oldCap { - newOrig := make([]otlpmetrics.DoubleExemplar, oldLen, newLen) - copy(newOrig, *es.orig) - *es.orig = newOrig - } - - // Add extra empty elements to the array. - empty := otlpmetrics.DoubleExemplar{} - for i := oldLen; i < newLen; i++ { - *es.orig = append(*es.orig, empty) - } -} - -// Append will increase the length of the ExemplarSlice by one and set the -// given Exemplar at that new position. The original Exemplar -// could still be referenced so do not reuse it after passing it to this -// method. -// Deprecated: Use AppendEmpty. -func (es ExemplarSlice) Append(e Exemplar) { - *es.orig = append(*es.orig, *e.orig) -} - -// AppendEmpty will append to the end of the slice an empty Exemplar. -// It returns the newly added Exemplar. -func (es ExemplarSlice) AppendEmpty() Exemplar { - *es.orig = append(*es.orig, otlpmetrics.DoubleExemplar{}) - return es.At(es.Len() - 1) -} - -// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. -// The current slice will be cleared. -func (es ExemplarSlice) MoveAndAppendTo(dest ExemplarSlice) { - if *dest.orig == nil { - // We can simply move the entire vector and avoid any allocations. - *dest.orig = *es.orig - } else { - *dest.orig = append(*dest.orig, *es.orig...) - } - *es.orig = nil -} - -// RemoveIf calls f sequentially for each element present in the slice. -// If f returns true, the element is removed from the slice. -func (es ExemplarSlice) RemoveIf(f func(Exemplar) bool) { - newLen := 0 - for i := 0; i < len(*es.orig); i++ { - if f(es.At(i)) { - continue - } - if newLen == i { - // Nothing to move, element is at the right place. - newLen++ - continue - } - (*es.orig)[newLen] = (*es.orig)[i] - newLen++ - } - // TODO: Prevent memory leak by erasing truncated values. - *es.orig = (*es.orig)[:newLen] -} - -// Exemplar is a sample input double measurement. -// -// Exemplars also hold information about the environment when the measurement was recorded, -// for example the span and trace ID of the active span when the exemplar was recorded. -// -// This is a reference type, if passed by value and callee modifies it the -// caller will see the modification. 
-// -// Must use NewExemplar function to create new instances. -// Important: zero-initialized instance is not valid for use. -type Exemplar struct { - orig *otlpmetrics.DoubleExemplar -} - -func newExemplar(orig *otlpmetrics.DoubleExemplar) Exemplar { - return Exemplar{orig: orig} -} - -// NewExemplar creates a new empty Exemplar. -// -// This must be used only in testing code since no "Set" method available. -func NewExemplar() Exemplar { - return newExemplar(&otlpmetrics.DoubleExemplar{}) -} - -// Timestamp returns the timestamp associated with this Exemplar. -func (ms Exemplar) Timestamp() Timestamp { - return Timestamp((*ms.orig).TimeUnixNano) -} - -// SetTimestamp replaces the timestamp associated with this Exemplar. -func (ms Exemplar) SetTimestamp(v Timestamp) { - (*ms.orig).TimeUnixNano = uint64(v) -} - -// Value returns the value associated with this Exemplar. -func (ms Exemplar) Value() float64 { - return (*ms.orig).Value -} - -// SetValue replaces the value associated with this Exemplar. -func (ms Exemplar) SetValue(v float64) { - (*ms.orig).Value = v -} - -// FilteredLabels returns the FilteredLabels associated with this Exemplar. -func (ms Exemplar) FilteredLabels() StringMap { - return newStringMap(&(*ms.orig).FilteredLabels) -} - -// CopyTo copies all properties from the current struct to the dest. -func (ms Exemplar) CopyTo(dest Exemplar) { - dest.SetTimestamp(ms.Timestamp()) - dest.SetValue(ms.Value()) - ms.FilteredLabels().CopyTo(dest.FilteredLabels()) -} diff --git a/internal/otel_collector/consumer/pdata/generated_resource.go b/internal/otel_collector/consumer/pdata/generated_resource.go deleted file mode 100644 index 069e3562624..00000000000 --- a/internal/otel_collector/consumer/pdata/generated_resource.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by "cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "go run cmd/pdatagen/main.go". - -package pdata - -import ( - otlpresource "go.opentelemetry.io/collector/internal/data/protogen/resource/v1" -) - -// Resource is a message representing the resource information. -// -// This is a reference type, if passed by value and callee modifies it the -// caller will see the modification. -// -// Must use NewResource function to create new instances. -// Important: zero-initialized instance is not valid for use. -type Resource struct { - orig *otlpresource.Resource -} - -func newResource(orig *otlpresource.Resource) Resource { - return Resource{orig: orig} -} - -// NewResource creates a new empty Resource. -// -// This must be used only in testing code since no "Set" method available. -func NewResource() Resource { - return newResource(&otlpresource.Resource{}) -} - -// Attributes returns the Attributes associated with this Resource. -func (ms Resource) Attributes() AttributeMap { - return newAttributeMap(&(*ms.orig).Attributes) -} - -// CopyTo copies all properties from the current struct to the dest. 
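A minimal sketch of tagging a Resource through the Attributes accessor above; AttributeMap and its InsertString helper live in the package's common.go, which this patch also deletes, so both are assumptions here:

    func tagResource(res pdata.Resource) {
        res.Attributes().InsertString("service.name", "my-service")
        res.Attributes().InsertString("deployment.environment", "staging")
    }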
-func (ms Resource) CopyTo(dest Resource) { - ms.Attributes().CopyTo(dest.Attributes()) -} diff --git a/internal/otel_collector/consumer/pdata/generated_trace.go b/internal/otel_collector/consumer/pdata/generated_trace.go deleted file mode 100644 index dcf52f9b834..00000000000 --- a/internal/otel_collector/consumer/pdata/generated_trace.go +++ /dev/null @@ -1,1168 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by "cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "go run cmd/pdatagen/main.go". - -package pdata - -import ( - otlptrace "go.opentelemetry.io/collector/internal/data/protogen/trace/v1" -) - -// ResourceSpansSlice logically represents a slice of ResourceSpans. -// -// This is a reference type. If passed by value and callee modifies it, the -// caller will see the modification. -// -// Must use NewResourceSpansSlice function to create new instances. -// Important: zero-initialized instance is not valid for use. -type ResourceSpansSlice struct { - // orig points to the slice otlptrace.ResourceSpans field contained somewhere else. - // We use pointer-to-slice to be able to modify it in functions like Resize. - orig *[]*otlptrace.ResourceSpans -} - -func newResourceSpansSlice(orig *[]*otlptrace.ResourceSpans) ResourceSpansSlice { - return ResourceSpansSlice{orig} -} - -// NewResourceSpansSlice creates a ResourceSpansSlice with 0 elements. -// Can use "Resize" to initialize with a given length. -func NewResourceSpansSlice() ResourceSpansSlice { - orig := []*otlptrace.ResourceSpans(nil) - return ResourceSpansSlice{&orig} -} - -// Len returns the number of elements in the slice. -// -// Returns "0" for a newly instance created with "NewResourceSpansSlice()". -func (es ResourceSpansSlice) Len() int { - return len(*es.orig) -} - -// At returns the element at the given index. -// -// This function is used mostly for iterating over all the values in the slice: -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// ... // Do something with the element -// } -func (es ResourceSpansSlice) At(ix int) ResourceSpans { - return newResourceSpans((*es.orig)[ix]) -} - -// CopyTo copies all elements from the current slice to the dest. -func (es ResourceSpansSlice) CopyTo(dest ResourceSpansSlice) { - srcLen := es.Len() - destCap := cap(*dest.orig) - if srcLen <= destCap { - (*dest.orig) = (*dest.orig)[:srcLen:destCap] - for i := range *es.orig { - newResourceSpans((*es.orig)[i]).CopyTo(newResourceSpans((*dest.orig)[i])) - } - return - } - origs := make([]otlptrace.ResourceSpans, srcLen) - wrappers := make([]*otlptrace.ResourceSpans, srcLen) - for i := range *es.orig { - wrappers[i] = &origs[i] - newResourceSpans((*es.orig)[i]).CopyTo(newResourceSpans(wrappers[i])) - } - *dest.orig = wrappers -} - -// Resize is an operation that resizes the slice: -// 1. If the newLen <= len then equivalent with slice[0:newLen:cap]. -// 2. 
If the newLen > len then (newLen - cap) empty elements will be appended to the slice. -// -// Here is how a new ResourceSpansSlice can be initialized: -// es := NewResourceSpansSlice() -// es.Resize(4) -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// // Here should set all the values for e. -// } -func (es ResourceSpansSlice) Resize(newLen int) { - oldLen := len(*es.orig) - oldCap := cap(*es.orig) - if newLen <= oldLen { - *es.orig = (*es.orig)[:newLen:oldCap] - return - } - - if newLen > oldCap { - newOrig := make([]*otlptrace.ResourceSpans, oldLen, newLen) - copy(newOrig, *es.orig) - *es.orig = newOrig - } - - // Add extra empty elements to the array. - extraOrigs := make([]otlptrace.ResourceSpans, newLen-oldLen) - for i := range extraOrigs { - *es.orig = append(*es.orig, &extraOrigs[i]) - } -} - -// Append will increase the length of the ResourceSpansSlice by one and set the -// given ResourceSpans at that new position. The original ResourceSpans -// could still be referenced so do not reuse it after passing it to this -// method. -// Deprecated: Use AppendEmpty. -func (es ResourceSpansSlice) Append(e ResourceSpans) { - *es.orig = append(*es.orig, e.orig) -} - -// AppendEmpty will append to the end of the slice an empty ResourceSpans. -// It returns the newly added ResourceSpans. -func (es ResourceSpansSlice) AppendEmpty() ResourceSpans { - *es.orig = append(*es.orig, &otlptrace.ResourceSpans{}) - return es.At(es.Len() - 1) -} - -// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. -// The current slice will be cleared. -func (es ResourceSpansSlice) MoveAndAppendTo(dest ResourceSpansSlice) { - if *dest.orig == nil { - // We can simply move the entire vector and avoid any allocations. - *dest.orig = *es.orig - } else { - *dest.orig = append(*dest.orig, *es.orig...) - } - *es.orig = nil -} - -// RemoveIf calls f sequentially for each element present in the slice. -// If f returns true, the element is removed from the slice. -func (es ResourceSpansSlice) RemoveIf(f func(ResourceSpans) bool) { - newLen := 0 - for i := 0; i < len(*es.orig); i++ { - if f(es.At(i)) { - continue - } - if newLen == i { - // Nothing to move, element is at the right place. - newLen++ - continue - } - (*es.orig)[newLen] = (*es.orig)[i] - newLen++ - } - // TODO: Prevent memory leak by erasing truncated values. - *es.orig = (*es.orig)[:newLen] -} - -// ResourceSpans is a collection of spans from a Resource. -// -// This is a reference type, if passed by value and callee modifies it the -// caller will see the modification. -// -// Must use NewResourceSpans function to create new instances. -// Important: zero-initialized instance is not valid for use. -type ResourceSpans struct { - orig *otlptrace.ResourceSpans -} - -func newResourceSpans(orig *otlptrace.ResourceSpans) ResourceSpans { - return ResourceSpans{orig: orig} -} - -// NewResourceSpans creates a new empty ResourceSpans. -// -// This must be used only in testing code since no "Set" method available. -func NewResourceSpans() ResourceSpans { - return newResourceSpans(&otlptrace.ResourceSpans{}) -} - -// Resource returns the resource associated with this ResourceSpans. -func (ms ResourceSpans) Resource() Resource { - return newResource(&(*ms.orig).Resource) -} - -// InstrumentationLibrarySpans returns the InstrumentationLibrarySpans associated with this ResourceSpans. 
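MoveAndAppendTo was the allocation-free way to merge batches of ResourceSpans; a minimal sketch, assuming pdata.Traces from the package's traces.go (also deleted by this patch) exposes the ResourceSpansSlice:

    func mergeTraces(accumulated, batch pdata.Traces) {
        // After the call, batch's slice is cleared and accumulated owns the elements.
        batch.ResourceSpans().MoveAndAppendTo(accumulated.ResourceSpans())
    }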
-func (ms ResourceSpans) InstrumentationLibrarySpans() InstrumentationLibrarySpansSlice { - return newInstrumentationLibrarySpansSlice(&(*ms.orig).InstrumentationLibrarySpans) -} - -// CopyTo copies all properties from the current struct to the dest. -func (ms ResourceSpans) CopyTo(dest ResourceSpans) { - ms.Resource().CopyTo(dest.Resource()) - ms.InstrumentationLibrarySpans().CopyTo(dest.InstrumentationLibrarySpans()) -} - -// InstrumentationLibrarySpansSlice logically represents a slice of InstrumentationLibrarySpans. -// -// This is a reference type. If passed by value and callee modifies it, the -// caller will see the modification. -// -// Must use NewInstrumentationLibrarySpansSlice function to create new instances. -// Important: zero-initialized instance is not valid for use. -type InstrumentationLibrarySpansSlice struct { - // orig points to the slice otlptrace.InstrumentationLibrarySpans field contained somewhere else. - // We use pointer-to-slice to be able to modify it in functions like Resize. - orig *[]*otlptrace.InstrumentationLibrarySpans -} - -func newInstrumentationLibrarySpansSlice(orig *[]*otlptrace.InstrumentationLibrarySpans) InstrumentationLibrarySpansSlice { - return InstrumentationLibrarySpansSlice{orig} -} - -// NewInstrumentationLibrarySpansSlice creates a InstrumentationLibrarySpansSlice with 0 elements. -// Can use "Resize" to initialize with a given length. -func NewInstrumentationLibrarySpansSlice() InstrumentationLibrarySpansSlice { - orig := []*otlptrace.InstrumentationLibrarySpans(nil) - return InstrumentationLibrarySpansSlice{&orig} -} - -// Len returns the number of elements in the slice. -// -// Returns "0" for a newly instance created with "NewInstrumentationLibrarySpansSlice()". -func (es InstrumentationLibrarySpansSlice) Len() int { - return len(*es.orig) -} - -// At returns the element at the given index. -// -// This function is used mostly for iterating over all the values in the slice: -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// ... // Do something with the element -// } -func (es InstrumentationLibrarySpansSlice) At(ix int) InstrumentationLibrarySpans { - return newInstrumentationLibrarySpans((*es.orig)[ix]) -} - -// CopyTo copies all elements from the current slice to the dest. -func (es InstrumentationLibrarySpansSlice) CopyTo(dest InstrumentationLibrarySpansSlice) { - srcLen := es.Len() - destCap := cap(*dest.orig) - if srcLen <= destCap { - (*dest.orig) = (*dest.orig)[:srcLen:destCap] - for i := range *es.orig { - newInstrumentationLibrarySpans((*es.orig)[i]).CopyTo(newInstrumentationLibrarySpans((*dest.orig)[i])) - } - return - } - origs := make([]otlptrace.InstrumentationLibrarySpans, srcLen) - wrappers := make([]*otlptrace.InstrumentationLibrarySpans, srcLen) - for i := range *es.orig { - wrappers[i] = &origs[i] - newInstrumentationLibrarySpans((*es.orig)[i]).CopyTo(newInstrumentationLibrarySpans(wrappers[i])) - } - *dest.orig = wrappers -} - -// Resize is an operation that resizes the slice: -// 1. If the newLen <= len then equivalent with slice[0:newLen:cap]. -// 2. If the newLen > len then (newLen - cap) empty elements will be appended to the slice. -// -// Here is how a new InstrumentationLibrarySpansSlice can be initialized: -// es := NewInstrumentationLibrarySpansSlice() -// es.Resize(4) -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// // Here should set all the values for e. 
-// } -func (es InstrumentationLibrarySpansSlice) Resize(newLen int) { - oldLen := len(*es.orig) - oldCap := cap(*es.orig) - if newLen <= oldLen { - *es.orig = (*es.orig)[:newLen:oldCap] - return - } - - if newLen > oldCap { - newOrig := make([]*otlptrace.InstrumentationLibrarySpans, oldLen, newLen) - copy(newOrig, *es.orig) - *es.orig = newOrig - } - - // Add extra empty elements to the array. - extraOrigs := make([]otlptrace.InstrumentationLibrarySpans, newLen-oldLen) - for i := range extraOrigs { - *es.orig = append(*es.orig, &extraOrigs[i]) - } -} - -// Append will increase the length of the InstrumentationLibrarySpansSlice by one and set the -// given InstrumentationLibrarySpans at that new position. The original InstrumentationLibrarySpans -// could still be referenced so do not reuse it after passing it to this -// method. -// Deprecated: Use AppendEmpty. -func (es InstrumentationLibrarySpansSlice) Append(e InstrumentationLibrarySpans) { - *es.orig = append(*es.orig, e.orig) -} - -// AppendEmpty will append to the end of the slice an empty InstrumentationLibrarySpans. -// It returns the newly added InstrumentationLibrarySpans. -func (es InstrumentationLibrarySpansSlice) AppendEmpty() InstrumentationLibrarySpans { - *es.orig = append(*es.orig, &otlptrace.InstrumentationLibrarySpans{}) - return es.At(es.Len() - 1) -} - -// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. -// The current slice will be cleared. -func (es InstrumentationLibrarySpansSlice) MoveAndAppendTo(dest InstrumentationLibrarySpansSlice) { - if *dest.orig == nil { - // We can simply move the entire vector and avoid any allocations. - *dest.orig = *es.orig - } else { - *dest.orig = append(*dest.orig, *es.orig...) - } - *es.orig = nil -} - -// RemoveIf calls f sequentially for each element present in the slice. -// If f returns true, the element is removed from the slice. -func (es InstrumentationLibrarySpansSlice) RemoveIf(f func(InstrumentationLibrarySpans) bool) { - newLen := 0 - for i := 0; i < len(*es.orig); i++ { - if f(es.At(i)) { - continue - } - if newLen == i { - // Nothing to move, element is at the right place. - newLen++ - continue - } - (*es.orig)[newLen] = (*es.orig)[i] - newLen++ - } - // TODO: Prevent memory leak by erasing truncated values. - *es.orig = (*es.orig)[:newLen] -} - -// InstrumentationLibrarySpans is a collection of spans from a LibraryInstrumentation. -// -// This is a reference type, if passed by value and callee modifies it the -// caller will see the modification. -// -// Must use NewInstrumentationLibrarySpans function to create new instances. -// Important: zero-initialized instance is not valid for use. -type InstrumentationLibrarySpans struct { - orig *otlptrace.InstrumentationLibrarySpans -} - -func newInstrumentationLibrarySpans(orig *otlptrace.InstrumentationLibrarySpans) InstrumentationLibrarySpans { - return InstrumentationLibrarySpans{orig: orig} -} - -// NewInstrumentationLibrarySpans creates a new empty InstrumentationLibrarySpans. -// -// This must be used only in testing code since no "Set" method available. -func NewInstrumentationLibrarySpans() InstrumentationLibrarySpans { - return newInstrumentationLibrarySpans(&otlptrace.InstrumentationLibrarySpans{}) -} - -// InstrumentationLibrary returns the instrumentationlibrary associated with this InstrumentationLibrarySpans. 
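A minimal sketch of building the ResourceSpans → InstrumentationLibrarySpans → Span hierarchy top-down with AppendEmpty, assuming NewTraces and InsertString from the package's traces.go and common.go (both deleted by this patch):

    func newTestTraces() pdata.Traces {
        td := pdata.NewTraces()
        rs := td.ResourceSpans().AppendEmpty()
        rs.Resource().Attributes().InsertString("service.name", "my-service")
        ils := rs.InstrumentationLibrarySpans().AppendEmpty()
        span := ils.Spans().AppendEmpty()
        span.SetName("GET /health")
        return td
    }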
-func (ms InstrumentationLibrarySpans) InstrumentationLibrary() InstrumentationLibrary { - return newInstrumentationLibrary(&(*ms.orig).InstrumentationLibrary) -} - -// Spans returns the Spans associated with this InstrumentationLibrarySpans. -func (ms InstrumentationLibrarySpans) Spans() SpanSlice { - return newSpanSlice(&(*ms.orig).Spans) -} - -// CopyTo copies all properties from the current struct to the dest. -func (ms InstrumentationLibrarySpans) CopyTo(dest InstrumentationLibrarySpans) { - ms.InstrumentationLibrary().CopyTo(dest.InstrumentationLibrary()) - ms.Spans().CopyTo(dest.Spans()) -} - -// SpanSlice logically represents a slice of Span. -// -// This is a reference type. If passed by value and callee modifies it, the -// caller will see the modification. -// -// Must use NewSpanSlice function to create new instances. -// Important: zero-initialized instance is not valid for use. -type SpanSlice struct { - // orig points to the slice otlptrace.Span field contained somewhere else. - // We use pointer-to-slice to be able to modify it in functions like Resize. - orig *[]*otlptrace.Span -} - -func newSpanSlice(orig *[]*otlptrace.Span) SpanSlice { - return SpanSlice{orig} -} - -// NewSpanSlice creates a SpanSlice with 0 elements. -// Can use "Resize" to initialize with a given length. -func NewSpanSlice() SpanSlice { - orig := []*otlptrace.Span(nil) - return SpanSlice{&orig} -} - -// Len returns the number of elements in the slice. -// -// Returns "0" for a newly instance created with "NewSpanSlice()". -func (es SpanSlice) Len() int { - return len(*es.orig) -} - -// At returns the element at the given index. -// -// This function is used mostly for iterating over all the values in the slice: -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// ... // Do something with the element -// } -func (es SpanSlice) At(ix int) Span { - return newSpan((*es.orig)[ix]) -} - -// CopyTo copies all elements from the current slice to the dest. -func (es SpanSlice) CopyTo(dest SpanSlice) { - srcLen := es.Len() - destCap := cap(*dest.orig) - if srcLen <= destCap { - (*dest.orig) = (*dest.orig)[:srcLen:destCap] - for i := range *es.orig { - newSpan((*es.orig)[i]).CopyTo(newSpan((*dest.orig)[i])) - } - return - } - origs := make([]otlptrace.Span, srcLen) - wrappers := make([]*otlptrace.Span, srcLen) - for i := range *es.orig { - wrappers[i] = &origs[i] - newSpan((*es.orig)[i]).CopyTo(newSpan(wrappers[i])) - } - *dest.orig = wrappers -} - -// Resize is an operation that resizes the slice: -// 1. If the newLen <= len then equivalent with slice[0:newLen:cap]. -// 2. If the newLen > len then (newLen - cap) empty elements will be appended to the slice. -// -// Here is how a new SpanSlice can be initialized: -// es := NewSpanSlice() -// es.Resize(4) -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// // Here should set all the values for e. -// } -func (es SpanSlice) Resize(newLen int) { - oldLen := len(*es.orig) - oldCap := cap(*es.orig) - if newLen <= oldLen { - *es.orig = (*es.orig)[:newLen:oldCap] - return - } - - if newLen > oldCap { - newOrig := make([]*otlptrace.Span, oldLen, newLen) - copy(newOrig, *es.orig) - *es.orig = newOrig - } - - // Add extra empty elements to the array. - extraOrigs := make([]otlptrace.Span, newLen-oldLen) - for i := range extraOrigs { - *es.orig = append(*es.orig, &extraOrigs[i]) - } -} - -// Append will increase the length of the SpanSlice by one and set the -// given Span at that new position. 
The original Span -// could still be referenced so do not reuse it after passing it to this -// method. -// Deprecated: Use AppendEmpty. -func (es SpanSlice) Append(e Span) { - *es.orig = append(*es.orig, e.orig) -} - -// AppendEmpty will append to the end of the slice an empty Span. -// It returns the newly added Span. -func (es SpanSlice) AppendEmpty() Span { - *es.orig = append(*es.orig, &otlptrace.Span{}) - return es.At(es.Len() - 1) -} - -// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. -// The current slice will be cleared. -func (es SpanSlice) MoveAndAppendTo(dest SpanSlice) { - if *dest.orig == nil { - // We can simply move the entire vector and avoid any allocations. - *dest.orig = *es.orig - } else { - *dest.orig = append(*dest.orig, *es.orig...) - } - *es.orig = nil -} - -// RemoveIf calls f sequentially for each element present in the slice. -// If f returns true, the element is removed from the slice. -func (es SpanSlice) RemoveIf(f func(Span) bool) { - newLen := 0 - for i := 0; i < len(*es.orig); i++ { - if f(es.At(i)) { - continue - } - if newLen == i { - // Nothing to move, element is at the right place. - newLen++ - continue - } - (*es.orig)[newLen] = (*es.orig)[i] - newLen++ - } - // TODO: Prevent memory leak by erasing truncated values. - *es.orig = (*es.orig)[:newLen] -} - -// Span represents a single operation within a trace. -// See Span definition in OTLP: https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/trace/v1/trace.proto -// -// This is a reference type, if passed by value and callee modifies it the -// caller will see the modification. -// -// Must use NewSpan function to create new instances. -// Important: zero-initialized instance is not valid for use. -type Span struct { - orig *otlptrace.Span -} - -func newSpan(orig *otlptrace.Span) Span { - return Span{orig: orig} -} - -// NewSpan creates a new empty Span. -// -// This must be used only in testing code since no "Set" method available. -func NewSpan() Span { - return newSpan(&otlptrace.Span{}) -} - -// TraceID returns the traceid associated with this Span. -func (ms Span) TraceID() TraceID { - return TraceID{orig: ((*ms.orig).TraceId)} -} - -// SetTraceID replaces the traceid associated with this Span. -func (ms Span) SetTraceID(v TraceID) { - (*ms.orig).TraceId = v.orig -} - -// SpanID returns the spanid associated with this Span. -func (ms Span) SpanID() SpanID { - return SpanID{orig: ((*ms.orig).SpanId)} -} - -// SetSpanID replaces the spanid associated with this Span. -func (ms Span) SetSpanID(v SpanID) { - (*ms.orig).SpanId = v.orig -} - -// TraceState returns the tracestate associated with this Span. -func (ms Span) TraceState() TraceState { - return TraceState((*ms.orig).TraceState) -} - -// SetTraceState replaces the tracestate associated with this Span. -func (ms Span) SetTraceState(v TraceState) { - (*ms.orig).TraceState = string(v) -} - -// ParentSpanID returns the parentspanid associated with this Span. -func (ms Span) ParentSpanID() SpanID { - return SpanID{orig: ((*ms.orig).ParentSpanId)} -} - -// SetParentSpanID replaces the parentspanid associated with this Span. -func (ms Span) SetParentSpanID(v SpanID) { - (*ms.orig).ParentSpanId = v.orig -} - -// Name returns the name associated with this Span. -func (ms Span) Name() string { - return (*ms.orig).Name -} - -// SetName replaces the name associated with this Span. 
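A minimal sketch of filling a Span's identity and timing fields through the setters above, assuming the NewTraceID/NewSpanID constructors from the package's traceid.go and spanid.go (also deleted by this patch) and the standard library time package:

    func fillSpan(span pdata.Span) {
        span.SetTraceID(pdata.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}))
        span.SetSpanID(pdata.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8}))
        span.SetName("HTTP GET")
        start := uint64(time.Now().UnixNano())
        span.SetStartTimestamp(pdata.Timestamp(start))
        span.SetEndTimestamp(pdata.Timestamp(start + uint64(150*time.Millisecond)))
    }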
-func (ms Span) SetName(v string) { - (*ms.orig).Name = v -} - -// Kind returns the kind associated with this Span. -func (ms Span) Kind() SpanKind { - return SpanKind((*ms.orig).Kind) -} - -// SetKind replaces the kind associated with this Span. -func (ms Span) SetKind(v SpanKind) { - (*ms.orig).Kind = otlptrace.Span_SpanKind(v) -} - -// StartTimestamp returns the starttimestamp associated with this Span. -func (ms Span) StartTimestamp() Timestamp { - return Timestamp((*ms.orig).StartTimeUnixNano) -} - -// SetStartTimestamp replaces the starttimestamp associated with this Span. -func (ms Span) SetStartTimestamp(v Timestamp) { - (*ms.orig).StartTimeUnixNano = uint64(v) -} - -// EndTimestamp returns the endtimestamp associated with this Span. -func (ms Span) EndTimestamp() Timestamp { - return Timestamp((*ms.orig).EndTimeUnixNano) -} - -// SetEndTimestamp replaces the endtimestamp associated with this Span. -func (ms Span) SetEndTimestamp(v Timestamp) { - (*ms.orig).EndTimeUnixNano = uint64(v) -} - -// Attributes returns the Attributes associated with this Span. -func (ms Span) Attributes() AttributeMap { - return newAttributeMap(&(*ms.orig).Attributes) -} - -// DroppedAttributesCount returns the droppedattributescount associated with this Span. -func (ms Span) DroppedAttributesCount() uint32 { - return (*ms.orig).DroppedAttributesCount -} - -// SetDroppedAttributesCount replaces the droppedattributescount associated with this Span. -func (ms Span) SetDroppedAttributesCount(v uint32) { - (*ms.orig).DroppedAttributesCount = v -} - -// Events returns the Events associated with this Span. -func (ms Span) Events() SpanEventSlice { - return newSpanEventSlice(&(*ms.orig).Events) -} - -// DroppedEventsCount returns the droppedeventscount associated with this Span. -func (ms Span) DroppedEventsCount() uint32 { - return (*ms.orig).DroppedEventsCount -} - -// SetDroppedEventsCount replaces the droppedeventscount associated with this Span. -func (ms Span) SetDroppedEventsCount(v uint32) { - (*ms.orig).DroppedEventsCount = v -} - -// Links returns the Links associated with this Span. -func (ms Span) Links() SpanLinkSlice { - return newSpanLinkSlice(&(*ms.orig).Links) -} - -// DroppedLinksCount returns the droppedlinkscount associated with this Span. -func (ms Span) DroppedLinksCount() uint32 { - return (*ms.orig).DroppedLinksCount -} - -// SetDroppedLinksCount replaces the droppedlinkscount associated with this Span. -func (ms Span) SetDroppedLinksCount(v uint32) { - (*ms.orig).DroppedLinksCount = v -} - -// Status returns the status associated with this Span. -func (ms Span) Status() SpanStatus { - return newSpanStatus(&(*ms.orig).Status) -} - -// CopyTo copies all properties from the current struct to the dest. -func (ms Span) CopyTo(dest Span) { - dest.SetTraceID(ms.TraceID()) - dest.SetSpanID(ms.SpanID()) - dest.SetTraceState(ms.TraceState()) - dest.SetParentSpanID(ms.ParentSpanID()) - dest.SetName(ms.Name()) - dest.SetKind(ms.Kind()) - dest.SetStartTimestamp(ms.StartTimestamp()) - dest.SetEndTimestamp(ms.EndTimestamp()) - ms.Attributes().CopyTo(dest.Attributes()) - dest.SetDroppedAttributesCount(ms.DroppedAttributesCount()) - ms.Events().CopyTo(dest.Events()) - dest.SetDroppedEventsCount(ms.DroppedEventsCount()) - ms.Links().CopyTo(dest.Links()) - dest.SetDroppedLinksCount(ms.DroppedLinksCount()) - ms.Status().CopyTo(dest.Status()) -} - -// SpanEventSlice logically represents a slice of SpanEvent. -// -// This is a reference type. 
If passed by value and callee modifies it, the -// caller will see the modification. -// -// Must use NewSpanEventSlice function to create new instances. -// Important: zero-initialized instance is not valid for use. -type SpanEventSlice struct { - // orig points to the slice otlptrace.Span_Event field contained somewhere else. - // We use pointer-to-slice to be able to modify it in functions like Resize. - orig *[]*otlptrace.Span_Event -} - -func newSpanEventSlice(orig *[]*otlptrace.Span_Event) SpanEventSlice { - return SpanEventSlice{orig} -} - -// NewSpanEventSlice creates a SpanEventSlice with 0 elements. -// Can use "Resize" to initialize with a given length. -func NewSpanEventSlice() SpanEventSlice { - orig := []*otlptrace.Span_Event(nil) - return SpanEventSlice{&orig} -} - -// Len returns the number of elements in the slice. -// -// Returns "0" for a newly instance created with "NewSpanEventSlice()". -func (es SpanEventSlice) Len() int { - return len(*es.orig) -} - -// At returns the element at the given index. -// -// This function is used mostly for iterating over all the values in the slice: -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// ... // Do something with the element -// } -func (es SpanEventSlice) At(ix int) SpanEvent { - return newSpanEvent((*es.orig)[ix]) -} - -// CopyTo copies all elements from the current slice to the dest. -func (es SpanEventSlice) CopyTo(dest SpanEventSlice) { - srcLen := es.Len() - destCap := cap(*dest.orig) - if srcLen <= destCap { - (*dest.orig) = (*dest.orig)[:srcLen:destCap] - for i := range *es.orig { - newSpanEvent((*es.orig)[i]).CopyTo(newSpanEvent((*dest.orig)[i])) - } - return - } - origs := make([]otlptrace.Span_Event, srcLen) - wrappers := make([]*otlptrace.Span_Event, srcLen) - for i := range *es.orig { - wrappers[i] = &origs[i] - newSpanEvent((*es.orig)[i]).CopyTo(newSpanEvent(wrappers[i])) - } - *dest.orig = wrappers -} - -// Resize is an operation that resizes the slice: -// 1. If the newLen <= len then equivalent with slice[0:newLen:cap]. -// 2. If the newLen > len then (newLen - cap) empty elements will be appended to the slice. -// -// Here is how a new SpanEventSlice can be initialized: -// es := NewSpanEventSlice() -// es.Resize(4) -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// // Here should set all the values for e. -// } -func (es SpanEventSlice) Resize(newLen int) { - oldLen := len(*es.orig) - oldCap := cap(*es.orig) - if newLen <= oldLen { - *es.orig = (*es.orig)[:newLen:oldCap] - return - } - - if newLen > oldCap { - newOrig := make([]*otlptrace.Span_Event, oldLen, newLen) - copy(newOrig, *es.orig) - *es.orig = newOrig - } - - // Add extra empty elements to the array. - extraOrigs := make([]otlptrace.Span_Event, newLen-oldLen) - for i := range extraOrigs { - *es.orig = append(*es.orig, &extraOrigs[i]) - } -} - -// Append will increase the length of the SpanEventSlice by one and set the -// given SpanEvent at that new position. The original SpanEvent -// could still be referenced so do not reuse it after passing it to this -// method. -// Deprecated: Use AppendEmpty. -func (es SpanEventSlice) Append(e SpanEvent) { - *es.orig = append(*es.orig, e.orig) -} - -// AppendEmpty will append to the end of the slice an empty SpanEvent. -// It returns the newly added SpanEvent. -func (es SpanEventSlice) AppendEmpty() SpanEvent { - *es.orig = append(*es.orig, &otlptrace.Span_Event{}) - return es.At(es.Len() - 1) -} - -// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. 
-// The current slice will be cleared. -func (es SpanEventSlice) MoveAndAppendTo(dest SpanEventSlice) { - if *dest.orig == nil { - // We can simply move the entire vector and avoid any allocations. - *dest.orig = *es.orig - } else { - *dest.orig = append(*dest.orig, *es.orig...) - } - *es.orig = nil -} - -// RemoveIf calls f sequentially for each element present in the slice. -// If f returns true, the element is removed from the slice. -func (es SpanEventSlice) RemoveIf(f func(SpanEvent) bool) { - newLen := 0 - for i := 0; i < len(*es.orig); i++ { - if f(es.At(i)) { - continue - } - if newLen == i { - // Nothing to move, element is at the right place. - newLen++ - continue - } - (*es.orig)[newLen] = (*es.orig)[i] - newLen++ - } - // TODO: Prevent memory leak by erasing truncated values. - *es.orig = (*es.orig)[:newLen] -} - -// SpanEvent is a time-stamped annotation of the span, consisting of user-supplied -// text description and key-value pairs. See OTLP for event definition. -// -// This is a reference type, if passed by value and callee modifies it the -// caller will see the modification. -// -// Must use NewSpanEvent function to create new instances. -// Important: zero-initialized instance is not valid for use. -type SpanEvent struct { - orig *otlptrace.Span_Event -} - -func newSpanEvent(orig *otlptrace.Span_Event) SpanEvent { - return SpanEvent{orig: orig} -} - -// NewSpanEvent creates a new empty SpanEvent. -// -// This must be used only in testing code since no "Set" method available. -func NewSpanEvent() SpanEvent { - return newSpanEvent(&otlptrace.Span_Event{}) -} - -// Timestamp returns the timestamp associated with this SpanEvent. -func (ms SpanEvent) Timestamp() Timestamp { - return Timestamp((*ms.orig).TimeUnixNano) -} - -// SetTimestamp replaces the timestamp associated with this SpanEvent. -func (ms SpanEvent) SetTimestamp(v Timestamp) { - (*ms.orig).TimeUnixNano = uint64(v) -} - -// Name returns the name associated with this SpanEvent. -func (ms SpanEvent) Name() string { - return (*ms.orig).Name -} - -// SetName replaces the name associated with this SpanEvent. -func (ms SpanEvent) SetName(v string) { - (*ms.orig).Name = v -} - -// Attributes returns the Attributes associated with this SpanEvent. -func (ms SpanEvent) Attributes() AttributeMap { - return newAttributeMap(&(*ms.orig).Attributes) -} - -// DroppedAttributesCount returns the droppedattributescount associated with this SpanEvent. -func (ms SpanEvent) DroppedAttributesCount() uint32 { - return (*ms.orig).DroppedAttributesCount -} - -// SetDroppedAttributesCount replaces the droppedattributescount associated with this SpanEvent. -func (ms SpanEvent) SetDroppedAttributesCount(v uint32) { - (*ms.orig).DroppedAttributesCount = v -} - -// CopyTo copies all properties from the current struct to the dest. -func (ms SpanEvent) CopyTo(dest SpanEvent) { - dest.SetTimestamp(ms.Timestamp()) - dest.SetName(ms.Name()) - ms.Attributes().CopyTo(dest.Attributes()) - dest.SetDroppedAttributesCount(ms.DroppedAttributesCount()) -} - -// SpanLinkSlice logically represents a slice of SpanLink. -// -// This is a reference type. If passed by value and callee modifies it, the -// caller will see the modification. -// -// Must use NewSpanLinkSlice function to create new instances. -// Important: zero-initialized instance is not valid for use. -type SpanLinkSlice struct { - // orig points to the slice otlptrace.Span_Link field contained somewhere else. - // We use pointer-to-slice to be able to modify it in functions like Resize. 
- orig *[]*otlptrace.Span_Link -} - -func newSpanLinkSlice(orig *[]*otlptrace.Span_Link) SpanLinkSlice { - return SpanLinkSlice{orig} -} - -// NewSpanLinkSlice creates a SpanLinkSlice with 0 elements. -// Can use "Resize" to initialize with a given length. -func NewSpanLinkSlice() SpanLinkSlice { - orig := []*otlptrace.Span_Link(nil) - return SpanLinkSlice{&orig} -} - -// Len returns the number of elements in the slice. -// -// Returns "0" for a newly instance created with "NewSpanLinkSlice()". -func (es SpanLinkSlice) Len() int { - return len(*es.orig) -} - -// At returns the element at the given index. -// -// This function is used mostly for iterating over all the values in the slice: -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// ... // Do something with the element -// } -func (es SpanLinkSlice) At(ix int) SpanLink { - return newSpanLink((*es.orig)[ix]) -} - -// CopyTo copies all elements from the current slice to the dest. -func (es SpanLinkSlice) CopyTo(dest SpanLinkSlice) { - srcLen := es.Len() - destCap := cap(*dest.orig) - if srcLen <= destCap { - (*dest.orig) = (*dest.orig)[:srcLen:destCap] - for i := range *es.orig { - newSpanLink((*es.orig)[i]).CopyTo(newSpanLink((*dest.orig)[i])) - } - return - } - origs := make([]otlptrace.Span_Link, srcLen) - wrappers := make([]*otlptrace.Span_Link, srcLen) - for i := range *es.orig { - wrappers[i] = &origs[i] - newSpanLink((*es.orig)[i]).CopyTo(newSpanLink(wrappers[i])) - } - *dest.orig = wrappers -} - -// Resize is an operation that resizes the slice: -// 1. If the newLen <= len then equivalent with slice[0:newLen:cap]. -// 2. If the newLen > len then (newLen - cap) empty elements will be appended to the slice. -// -// Here is how a new SpanLinkSlice can be initialized: -// es := NewSpanLinkSlice() -// es.Resize(4) -// for i := 0; i < es.Len(); i++ { -// e := es.At(i) -// // Here should set all the values for e. -// } -func (es SpanLinkSlice) Resize(newLen int) { - oldLen := len(*es.orig) - oldCap := cap(*es.orig) - if newLen <= oldLen { - *es.orig = (*es.orig)[:newLen:oldCap] - return - } - - if newLen > oldCap { - newOrig := make([]*otlptrace.Span_Link, oldLen, newLen) - copy(newOrig, *es.orig) - *es.orig = newOrig - } - - // Add extra empty elements to the array. - extraOrigs := make([]otlptrace.Span_Link, newLen-oldLen) - for i := range extraOrigs { - *es.orig = append(*es.orig, &extraOrigs[i]) - } -} - -// Append will increase the length of the SpanLinkSlice by one and set the -// given SpanLink at that new position. The original SpanLink -// could still be referenced so do not reuse it after passing it to this -// method. -// Deprecated: Use AppendEmpty. -func (es SpanLinkSlice) Append(e SpanLink) { - *es.orig = append(*es.orig, e.orig) -} - -// AppendEmpty will append to the end of the slice an empty SpanLink. -// It returns the newly added SpanLink. -func (es SpanLinkSlice) AppendEmpty() SpanLink { - *es.orig = append(*es.orig, &otlptrace.Span_Link{}) - return es.At(es.Len() - 1) -} - -// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. -// The current slice will be cleared. -func (es SpanLinkSlice) MoveAndAppendTo(dest SpanLinkSlice) { - if *dest.orig == nil { - // We can simply move the entire vector and avoid any allocations. - *dest.orig = *es.orig - } else { - *dest.orig = append(*dest.orig, *es.orig...) - } - *es.orig = nil -} - -// RemoveIf calls f sequentially for each element present in the slice. -// If f returns true, the element is removed from the slice. 
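Editor's note: `RemoveIf` and `MoveAndAppendTo`, defined above for both the event and link slices, are the in-place filter and bulk-transfer operations of this generated API. A sketch using `SpanEventSlice`:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/consumer/pdata"
)

func main() {
	src := pdata.NewSpanEventSlice()
	for _, name := range []string{"keep", "drop", "keep"} {
		src.AppendEmpty().SetName(name)
	}

	// RemoveIf filters in place: elements for which f returns true are removed.
	src.RemoveIf(func(ev pdata.SpanEvent) bool {
		return ev.Name() == "drop"
	})

	// MoveAndAppendTo transfers the remaining elements and clears the source.
	dst := pdata.NewSpanEventSlice()
	src.MoveAndAppendTo(dst)

	fmt.Println(src.Len(), dst.Len()) // 0 2
}
```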
-func (es SpanLinkSlice) RemoveIf(f func(SpanLink) bool) { - newLen := 0 - for i := 0; i < len(*es.orig); i++ { - if f(es.At(i)) { - continue - } - if newLen == i { - // Nothing to move, element is at the right place. - newLen++ - continue - } - (*es.orig)[newLen] = (*es.orig)[i] - newLen++ - } - // TODO: Prevent memory leak by erasing truncated values. - *es.orig = (*es.orig)[:newLen] -} - -// SpanLink is a pointer from the current span to another span in the same trace or in a -// different trace. -// See Link definition in OTLP: https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/trace/v1/trace.proto -// -// This is a reference type, if passed by value and callee modifies it the -// caller will see the modification. -// -// Must use NewSpanLink function to create new instances. -// Important: zero-initialized instance is not valid for use. -type SpanLink struct { - orig *otlptrace.Span_Link -} - -func newSpanLink(orig *otlptrace.Span_Link) SpanLink { - return SpanLink{orig: orig} -} - -// NewSpanLink creates a new empty SpanLink. -// -// This must be used only in testing code since no "Set" method available. -func NewSpanLink() SpanLink { - return newSpanLink(&otlptrace.Span_Link{}) -} - -// TraceID returns the traceid associated with this SpanLink. -func (ms SpanLink) TraceID() TraceID { - return TraceID{orig: ((*ms.orig).TraceId)} -} - -// SetTraceID replaces the traceid associated with this SpanLink. -func (ms SpanLink) SetTraceID(v TraceID) { - (*ms.orig).TraceId = v.orig -} - -// SpanID returns the spanid associated with this SpanLink. -func (ms SpanLink) SpanID() SpanID { - return SpanID{orig: ((*ms.orig).SpanId)} -} - -// SetSpanID replaces the spanid associated with this SpanLink. -func (ms SpanLink) SetSpanID(v SpanID) { - (*ms.orig).SpanId = v.orig -} - -// TraceState returns the tracestate associated with this SpanLink. -func (ms SpanLink) TraceState() TraceState { - return TraceState((*ms.orig).TraceState) -} - -// SetTraceState replaces the tracestate associated with this SpanLink. -func (ms SpanLink) SetTraceState(v TraceState) { - (*ms.orig).TraceState = string(v) -} - -// Attributes returns the Attributes associated with this SpanLink. -func (ms SpanLink) Attributes() AttributeMap { - return newAttributeMap(&(*ms.orig).Attributes) -} - -// DroppedAttributesCount returns the droppedattributescount associated with this SpanLink. -func (ms SpanLink) DroppedAttributesCount() uint32 { - return (*ms.orig).DroppedAttributesCount -} - -// SetDroppedAttributesCount replaces the droppedattributescount associated with this SpanLink. -func (ms SpanLink) SetDroppedAttributesCount(v uint32) { - (*ms.orig).DroppedAttributesCount = v -} - -// CopyTo copies all properties from the current struct to the dest. -func (ms SpanLink) CopyTo(dest SpanLink) { - dest.SetTraceID(ms.TraceID()) - dest.SetSpanID(ms.SpanID()) - dest.SetTraceState(ms.TraceState()) - ms.Attributes().CopyTo(dest.Attributes()) - dest.SetDroppedAttributesCount(ms.DroppedAttributesCount()) -} - -// SpanStatus is an optional final status for this span. Semantically, when Status was not -// set, that means the span ended without errors and to assume Status.Ok (code = 0). -// -// This is a reference type, if passed by value and callee modifies it the -// caller will see the modification. -// -// Must use NewSpanStatus function to create new instances. -// Important: zero-initialized instance is not valid for use. 
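Editor's note: a sketch of populating a `SpanLink` with the accessors above. `NewSpanLink` is the test-only constructor documented in this file; in pipeline code the link would normally come from `span.Links().AppendEmpty()` instead.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/consumer/pdata"
)

func main() {
	link := pdata.NewSpanLink()
	link.SetTraceID(pdata.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}))
	link.SetSpanID(pdata.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8}))
	link.SetTraceState("vendor=value") // w3c tracestate string

	fmt.Println(link.TraceID().HexString(), link.SpanID().HexString())
}
```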
-type SpanStatus struct { - orig *otlptrace.Status -} - -func newSpanStatus(orig *otlptrace.Status) SpanStatus { - return SpanStatus{orig: orig} -} - -// NewSpanStatus creates a new empty SpanStatus. -// -// This must be used only in testing code since no "Set" method available. -func NewSpanStatus() SpanStatus { - return newSpanStatus(&otlptrace.Status{}) -} - -// Code returns the code associated with this SpanStatus. -func (ms SpanStatus) Code() StatusCode { - return StatusCode((*ms.orig).Code) -} - -// Message returns the message associated with this SpanStatus. -func (ms SpanStatus) Message() string { - return (*ms.orig).Message -} - -// SetMessage replaces the message associated with this SpanStatus. -func (ms SpanStatus) SetMessage(v string) { - (*ms.orig).Message = v -} - -// CopyTo copies all properties from the current struct to the dest. -func (ms SpanStatus) CopyTo(dest SpanStatus) { - dest.SetCode(ms.Code()) - dest.SetMessage(ms.Message()) -} diff --git a/internal/otel_collector/consumer/pdata/logs.go b/internal/otel_collector/consumer/pdata/logs.go deleted file mode 100644 index 6d528e3b58c..00000000000 --- a/internal/otel_collector/consumer/pdata/logs.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pdata - -import ( - "go.opentelemetry.io/collector/internal" - otlpcollectorlog "go.opentelemetry.io/collector/internal/data/protogen/collector/logs/v1" - otlplogs "go.opentelemetry.io/collector/internal/data/protogen/logs/v1" -) - -// This file defines in-memory data structures to represent logs. - -// Logs is the top-level struct that is propagated through the logs pipeline. -// -// This is a reference type (like builtin map). -// -// Must use NewLogs functions to create new instances. -// Important: zero-initialized instance is not valid for use. -type Logs struct { - orig *otlpcollectorlog.ExportLogsServiceRequest -} - -// NewLogs creates a new Logs. -func NewLogs() Logs { - return Logs{orig: &otlpcollectorlog.ExportLogsServiceRequest{}} -} - -// LogsFromInternalRep creates the internal Logs representation from the ProtoBuf. Should -// not be used outside this module. This is intended to be used only by OTLP exporter and -// File exporter, which legitimately need to work with OTLP Protobuf structs. -func LogsFromInternalRep(logs internal.LogsWrapper) Logs { - return Logs{orig: internal.LogsToOtlp(logs)} -} - -// LogsFromOtlpProtoBytes converts OTLP Collector ExportLogsServiceRequest -// ProtoBuf bytes to the internal Logs. -// -// Returns an invalid Logs instance if error is not nil. -func LogsFromOtlpProtoBytes(data []byte) (Logs, error) { - req := otlpcollectorlog.ExportLogsServiceRequest{} - if err := req.Unmarshal(data); err != nil { - return Logs{}, err - } - return Logs{orig: &req}, nil -} - -// InternalRep returns internal representation of the logs. Should not be used outside -// this module. 
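Editor's note: the `Logs` helpers being deleted here round-trip through OTLP `ExportLogsServiceRequest` bytes and expose simple counting helpers. A sketch; the `AppendEmpty` helpers on the intermediate log slices come from the generated files and are assumptions here.

```go
package main

import (
	"fmt"
	"log"

	"go.opentelemetry.io/collector/consumer/pdata"
)

func main() {
	ld := pdata.NewLogs()
	// Create one empty log record: Resource -> InstrumentationLibrary -> LogRecord.
	ld.ResourceLogs().AppendEmpty().
		InstrumentationLibraryLogs().AppendEmpty().
		Logs().AppendEmpty()

	// Serialize to OTLP ExportLogsServiceRequest bytes and back.
	buf, err := ld.ToOtlpProtoBytes()
	if err != nil {
		log.Fatal(err)
	}
	back, err := pdata.LogsFromOtlpProtoBytes(buf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(back.LogRecordCount(), back.OtlpProtoSize() == ld.OtlpProtoSize()) // 1 true
}
```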
This is intended to be used only by OTLP exporter and File exporter, -// which legitimately need to work with OTLP Protobuf structs. -func (ld Logs) InternalRep() internal.LogsWrapper { - return internal.LogsFromOtlp(ld.orig) -} - -// ToOtlpProtoBytes converts this Logs to the OTLP Collector ExportLogsServiceRequest -// ProtoBuf bytes. -// -// Returns an nil byte-array if error is not nil. -func (ld Logs) ToOtlpProtoBytes() ([]byte, error) { - return ld.orig.Marshal() -} - -// Clone returns a copy of Logs. -func (ld Logs) Clone() Logs { - cloneLd := NewLogs() - ld.ResourceLogs().CopyTo(cloneLd.ResourceLogs()) - return cloneLd -} - -// LogRecordCount calculates the total number of log records. -func (ld Logs) LogRecordCount() int { - logCount := 0 - rss := ld.ResourceLogs() - for i := 0; i < rss.Len(); i++ { - rs := rss.At(i) - ill := rs.InstrumentationLibraryLogs() - for i := 0; i < ill.Len(); i++ { - logs := ill.At(i) - logCount += logs.Logs().Len() - } - } - return logCount -} - -// OtlpProtoSize returns the size in bytes of this Logs encoded as OTLP Collector -// ExportLogsServiceRequest ProtoBuf bytes. -func (ld Logs) OtlpProtoSize() int { - return ld.orig.Size() -} - -// ResourceLogs returns the ResourceLogsSlice associated with this Logs. -func (ld Logs) ResourceLogs() ResourceLogsSlice { - return newResourceLogsSlice(&ld.orig.ResourceLogs) -} - -// SeverityNumber is the public alias of otlplogs.SeverityNumber from internal package. -type SeverityNumber int32 - -const ( - SeverityNumberUNDEFINED = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED) - SeverityNumberTRACE = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_TRACE) - SeverityNumberTRACE2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_TRACE2) - SeverityNumberTRACE3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_TRACE3) - SeverityNumberTRACE4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_TRACE4) - SeverityNumberDEBUG = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_DEBUG) - SeverityNumberDEBUG2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_DEBUG2) - SeverityNumberDEBUG3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_DEBUG3) - SeverityNumberDEBUG4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_DEBUG4) - SeverityNumberINFO = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_INFO) - SeverityNumberINFO2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_INFO2) - SeverityNumberINFO3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_INFO3) - SeverityNumberINFO4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_INFO4) - SeverityNumberWARN = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_WARN) - SeverityNumberWARN2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_WARN2) - SeverityNumberWARN3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_WARN3) - SeverityNumberWARN4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_WARN4) - SeverityNumberERROR = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_ERROR) - SeverityNumberERROR2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_ERROR2) - SeverityNumberERROR3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_ERROR3) - SeverityNumberERROR4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_ERROR4) - SeverityNumberFATAL = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_FATAL) - SeverityNumberFATAL2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_FATAL2) - SeverityNumberFATAL3 = 
SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_FATAL3) - SeverityNumberFATAL4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_FATAL4) -) - -func (sn SeverityNumber) String() string { return otlplogs.SeverityNumber(sn).String() } diff --git a/internal/otel_collector/consumer/pdata/metrics.go b/internal/otel_collector/consumer/pdata/metrics.go deleted file mode 100644 index eafc3cae261..00000000000 --- a/internal/otel_collector/consumer/pdata/metrics.go +++ /dev/null @@ -1,314 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pdata - -import ( - "go.opentelemetry.io/collector/internal" - otlpcollectormetrics "go.opentelemetry.io/collector/internal/data/protogen/collector/metrics/v1" - otlpmetrics "go.opentelemetry.io/collector/internal/data/protogen/metrics/v1" -) - -// AggregationTemporality defines how a metric aggregator reports aggregated values. -// It describes how those values relate to the time interval over which they are aggregated. -type AggregationTemporality int32 - -const ( - // AggregationTemporalityUnspecified is the default AggregationTemporality, it MUST NOT be used. - AggregationTemporalityUnspecified = AggregationTemporality(otlpmetrics.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED) - // AggregationTemporalityDelta is an AggregationTemporality for a metric aggregator which reports changes since last report time. - AggregationTemporalityDelta = AggregationTemporality(otlpmetrics.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA) - // AggregationTemporalityCumulative is an AggregationTemporality for a metric aggregator which reports changes since a fixed start time. - AggregationTemporalityCumulative = AggregationTemporality(otlpmetrics.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE) -) - -// String returns the string representation of the AggregationTemporality. -func (at AggregationTemporality) String() string { - return otlpmetrics.AggregationTemporality(at).String() -} - -// Metrics is an opaque interface that allows transition to the new internal Metrics data, but also facilitate the -// transition to the new components especially for traces. -// -// Outside of the core repository the metrics pipeline cannot be converted to the new model since data.MetricData is -// part of the internal package. -type Metrics struct { - orig *otlpcollectormetrics.ExportMetricsServiceRequest -} - -// NewMetrics creates a new Metrics. -func NewMetrics() Metrics { - return Metrics{orig: &otlpcollectormetrics.ExportMetricsServiceRequest{}} -} - -// MetricsFromInternalRep creates Metrics from the internal representation. -// Should not be used outside this module. -func MetricsFromInternalRep(wrapper internal.MetricsWrapper) Metrics { - return Metrics{orig: internal.MetricsToOtlp(wrapper)} -} - -// MetricsFromOtlpProtoBytes converts the OTLP Collector ExportMetricsServiceRequest -// ProtoBuf bytes to Metrics. -// -// Returns an invalid Metrics instance if error is not nil. 
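Editor's note: a sketch of how an `AggregationTemporality` is typically attached to a monotonic sum metric. `SetIsMonotonic`, `SetAggregationTemporality`, and `AggregationTemporality()` live in the generated metrics file rather than this one, so their exact names here are assumptions.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/consumer/pdata"
)

func main() {
	md := pdata.NewMetrics()
	m := md.ResourceMetrics().AppendEmpty().
		InstrumentationLibraryMetrics().AppendEmpty().
		Metrics().AppendEmpty()

	m.SetName("requests.total")
	m.SetDataType(pdata.MetricDataTypeIntSum)

	// A monotonic counter reported as "changes since a fixed start time".
	// Setter names are assumed from the generated metrics file.
	m.IntSum().SetIsMonotonic(true)
	m.IntSum().SetAggregationTemporality(pdata.AggregationTemporalityCumulative)

	fmt.Println(m.IntSum().AggregationTemporality()) // AGGREGATION_TEMPORALITY_CUMULATIVE
}
```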
-func MetricsFromOtlpProtoBytes(data []byte) (Metrics, error) { - req := otlpcollectormetrics.ExportMetricsServiceRequest{} - if err := req.Unmarshal(data); err != nil { - return Metrics{}, err - } - return Metrics{orig: &req}, nil -} - -// InternalRep returns internal representation of the Metrics. -// Should not be used outside this module. -func (md Metrics) InternalRep() internal.MetricsWrapper { - return internal.MetricsFromOtlp(md.orig) -} - -// ToOtlpProtoBytes converts this Metrics to the OTLP Collector ExportMetricsServiceRequest -// ProtoBuf bytes. -// -// Returns an nil byte-array if error is not nil. -func (md Metrics) ToOtlpProtoBytes() ([]byte, error) { - return md.orig.Marshal() -} - -// Clone returns a copy of MetricData. -func (md Metrics) Clone() Metrics { - cloneMd := NewMetrics() - md.ResourceMetrics().CopyTo(cloneMd.ResourceMetrics()) - return cloneMd -} - -// ResourceMetrics returns the ResourceMetricsSlice associated with this Metrics. -func (md Metrics) ResourceMetrics() ResourceMetricsSlice { - return newResourceMetricsSlice(&md.orig.ResourceMetrics) -} - -// MetricCount calculates the total number of metrics. -func (md Metrics) MetricCount() int { - metricCount := 0 - rms := md.ResourceMetrics() - for i := 0; i < rms.Len(); i++ { - rm := rms.At(i) - ilms := rm.InstrumentationLibraryMetrics() - for j := 0; j < ilms.Len(); j++ { - ilm := ilms.At(j) - metricCount += ilm.Metrics().Len() - } - } - return metricCount -} - -// OtlpProtoSize returns the size in bytes of this Metrics encoded as OTLP Collector -// ExportMetricsServiceRequest ProtoBuf bytes. -func (md Metrics) OtlpProtoSize() int { - return md.orig.Size() -} - -// MetricAndDataPointCount calculates the total number of metrics and data points. -func (md Metrics) MetricAndDataPointCount() (metricCount int, dataPointCount int) { - rms := md.ResourceMetrics() - for i := 0; i < rms.Len(); i++ { - rm := rms.At(i) - ilms := rm.InstrumentationLibraryMetrics() - for j := 0; j < ilms.Len(); j++ { - ilm := ilms.At(j) - metrics := ilm.Metrics() - metricCount += metrics.Len() - ms := ilm.Metrics() - for k := 0; k < ms.Len(); k++ { - m := ms.At(k) - switch m.DataType() { - case MetricDataTypeIntGauge: - dataPointCount += m.IntGauge().DataPoints().Len() - case MetricDataTypeDoubleGauge: - dataPointCount += m.DoubleGauge().DataPoints().Len() - case MetricDataTypeIntSum: - dataPointCount += m.IntSum().DataPoints().Len() - case MetricDataTypeDoubleSum: - dataPointCount += m.DoubleSum().DataPoints().Len() - case MetricDataTypeIntHistogram: - dataPointCount += m.IntHistogram().DataPoints().Len() - case MetricDataTypeHistogram: - dataPointCount += m.Histogram().DataPoints().Len() - case MetricDataTypeSummary: - dataPointCount += m.Summary().DataPoints().Len() - } - } - } - } - return -} - -// MetricDataType specifies the type of data in a Metric. 
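Editor's note: a sketch of `MetricAndDataPointCount` over a hand-built metric, using the same `AppendEmpty`/`SetValue` pattern that the `consumer/simple` builder (deleted later in this patch) uses internally.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/consumer/pdata"
)

func main() {
	md := pdata.NewMetrics()
	m := md.ResourceMetrics().AppendEmpty().
		InstrumentationLibraryMetrics().AppendEmpty().
		Metrics().AppendEmpty()
	m.SetName("queue.length")
	m.SetDataType(pdata.MetricDataTypeIntGauge)

	// Two data points on the same metric.
	m.IntGauge().DataPoints().AppendEmpty().SetValue(3)
	m.IntGauge().DataPoints().AppendEmpty().SetValue(7)

	metrics, points := md.MetricAndDataPointCount()
	fmt.Println(metrics, points) // 1 2
}
```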
-type MetricDataType int32 - -const ( - MetricDataTypeNone MetricDataType = iota - MetricDataTypeIntGauge - MetricDataTypeDoubleGauge - MetricDataTypeIntSum - MetricDataTypeDoubleSum - MetricDataTypeIntHistogram - MetricDataTypeHistogram - MetricDataTypeSummary -) - -func (mdt MetricDataType) String() string { - switch mdt { - case MetricDataTypeNone: - return "None" - case MetricDataTypeIntGauge: - return "IntGauge" - case MetricDataTypeDoubleGauge: - return "DoubleGauge" - case MetricDataTypeIntSum: - return "IntSum" - case MetricDataTypeDoubleSum: - return "DoubleSum" - case MetricDataTypeIntHistogram: - return "IntHistogram" - case MetricDataTypeHistogram: - return "Histogram" - case MetricDataTypeSummary: - return "Summary" - } - return "" -} - -// DataType returns the type of the data for this Metric. -// Calling this function on zero-initialized Metric will cause a panic. -func (ms Metric) DataType() MetricDataType { - switch ms.orig.Data.(type) { - case *otlpmetrics.Metric_IntGauge: - return MetricDataTypeIntGauge - case *otlpmetrics.Metric_DoubleGauge: - return MetricDataTypeDoubleGauge - case *otlpmetrics.Metric_IntSum: - return MetricDataTypeIntSum - case *otlpmetrics.Metric_DoubleSum: - return MetricDataTypeDoubleSum - case *otlpmetrics.Metric_IntHistogram: - return MetricDataTypeIntHistogram - case *otlpmetrics.Metric_DoubleHistogram: - return MetricDataTypeHistogram - case *otlpmetrics.Metric_DoubleSummary: - return MetricDataTypeSummary - } - return MetricDataTypeNone -} - -// SetDataType clears any existing data and initialize it with an empty data of the given type. -// Calling this function on zero-initialized Metric will cause a panic. -func (ms Metric) SetDataType(ty MetricDataType) { - switch ty { - case MetricDataTypeIntGauge: - ms.orig.Data = &otlpmetrics.Metric_IntGauge{IntGauge: &otlpmetrics.IntGauge{}} - case MetricDataTypeDoubleGauge: - ms.orig.Data = &otlpmetrics.Metric_DoubleGauge{DoubleGauge: &otlpmetrics.DoubleGauge{}} - case MetricDataTypeIntSum: - ms.orig.Data = &otlpmetrics.Metric_IntSum{IntSum: &otlpmetrics.IntSum{}} - case MetricDataTypeDoubleSum: - ms.orig.Data = &otlpmetrics.Metric_DoubleSum{DoubleSum: &otlpmetrics.DoubleSum{}} - case MetricDataTypeIntHistogram: - ms.orig.Data = &otlpmetrics.Metric_IntHistogram{IntHistogram: &otlpmetrics.IntHistogram{}} - case MetricDataTypeHistogram: - ms.orig.Data = &otlpmetrics.Metric_DoubleHistogram{DoubleHistogram: &otlpmetrics.DoubleHistogram{}} - case MetricDataTypeSummary: - ms.orig.Data = &otlpmetrics.Metric_DoubleSummary{DoubleSummary: &otlpmetrics.DoubleSummary{}} - } -} - -// IntGauge returns the data as IntGauge. -// Calling this function when DataType() != MetricDataTypeIntGauge will cause a panic. -// Calling this function on zero-initialized Metric will cause a panic. -func (ms Metric) IntGauge() IntGauge { - return newIntGauge(ms.orig.Data.(*otlpmetrics.Metric_IntGauge).IntGauge) -} - -// DoubleGauge returns the data as DoubleGauge. -// Calling this function when DataType() != MetricDataTypeDoubleGauge will cause a panic. -// Calling this function on zero-initialized Metric will cause a panic. -func (ms Metric) DoubleGauge() DoubleGauge { - return newDoubleGauge(ms.orig.Data.(*otlpmetrics.Metric_DoubleGauge).DoubleGauge) -} - -// IntSum returns the data as IntSum. -// Calling this function when DataType() != MetricDataTypeIntSum will cause a panic. -// Calling this function on zero-initialized Metric will cause a panic. 
-func (ms Metric) IntSum() IntSum { - return newIntSum(ms.orig.Data.(*otlpmetrics.Metric_IntSum).IntSum) -} - -// DoubleSum returns the data as DoubleSum. -// Calling this function when DataType() != MetricDataTypeDoubleSum will cause a panic. -// Calling this function on zero-initialized Metric will cause a panic. -func (ms Metric) DoubleSum() DoubleSum { - return newDoubleSum(ms.orig.Data.(*otlpmetrics.Metric_DoubleSum).DoubleSum) -} - -// IntHistogram returns the data as IntHistogram. -// Calling this function when DataType() != MetricDataTypeIntHistogram will cause a panic. -// Calling this function on zero-initialized Metric will cause a panic. -func (ms Metric) IntHistogram() IntHistogram { - return newIntHistogram(ms.orig.Data.(*otlpmetrics.Metric_IntHistogram).IntHistogram) -} - -// Histogram returns the data as Histogram. -// Calling this function when DataType() != MetricDataTypeHistogram will cause a panic. -// Calling this function on zero-initialized Metric will cause a panic. -func (ms Metric) Histogram() Histogram { - return newHistogram(ms.orig.Data.(*otlpmetrics.Metric_DoubleHistogram).DoubleHistogram) -} - -// Summary returns the data as Summary. -// Calling this function when DataType() != MetricDataTypeSummary will cause a panic. -// Calling this function on zero-initialized Metric will cause a panic. -func (ms Metric) Summary() Summary { - return newSummary(ms.orig.Data.(*otlpmetrics.Metric_DoubleSummary).DoubleSummary) -} - -func copyData(src, dest *otlpmetrics.Metric) { - switch srcData := (src).Data.(type) { - case *otlpmetrics.Metric_IntGauge: - data := &otlpmetrics.Metric_IntGauge{IntGauge: &otlpmetrics.IntGauge{}} - newIntGauge(srcData.IntGauge).CopyTo(newIntGauge(data.IntGauge)) - dest.Data = data - case *otlpmetrics.Metric_DoubleGauge: - data := &otlpmetrics.Metric_DoubleGauge{DoubleGauge: &otlpmetrics.DoubleGauge{}} - newDoubleGauge(srcData.DoubleGauge).CopyTo(newDoubleGauge(data.DoubleGauge)) - dest.Data = data - case *otlpmetrics.Metric_IntSum: - data := &otlpmetrics.Metric_IntSum{IntSum: &otlpmetrics.IntSum{}} - newIntSum(srcData.IntSum).CopyTo(newIntSum(data.IntSum)) - dest.Data = data - case *otlpmetrics.Metric_DoubleSum: - data := &otlpmetrics.Metric_DoubleSum{DoubleSum: &otlpmetrics.DoubleSum{}} - newDoubleSum(srcData.DoubleSum).CopyTo(newDoubleSum(data.DoubleSum)) - dest.Data = data - case *otlpmetrics.Metric_IntHistogram: - data := &otlpmetrics.Metric_IntHistogram{IntHistogram: &otlpmetrics.IntHistogram{}} - newIntHistogram(srcData.IntHistogram).CopyTo(newIntHistogram(data.IntHistogram)) - dest.Data = data - case *otlpmetrics.Metric_DoubleHistogram: - data := &otlpmetrics.Metric_DoubleHistogram{DoubleHistogram: &otlpmetrics.DoubleHistogram{}} - newHistogram(srcData.DoubleHistogram).CopyTo(newHistogram(data.DoubleHistogram)) - dest.Data = data - case *otlpmetrics.Metric_DoubleSummary: - data := &otlpmetrics.Metric_DoubleSummary{DoubleSummary: &otlpmetrics.DoubleSummary{}} - newSummary(srcData.DoubleSummary).CopyTo(newSummary(data.DoubleSummary)) - dest.Data = data - } -} diff --git a/internal/otel_collector/consumer/pdata/spanid.go b/internal/otel_collector/consumer/pdata/spanid.go deleted file mode 100644 index 9f36b270591..00000000000 --- a/internal/otel_collector/consumer/pdata/spanid.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pdata - -import ( - "go.opentelemetry.io/collector/internal/data" -) - -// SpanID is an alias of OTLP SpanID data type. -type SpanID struct { - orig data.SpanID -} - -// InvalidSpanID returns an empty (all zero bytes) SpanID. -func InvalidSpanID() SpanID { - return SpanID{orig: data.NewSpanID([8]byte{})} -} - -// NewSpanID returns a new SpanID from the given byte array. -func NewSpanID(bytes [8]byte) SpanID { - return SpanID{orig: data.NewSpanID(bytes)} -} - -// Bytes returns the byte array representation of the SpanID. -func (t SpanID) Bytes() [8]byte { - return t.orig.Bytes() -} - -// HexString returns hex representation of the SpanID. -func (t SpanID) HexString() string { - return t.orig.HexString() -} - -// IsEmpty returns true if id doesn't contain at least one non-zero byte. -func (t SpanID) IsEmpty() bool { - return t.orig.IsEmpty() -} diff --git a/internal/otel_collector/consumer/pdata/timestamp.go b/internal/otel_collector/consumer/pdata/timestamp.go deleted file mode 100644 index 9ce0977ab94..00000000000 --- a/internal/otel_collector/consumer/pdata/timestamp.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pdata - -import ( - "time" -) - -// Timestamp is a time specified as UNIX Epoch time in nanoseconds since -// 00:00:00 UTC on 1 January 1970. -type Timestamp uint64 - -// TimestampFromTime constructs a new Timestamp from the provided time.Time. -func TimestampFromTime(t time.Time) Timestamp { - return Timestamp(uint64(t.UnixNano())) -} - -// AsTime converts this to a time.Time. -func (ts Timestamp) AsTime() time.Time { - return time.Unix(0, int64(ts)).UTC() -} - -// String returns the string representation of this in UTC. -func (ts Timestamp) String() string { - return ts.AsTime().String() -} diff --git a/internal/otel_collector/consumer/pdata/traceid.go b/internal/otel_collector/consumer/pdata/traceid.go deleted file mode 100644 index c6a662dca10..00000000000 --- a/internal/otel_collector/consumer/pdata/traceid.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
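Editor's note: a short sketch of the `SpanID` and `Timestamp` value types defined in the two files deleted here, built only from the constructors and accessors shown in these hunks.

```go
package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/consumer/pdata"
)

func main() {
	id := pdata.NewSpanID([8]byte{0xDE, 0xAD, 0xBE, 0xEF, 0, 0, 0, 1})
	fmt.Println(id.HexString(), id.IsEmpty())    // deadbeef00000001 false
	fmt.Println(pdata.InvalidSpanID().IsEmpty()) // true

	// Timestamp is UNIX epoch nanoseconds; AsTime converts back to time.Time in UTC.
	ts := pdata.TimestampFromTime(time.Date(2021, 7, 27, 0, 0, 0, 0, time.UTC))
	fmt.Println(uint64(ts), ts.AsTime().Year())
}
```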
-// See the License for the specific language governing permissions and -// limitations under the License. - -package pdata - -import ( - "go.opentelemetry.io/collector/internal/data" -) - -// TraceID is an alias of OTLP TraceID data type. -type TraceID struct { - orig data.TraceID -} - -// InvalidTraceID returns an empty (all zero bytes) TraceID. -func InvalidTraceID() TraceID { - return TraceID{orig: data.NewTraceID([16]byte{})} -} - -// NewTraceID returns a new TraceID from the given byte array. -func NewTraceID(bytes [16]byte) TraceID { - return TraceID{orig: data.NewTraceID(bytes)} -} - -// Bytes returns the byte array representation of the TraceID. -func (t TraceID) Bytes() [16]byte { - return t.orig.Bytes() -} - -// HexString returns hex representation of the TraceID. -func (t TraceID) HexString() string { - return t.orig.HexString() -} - -// IsEmpty returns true if id doesn't contain at least one non-zero byte. -func (t TraceID) IsEmpty() bool { - return t.orig.IsEmpty() -} diff --git a/internal/otel_collector/consumer/pdata/traces.go b/internal/otel_collector/consumer/pdata/traces.go deleted file mode 100644 index a737e60e3d2..00000000000 --- a/internal/otel_collector/consumer/pdata/traces.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pdata - -import ( - "go.opentelemetry.io/collector/internal" - otlpcollectortrace "go.opentelemetry.io/collector/internal/data/protogen/collector/trace/v1" - otlptrace "go.opentelemetry.io/collector/internal/data/protogen/trace/v1" -) - -// This file defines in-memory data structures to represent traces (spans). - -// Traces is the top-level struct that is propagated through the traces pipeline. -type Traces struct { - orig *otlpcollectortrace.ExportTraceServiceRequest -} - -// NewTraces creates a new Traces. -func NewTraces() Traces { - return Traces{orig: &otlpcollectortrace.ExportTraceServiceRequest{}} -} - -// TracesFromInternalRep creates Traces from the internal representation. -// Should not be used outside this module. -func TracesFromInternalRep(wrapper internal.TracesWrapper) Traces { - return Traces{orig: internal.TracesToOtlp(wrapper)} -} - -// TracesFromOtlpProtoBytes converts OTLP Collector ExportTraceServiceRequest -// ProtoBuf bytes to the internal Traces. -// -// Returns an invalid Traces instance if error is not nil. -func TracesFromOtlpProtoBytes(data []byte) (Traces, error) { - req := otlpcollectortrace.ExportTraceServiceRequest{} - if err := req.Unmarshal(data); err != nil { - return Traces{}, err - } - internal.TracesCompatibilityChanges(&req) - return Traces{orig: &req}, nil -} - -// InternalRep returns internal representation of the Traces. -// Should not be used outside this module. -func (td Traces) InternalRep() internal.TracesWrapper { - return internal.TracesFromOtlp(td.orig) -} - -// ToOtlpProtoBytes converts this Traces to the OTLP Collector ExportTraceServiceRequest -// ProtoBuf bytes. 
-// -// Returns an nil byte-array if error is not nil. -func (td Traces) ToOtlpProtoBytes() ([]byte, error) { - return td.orig.Marshal() -} - -// Clone returns a copy of Traces. -func (td Traces) Clone() Traces { - cloneTd := NewTraces() - td.ResourceSpans().CopyTo(cloneTd.ResourceSpans()) - return cloneTd -} - -// SpanCount calculates the total number of spans. -func (td Traces) SpanCount() int { - spanCount := 0 - rss := td.ResourceSpans() - for i := 0; i < rss.Len(); i++ { - rs := rss.At(i) - ilss := rs.InstrumentationLibrarySpans() - for j := 0; j < ilss.Len(); j++ { - spanCount += ilss.At(j).Spans().Len() - } - } - return spanCount -} - -// OtlpProtoSize returns the size in bytes of this Traces encoded as OTLP Collector -// ExportTraceServiceRequest ProtoBuf bytes. -func (td Traces) OtlpProtoSize() int { - return td.orig.Size() -} - -// ResourceSpans returns the ResourceSpansSlice associated with this Metrics. -func (td Traces) ResourceSpans() ResourceSpansSlice { - return newResourceSpansSlice(&td.orig.ResourceSpans) -} - -// TraceState in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header -type TraceState string - -const ( - // TraceStateEmpty represents the empty TraceState. - TraceStateEmpty TraceState = "" -) - -// SpanKind is the type of span. Can be used to specify additional relationships between spans -// in addition to a parent/child relationship. -type SpanKind int32 - -// String returns the string representation of the SpanKind. -func (sk SpanKind) String() string { return otlptrace.Span_SpanKind(sk).String() } - -const ( - // SpanKindUnspecified represents that the SpanKind is unspecified, it MUST NOT be used. - SpanKindUnspecified = SpanKind(0) - // SpanKindInternal indicates that the span represents an internal operation within an application, - // as opposed to an operation happening at the boundaries. Default value. - SpanKindInternal = SpanKind(otlptrace.Span_SPAN_KIND_INTERNAL) - // SpanKindServer indicates that the span covers server-side handling of an RPC or other - // remote network request. - SpanKindServer = SpanKind(otlptrace.Span_SPAN_KIND_SERVER) - // SpanKindClient indicates that the span describes a request to some remote service. - SpanKindClient = SpanKind(otlptrace.Span_SPAN_KIND_CLIENT) - // SpanKindProducer indicates that the span describes a producer sending a message to a broker. - // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship - // between producer and consumer spans. - // A PRODUCER span ends when the message was accepted by the broker while the logical processing of - // the message might span a much longer time. - SpanKindProducer = SpanKind(otlptrace.Span_SPAN_KIND_PRODUCER) - // SpanKindConsumer indicates that the span describes consumer receiving a message from a broker. - // Like the PRODUCER kind, there is often no direct critical path latency relationship between - // producer and consumer spans. 
- SpanKindConsumer = SpanKind(otlptrace.Span_SPAN_KIND_CONSUMER) -) - -// StatusCode mirrors the codes defined at -// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status -type StatusCode int32 - -const ( - StatusCodeUnset = StatusCode(otlptrace.Status_STATUS_CODE_UNSET) - StatusCodeOk = StatusCode(otlptrace.Status_STATUS_CODE_OK) - StatusCodeError = StatusCode(otlptrace.Status_STATUS_CODE_ERROR) -) - -func (sc StatusCode) String() string { return otlptrace.Status_StatusCode(sc).String() } - -// SetCode replaces the code associated with this SpanStatus. -func (ms SpanStatus) SetCode(v StatusCode) { - ms.orig.Code = otlptrace.Status_StatusCode(v) - - // According to OTLP spec we also need to set the deprecated_code field as we are a new sender: - // See https://github.com/open-telemetry/opentelemetry-proto/blob/59c488bfb8fb6d0458ad6425758b70259ff4a2bd/opentelemetry/proto/trace/v1/trace.proto#L239 - switch v { - case StatusCodeUnset, StatusCodeOk: - ms.orig.DeprecatedCode = otlptrace.Status_DEPRECATED_STATUS_CODE_OK - case StatusCodeError: - ms.orig.DeprecatedCode = otlptrace.Status_DEPRECATED_STATUS_CODE_UNKNOWN_ERROR - } -} diff --git a/internal/otel_collector/consumer/simple/metrics.go b/internal/otel_collector/consumer/simple/metrics.go deleted file mode 100644 index 7bb6eacde68..00000000000 --- a/internal/otel_collector/consumer/simple/metrics.go +++ /dev/null @@ -1,361 +0,0 @@ -// Copyright 2020 The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package simple - -import ( - "fmt" - "sync" - "time" - - "go.opentelemetry.io/collector/consumer/pdata" -) - -// Metrics facilitates building pdata.Metrics in receivers. It is meant -// to be much easier and more fluent than than using pdata.Metrics directly. -// All of the exported methods on it return the same instance of Metrics -// as a return value, allowing you to chain method calls easily, similar to the -// Java builder pattern. -// -// All of the public fields in this structure are meant to be set before the -// first data point is added, and should not be changed afterwards. -// -// The Metrics is designed for cases where receivers are generating -// metrics from scratch, where generally you will have a single datapoint per -// metric/label combination. -// -// One restriction this helper imposes is that a particular metric name must -// only be used with a single data type for all instances derived from a base -// helper, including the base instance. This restriction greatly simplifies -// the logic to reuse metrics for multiple datapoints and it is generally -// easier for backends to not have to deal with conflicting types anyway. -// -// It is NOT thread-safe, so you should use an external mutex if using it from -// multiple goroutines. -type Metrics struct { - // REQUIRED. A Metrics object that has been created with - // `pdata.NewMetrics()`. This is required to be set on the builder. 
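Editor's note: a sketch of setting a span status with the `SpanStatus` accessors and the `SetCode` helper shown above. `SetCode` also keeps the deprecated status code field consistent, so callers only ever set the new code; `NewSpanStatus` is the test-only constructor, and in pipeline code the status comes from `span.Status()`.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/consumer/pdata"
)

func main() {
	status := pdata.NewSpanStatus()
	status.SetCode(pdata.StatusCodeError)
	status.SetMessage("upstream returned 503")

	fmt.Println(status.Code(), status.Message()) // STATUS_CODE_ERROR upstream returned 503
}
```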
All - // metrics added will go into this immediately upon invocation of Add* - // methods. Do not change this once initially set. - pdata.Metrics - - // MetricFactoriesByName is an optional map of metric factories that will - // be created with the appropriate name, description, and type field. This - // is intended to be used with the metadata code generation modules but can - // be used apart from that just as well. The returned metrics are expected - // to be initialized. - MetricFactoriesByName map[string]func(pdata.Metric) - - // If set, this instrumentation library name will be used for all metrics - // generated by this builder. This is meant to be set once at builder - // creation and not changed later. - InstrumentationLibraryName string - // If set, this instrumentation library version will be used for all - // metrics generated by this builder. This is meant to be set once at - // builder creation and not changed later. - InstrumentationLibraryVersion string - // These attributes will be added to the Resource object on all - // ResourceMetrics instances created by the builder. This is meant to be - // set once at builder creation and not changed later. - ResourceAttributes map[string]string - // This time will be used as the Timestamp for all metrics generated. It - // can be updated with a new timestamp at any time. - Timestamp time.Time - // A set of labels that will be applied to all datapoints emitted by the - // builder. - Labels map[string]string - - resourceMetricIdx **int - metricIdxByName map[string]int -} - -func (mb *Metrics) ensureInit() { - if mb.metricIdxByName == nil { - mb.metricIdxByName = map[string]int{} - } - if mb.resourceMetricIdx == nil { - var ip *int - mb.resourceMetricIdx = &ip - } -} - -// Clone the MetricBuilder. All of the maps copied will be deeply copied. -func (mb *Metrics) clone() *Metrics { - mb.ensureInit() - - return &Metrics{ - Metrics: mb.Metrics, - MetricFactoriesByName: mb.MetricFactoriesByName, - InstrumentationLibraryName: mb.InstrumentationLibraryName, - InstrumentationLibraryVersion: mb.InstrumentationLibraryVersion, - ResourceAttributes: cloneStringMap(mb.ResourceAttributes), - Timestamp: mb.Timestamp, - Labels: cloneStringMap(mb.Labels), - resourceMetricIdx: mb.resourceMetricIdx, - metricIdxByName: mb.metricIdxByName, - } -} - -// WithLabels returns a new, independent builder with additional labels. These -// labels will be combined with the Labels that can be set on the struct. -// All subsequent calls to create metrics will create metrics that use these -// labels. The input map's entries are copied so the map can be mutated freely -// by the caller afterwards without affecting the builder. -func (mb *Metrics) WithLabels(l map[string]string) *Metrics { - out := mb.clone() - - for k, v := range l { - out.Labels[k] = v - } - - return out -} - -// AsSafe returns an instance of this builder wrapped in -// SafeMetrics that ensures all of the public methods on this instance -// will be thread-safe between goroutines. You must explicitly type these -// instances as SafeMetrics. -func (mb Metrics) AsSafe() *SafeMetrics { - return &SafeMetrics{ - Metrics: &mb, - Mutex: &sync.Mutex{}, - } -} - -// AddGaugeDataPoint adds an integer gauge data point. -func (mb *Metrics) AddGaugeDataPoint(name string, metricValue int64) *Metrics { - typ := pdata.MetricDataTypeIntGauge - mb.addDataPoint(name, typ, metricValue) - return mb -} - -// AddDGaugeDataPoint adds a double gauge data point. 
-func (mb *Metrics) AddDGaugeDataPoint(name string, metricValue float64) *Metrics { - typ := pdata.MetricDataTypeDoubleGauge - mb.addDataPoint(name, typ, metricValue) - return mb -} - -// AddSumDataPoint adds an integer sum data point. -func (mb *Metrics) AddSumDataPoint(name string, metricValue int64) *Metrics { - typ := pdata.MetricDataTypeIntSum - mb.addDataPoint(name, typ, metricValue) - return mb -} - -// AddDSumDataPoint adds a double sum data point. -func (mb *Metrics) AddDSumDataPoint(name string, metricValue float64) *Metrics { - typ := pdata.MetricDataTypeDoubleSum - mb.addDataPoint(name, typ, metricValue) - return mb -} - -// AddHistogramRawDataPoint adds an integer histogram data point. -func (mb *Metrics) AddHistogramRawDataPoint(name string, hist pdata.IntHistogramDataPoint) *Metrics { - mb.addDataPoint(name, pdata.MetricDataTypeIntHistogram, hist) - return mb -} - -// AddDHistogramRawDataPoint adds a double histogram data point. -func (mb *Metrics) AddDHistogramRawDataPoint(name string, hist pdata.HistogramDataPoint) *Metrics { - mb.addDataPoint(name, pdata.MetricDataTypeHistogram, hist) - return mb -} - -func (mb *Metrics) getMetricsSlice() pdata.MetricSlice { - rms := mb.Metrics.ResourceMetrics() - if mb.resourceMetricIdx != nil && *mb.resourceMetricIdx != nil { - return rms.At(**mb.resourceMetricIdx).InstrumentationLibraryMetrics().At(0).Metrics() - } - - rmsLen := rms.Len() - rm := rms.AppendEmpty() - - res := rm.Resource() - for k, v := range mb.ResourceAttributes { - res.Attributes().Insert(k, pdata.NewAttributeValueString(v)) - } - - ilms := rm.InstrumentationLibraryMetrics() - ilm := ilms.AppendEmpty() - - il := ilm.InstrumentationLibrary() - il.SetName(mb.InstrumentationLibraryName) - il.SetVersion(mb.InstrumentationLibraryVersion) - - *mb.resourceMetricIdx = &rmsLen - - return ilm.Metrics() -} - -func (mb *Metrics) getOrCreateMetric(name string, typ pdata.MetricDataType) pdata.Metric { - mb.ensureInit() - - metricSlice := mb.getMetricsSlice() - - idx, ok := mb.metricIdxByName[name] - if ok { - return metricSlice.At(idx) - } - - metric := metricSlice.AppendEmpty() - if fac, ok := mb.MetricFactoriesByName[name]; ok { - fac(metric) - } else { - metric.SetName(name) - metric.SetDataType(typ) - } - - mb.metricIdxByName[name] = metricSlice.Len() - 1 - return metric -} - -func (mb *Metrics) addDataPoint(name string, typ pdata.MetricDataType, val interface{}) { - metric := mb.getOrCreateMetric(name, typ) - - // This protects against reusing the same metric name with different types. 
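Editor's note: the doc comment above describes the fluent builder contract of `consumer/simple`, which this patch deletes. A sketch of typical use; the metric names and attribute values are hypothetical.

```go
package main

import (
	"time"

	"go.opentelemetry.io/collector/consumer/pdata"
	"go.opentelemetry.io/collector/consumer/simple"
)

func main() {
	mb := &simple.Metrics{
		Metrics:                    pdata.NewMetrics(), // REQUIRED embedded destination
		InstrumentationLibraryName: "my.receiver",
		Timestamp:                  time.Now(),
		ResourceAttributes:         map[string]string{"host.name": "example-host"},
		Labels:                     map[string]string{"disk": "sda"},
	}

	// Calls chain and all write into the embedded pdata.Metrics.
	mb.AddGaugeDataPoint("disk.free_bytes", 1024).
		AddSumDataPoint("disk.reads_total", 42)

	// WithLabels returns an independent builder with extra labels merged in.
	mb.WithLabels(map[string]string{"partition": "1"}).
		AddDGaugeDataPoint("disk.utilization", 0.42)

	_ = mb.Metrics // hand the assembled pdata.Metrics to the next consumer
}
```

Note that, per the builder's documented restriction, reusing a metric name with a different data type panics, which is what the type check in `addDataPoint` below enforces.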
- if metric.DataType() != typ { - panic(fmt.Errorf("mismatched metric data types for metric %q: %q vs %q", metric.Name(), metric.DataType(), typ)) - } - - tsNano := pdata.TimestampFromTime(mb.Timestamp) - - switch typ { - case pdata.MetricDataTypeIntGauge: - m := metric.IntGauge() - dps := m.DataPoints() - dp := dps.AppendEmpty() - dp.LabelsMap().InitFromMap(mb.Labels) - dp.SetValue(val.(int64)) - dp.SetTimestamp(tsNano) - - case pdata.MetricDataTypeIntSum: - m := metric.IntSum() - dps := m.DataPoints() - dp := dps.AppendEmpty() - dp.LabelsMap().InitFromMap(mb.Labels) - dp.SetValue(val.(int64)) - dp.SetTimestamp(tsNano) - - case pdata.MetricDataTypeDoubleGauge: - m := metric.DoubleGauge() - dps := m.DataPoints() - dp := dps.AppendEmpty() - dp.LabelsMap().InitFromMap(mb.Labels) - dp.SetValue(val.(float64)) - dp.SetTimestamp(tsNano) - - case pdata.MetricDataTypeDoubleSum: - m := metric.DoubleSum() - dps := m.DataPoints() - dp := dps.AppendEmpty() - dp.LabelsMap().InitFromMap(mb.Labels) - dp.SetValue(val.(float64)) - dp.SetTimestamp(tsNano) - - case pdata.MetricDataTypeIntHistogram: - m := metric.IntHistogram() - dps := m.DataPoints() - dp := dps.AppendEmpty() - dp.LabelsMap().InitFromMap(mb.Labels) - val.(pdata.IntHistogramDataPoint).CopyTo(dp) - dp.SetTimestamp(tsNano) - - case pdata.MetricDataTypeHistogram: - m := metric.Histogram() - dps := m.DataPoints() - dp := dps.AppendEmpty() - dp.LabelsMap().InitFromMap(mb.Labels) - val.(pdata.HistogramDataPoint).CopyTo(dp) - dp.SetTimestamp(tsNano) - - default: - panic("invalid metric type: " + typ.String()) - } -} - -func cloneStringMap(m map[string]string) map[string]string { - out := make(map[string]string, len(m)) - for k, v := range m { - out[k] = v - } - return out -} - -// SafeMetrics is a wrapper for Metrics that ensures the wrapped -// instance can be used safely across goroutines. It is meant to be created -// from the AsSafe on Metrics. -type SafeMetrics struct { - *sync.Mutex - *Metrics -} - -// WithLabels wraps Metrics.WithLabels. -func (mb *SafeMetrics) WithLabels(l map[string]string) *SafeMetrics { - mb.Lock() - defer mb.Unlock() - - return &SafeMetrics{ - Metrics: mb.Metrics.WithLabels(l), - Mutex: mb.Mutex, - } -} - -// AddGaugeDataPoint wraps Metrics.AddGaugeDataPoint. -func (mb *SafeMetrics) AddGaugeDataPoint(name string, metricValue int64) *SafeMetrics { - mb.Lock() - mb.Metrics.AddGaugeDataPoint(name, metricValue) - mb.Unlock() - return mb -} - -// AddDGaugeDataPoint wraps Metrics.AddDGaugeDataPoint. -func (mb *SafeMetrics) AddDGaugeDataPoint(name string, metricValue float64) *SafeMetrics { - mb.Lock() - mb.Metrics.AddDGaugeDataPoint(name, metricValue) - mb.Unlock() - return mb -} - -// AddSumDataPoint wraps Metrics.AddSumDataPoint. -func (mb *SafeMetrics) AddSumDataPoint(name string, metricValue int64) *SafeMetrics { - mb.Lock() - mb.Metrics.AddSumDataPoint(name, metricValue) - mb.Unlock() - return mb -} - -// AddDSumDataPoint wraps Metrics.AddDSumDataPoint. -func (mb *SafeMetrics) AddDSumDataPoint(name string, metricValue float64) *SafeMetrics { - mb.Lock() - mb.Metrics.AddDSumDataPoint(name, metricValue) - mb.Unlock() - return mb -} - -// AddHistogramRawDataPoint wraps Metrics.AddHistogramRawDataPoint. -func (mb *SafeMetrics) AddHistogramRawDataPoint(name string, hist pdata.IntHistogramDataPoint) *SafeMetrics { - mb.Lock() - mb.Metrics.AddHistogramRawDataPoint(name, hist) - mb.Unlock() - return mb -} - -// AddDHistogramRawDataPoint wraps AddDHistogramRawDataPoint. 
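Editor's note: `AsSafe` wraps the builder in `SafeMetrics`, which serializes every call behind a mutex. A sketch of concurrent use under the same assumptions as the previous example:

```go
package main

import (
	"sync"
	"time"

	"go.opentelemetry.io/collector/consumer/pdata"
	"go.opentelemetry.io/collector/consumer/simple"
)

func main() {
	safe := simple.Metrics{
		Metrics:   pdata.NewMetrics(),
		Timestamp: time.Now(),
	}.AsSafe()

	// The SafeMetrics wrapper locks around each call, so several goroutines can
	// add data points to the same underlying pdata.Metrics.
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(n int64) {
			defer wg.Done()
			safe.AddSumDataPoint("events.count", n)
		}(int64(i))
	}
	wg.Wait()
}
```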
-func (mb *SafeMetrics) AddDHistogramRawDataPoint(name string, hist pdata.HistogramDataPoint) *SafeMetrics { - mb.Lock() - mb.Metrics.AddDHistogramRawDataPoint(name, hist) - mb.Unlock() - return mb -} diff --git a/internal/otel_collector/docs/metric-metadata.yaml b/internal/otel_collector/docs/metric-metadata.yaml index e469e3d64ba..e68242a8907 100644 --- a/internal/otel_collector/docs/metric-metadata.yaml +++ b/internal/otel_collector/docs/metric-metadata.yaml @@ -23,11 +23,11 @@ metrics: unit: # Required data: - # Required: one of int gauge, int sum, int histogram, double gauge, double sum, or double histogram. + # Required: one of int gauge, int sum, int histogram, gauge, sum, or histogram. type: - # Required for int sum and double sum. + # Required for int sum and sum. monotonic: # true | false - # Required for int sum, int histogram, double sum, and double histogram. + # Required for int sum, int histogram, sum, and histogram. aggregation: # delta | cumulative # Optional: array of labels that were defined in the labels section that are emitted by this metric. labels: diff --git a/internal/otel_collector/docs/service-extensions.md b/internal/otel_collector/docs/service-extensions.md index 9286c5b4379..6d707a6d31e 100644 --- a/internal/otel_collector/docs/service-extensions.md +++ b/internal/otel_collector/docs/service-extensions.md @@ -20,7 +20,7 @@ to receive data. These are a necessary addition to allow implementing extensions that indicate to LBs and external systems if the service instance is ready or not to receive data (e.g.: a [k8s readiness probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#define-readiness-probes)). -These state changes are under the control of the service application hosting +These state changes are under the control of the service server hosting the extensions. There are more complex scenarios in which there can be notifications of state diff --git a/internal/otel_collector/consumer/simple/doc.go b/internal/otel_collector/exporter/exporterhelper/doc.go similarity index 79% rename from internal/otel_collector/consumer/simple/doc.go rename to internal/otel_collector/exporter/exporterhelper/doc.go index 2c7e9034372..6cd2d2c4cfe 100644 --- a/internal/otel_collector/consumer/simple/doc.go +++ b/internal/otel_collector/exporter/exporterhelper/doc.go @@ -12,6 +12,5 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package simple facilitates building pdata.Metrics in receivers in an -// easier and more fluent way than using pdata.Metrics directly. -package simple +// Package exporterhelper provides helper functions for exporters. 
+package exporterhelper diff --git a/internal/otel_collector/exporter/exporterhelper/logs.go b/internal/otel_collector/exporter/exporterhelper/logs.go index d2797c6b477..1f05b9a26d6 100644 --- a/internal/otel_collector/exporter/exporterhelper/logs.go +++ b/internal/otel_collector/exporter/exporterhelper/logs.go @@ -18,14 +18,12 @@ import ( "context" "errors" - "go.uber.org/zap" - "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/consumer/consumerhelper" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) type logsRequest struct { @@ -66,7 +64,7 @@ type logsExporter struct { // NewLogsExporter creates an LogsExporter that records observability metrics and wraps every request with a Span. func NewLogsExporter( cfg config.Exporter, - logger *zap.Logger, + set component.ExporterCreateSettings, pusher consumerhelper.ConsumeLogsFunc, options ...Option, ) (component.LogsExporter, error) { @@ -74,7 +72,7 @@ func NewLogsExporter( return nil, errNilConfig } - if logger == nil { + if set.Logger == nil { return nil, errNilLogger } @@ -83,7 +81,7 @@ func NewLogsExporter( } bs := fromOptions(options...) - be := newBaseExporter(cfg, logger, bs) + be := newBaseExporter(cfg, set.Logger, bs) be.wrapConsumerSender(func(nextSender requestSender) requestSender { return &logsExporterWithObservability{ obsrep: be.obsrep, diff --git a/internal/otel_collector/exporter/exporterhelper/metrics.go b/internal/otel_collector/exporter/exporterhelper/metrics.go index 1a23b8e6a43..243674437da 100644 --- a/internal/otel_collector/exporter/exporterhelper/metrics.go +++ b/internal/otel_collector/exporter/exporterhelper/metrics.go @@ -18,14 +18,12 @@ import ( "context" "errors" - "go.uber.org/zap" - "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/consumer/consumerhelper" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) type metricsRequest struct { @@ -55,8 +53,7 @@ func (req *metricsRequest) export(ctx context.Context) error { } func (req *metricsRequest) count() int { - _, numPoints := req.md.MetricAndDataPointCount() - return numPoints + return req.md.DataPointCount() } type metricsExporter struct { @@ -67,7 +64,7 @@ type metricsExporter struct { // NewMetricsExporter creates an MetricsExporter that records observability metrics and wraps every request with a Span. func NewMetricsExporter( cfg config.Exporter, - logger *zap.Logger, + set component.ExporterCreateSettings, pusher consumerhelper.ConsumeMetricsFunc, options ...Option, ) (component.MetricsExporter, error) { @@ -75,7 +72,7 @@ func NewMetricsExporter( return nil, errNilConfig } - if logger == nil { + if set.Logger == nil { return nil, errNilLogger } @@ -84,7 +81,7 @@ func NewMetricsExporter( } bs := fromOptions(options...) 
- be := newBaseExporter(cfg, logger, bs) + be := newBaseExporter(cfg, set.Logger, bs) be.wrapConsumerSender(func(nextSender requestSender) requestSender { return &metricsSenderWithObservability{ obsrep: be.obsrep, diff --git a/internal/otel_collector/exporter/exporterhelper/queued_retry.go b/internal/otel_collector/exporter/exporterhelper/queued_retry.go index c413e8495a4..f4f9db48c85 100644 --- a/internal/otel_collector/exporter/exporterhelper/queued_retry.go +++ b/internal/otel_collector/exporter/exporterhelper/queued_retry.go @@ -25,7 +25,8 @@ import ( "go.opencensus.io/metric" "go.opencensus.io/metric/metricdata" "go.opencensus.io/metric/metricproducer" - "go.opencensus.io/trace" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap" "go.uber.org/zap/zapcore" @@ -103,7 +104,7 @@ type queuedRetrySender struct { consumerSender requestSender queue *queue.BoundedQueue retryStopCh chan struct{} - traceAttributes []trace.Attribute + traceAttributes []attribute.KeyValue logger *zap.Logger } @@ -129,7 +130,7 @@ func createSampledLogger(logger *zap.Logger) *zap.Logger { func newQueuedRetrySender(fullName string, qCfg QueueSettings, rCfg RetrySettings, nextSender requestSender, logger *zap.Logger) *queuedRetrySender { retryStopCh := make(chan struct{}) sampledLogger := createSampledLogger(logger) - traceAttr := trace.StringAttribute(obsmetrics.ExporterKey, fullName) + traceAttr := attribute.String(obsmetrics.ExporterKey, fullName) return &queuedRetrySender{ fullName: fullName, cfg: qCfg, @@ -142,7 +143,7 @@ func newQueuedRetrySender(fullName string, qCfg QueueSettings, rCfg RetrySetting }, queue: queue.NewBoundedQueue(qCfg.QueueSize, func(item interface{}) {}), retryStopCh: retryStopCh, - traceAttributes: []trace.Attribute{traceAttr}, + traceAttributes: []attribute.KeyValue{traceAttr}, logger: sampledLogger, } } @@ -184,17 +185,17 @@ func (qrs *queuedRetrySender) send(req request) error { // The grpc/http based receivers will cancel the request context after this function returns. req.setContext(noCancellationContext{Context: req.context()}) - span := trace.FromContext(req.context()) + span := trace.SpanFromContext(req.context()) if !qrs.queue.Produce(req) { qrs.logger.Error( "Dropping data because sending_queue is full. Try increasing queue_size.", zap.Int("dropped_items", req.count()), ) - span.Annotate(qrs.traceAttributes, "Dropped item, sending_queue is full.") + span.AddEvent("Dropped item, sending_queue is full.", trace.WithAttributes(qrs.traceAttributes...)) return errSendingQueueIsFull } - span.Annotate(qrs.traceAttributes, "Enqueued item.") + span.AddEvent("Enqueued item.", trace.WithAttributes(qrs.traceAttributes...)) return nil } @@ -217,20 +218,28 @@ func (qrs *queuedRetrySender) shutdown() { // TODO: Clean this by forcing all exporters to return an internal error type that always include the information about retries. type throttleRetry struct { - error + err error delay time.Duration } +func (t throttleRetry) Error() string { + return "Throttle (" + t.delay.String() + "), error: " + t.err.Error() +} + +func (t throttleRetry) Unwrap() error { + return t.err +} + // NewThrottleRetry creates a new throttle retry error. 
func NewThrottleRetry(err error, delay time.Duration) error { - return &throttleRetry{ - error: err, + return throttleRetry{ + err: err, delay: delay, } } type retrySender struct { - traceAttribute trace.Attribute + traceAttribute attribute.KeyValue cfg RetrySettings nextSender requestSender stopCh chan struct{} @@ -262,14 +271,12 @@ func (rs *retrySender) send(req request) error { Clock: backoff.SystemClock, } expBackoff.Reset() - span := trace.FromContext(req.context()) + span := trace.SpanFromContext(req.context()) retryNum := int64(0) for { - span.Annotate( - []trace.Attribute{ - rs.traceAttribute, - trace.Int64Attribute("retry_num", retryNum)}, - "Sending request.") + span.AddEvent( + "Sending request.", + trace.WithAttributes(rs.traceAttribute, attribute.Int64("retry_num", retryNum))) err := rs.nextSender.send(req) if err == nil { @@ -302,17 +309,19 @@ func (rs *retrySender) send(req request) error { return err } - if throttleErr, isThrottle := err.(*throttleRetry); isThrottle { + throttleErr := throttleRetry{} + isThrottle := errors.As(err, &throttleErr) + if isThrottle { backoffDelay = max(backoffDelay, throttleErr.delay) } backoffDelayStr := backoffDelay.String() - span.Annotate( - []trace.Attribute{ + span.AddEvent( + "Exporting failed. Will retry the request after interval.", + trace.WithAttributes( rs.traceAttribute, - trace.StringAttribute("interval", backoffDelayStr), - trace.StringAttribute("error", err.Error())}, - "Exporting failed. Will retry the request after interval.") + attribute.String("interval", backoffDelayStr), + attribute.String("error", err.Error()))) rs.logger.Info( "Exporting failed. Will retry the request after interval.", zap.Error(err), diff --git a/internal/otel_collector/exporter/exporterhelper/resource_to_label.go b/internal/otel_collector/exporter/exporterhelper/resource_to_label.go index 0c4b6bd8e08..f539188515b 100644 --- a/internal/otel_collector/exporter/exporterhelper/resource_to_label.go +++ b/internal/otel_collector/exporter/exporterhelper/resource_to_label.go @@ -15,7 +15,7 @@ package exporterhelper import ( - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" tracetranslator "go.opentelemetry.io/collector/translator/trace" ) @@ -73,12 +73,12 @@ func addLabelsToMetric(metric *pdata.Metric, labelMap pdata.StringMap) { switch metric.DataType() { case pdata.MetricDataTypeIntGauge: addLabelsToIntDataPoints(metric.IntGauge().DataPoints(), labelMap) - case pdata.MetricDataTypeDoubleGauge: - addLabelsToDoubleDataPoints(metric.DoubleGauge().DataPoints(), labelMap) + case pdata.MetricDataTypeGauge: + addLabelsToDoubleDataPoints(metric.Gauge().DataPoints(), labelMap) case pdata.MetricDataTypeIntSum: addLabelsToIntDataPoints(metric.IntSum().DataPoints(), labelMap) - case pdata.MetricDataTypeDoubleSum: - addLabelsToDoubleDataPoints(metric.DoubleSum().DataPoints(), labelMap) + case pdata.MetricDataTypeSum: + addLabelsToDoubleDataPoints(metric.Sum().DataPoints(), labelMap) case pdata.MetricDataTypeIntHistogram: addLabelsToIntHistogramDataPoints(metric.IntHistogram().DataPoints(), labelMap) case pdata.MetricDataTypeHistogram: diff --git a/internal/otel_collector/exporter/exporterhelper/traces.go b/internal/otel_collector/exporter/exporterhelper/traces.go index 8f67b4484a3..de452636cec 100644 --- a/internal/otel_collector/exporter/exporterhelper/traces.go +++ b/internal/otel_collector/exporter/exporterhelper/traces.go @@ -18,14 +18,12 @@ import ( "context" "errors" - "go.uber.org/zap" - 
"go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/consumer/consumerhelper" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) type tracesRequest struct { @@ -66,7 +64,7 @@ type traceExporter struct { // NewTracesExporter creates a TracesExporter that records observability metrics and wraps every request with a Span. func NewTracesExporter( cfg config.Exporter, - logger *zap.Logger, + set component.ExporterCreateSettings, pusher consumerhelper.ConsumeTracesFunc, options ...Option, ) (component.TracesExporter, error) { @@ -75,7 +73,7 @@ func NewTracesExporter( return nil, errNilConfig } - if logger == nil { + if set.Logger == nil { return nil, errNilLogger } @@ -84,7 +82,7 @@ func NewTracesExporter( } bs := fromOptions(options...) - be := newBaseExporter(cfg, logger, bs) + be := newBaseExporter(cfg, set.Logger, bs) be.wrapConsumerSender(func(nextSender requestSender) requestSender { return &tracesExporterWithObservability{ obsrep: be.obsrep, diff --git a/internal/otel_collector/internal/model/errors.go b/internal/otel_collector/exporter/fileexporter/doc.go similarity index 71% rename from internal/otel_collector/internal/model/errors.go rename to internal/otel_collector/exporter/fileexporter/doc.go index 0ce6051dffc..1988008f57f 100644 --- a/internal/otel_collector/internal/model/errors.go +++ b/internal/otel_collector/exporter/fileexporter/doc.go @@ -12,13 +12,5 @@ // See the License for the specific language governing permissions and // limitations under the License. -package model - -import ( - "fmt" -) - -// NewErrIncompatibleType returns errIncompatibleType instance -func NewErrIncompatibleType(expected, given interface{}) error { - return fmt.Errorf("expected model type %T but given %T", expected, given) -} +// Package fileexporter exports data to files. 
+package fileexporter diff --git a/internal/otel_collector/exporter/fileexporter/factory.go b/internal/otel_collector/exporter/fileexporter/factory.go index aa24720889e..0224615d078 100644 --- a/internal/otel_collector/exporter/fileexporter/factory.go +++ b/internal/otel_collector/exporter/fileexporter/factory.go @@ -54,7 +54,7 @@ func createTracesExporter( }) return exporterhelper.NewTracesExporter( cfg, - set.Logger, + set, fe.Unwrap().(*fileExporter).ConsumeTraces, exporterhelper.WithStart(fe.Start), exporterhelper.WithShutdown(fe.Shutdown), @@ -71,7 +71,7 @@ func createMetricsExporter( }) return exporterhelper.NewMetricsExporter( cfg, - set.Logger, + set, fe.Unwrap().(*fileExporter).ConsumeMetrics, exporterhelper.WithStart(fe.Start), exporterhelper.WithShutdown(fe.Shutdown), @@ -88,7 +88,7 @@ func createLogsExporter( }) return exporterhelper.NewLogsExporter( cfg, - set.Logger, + set, fe.Unwrap().(*fileExporter).ConsumeLogs, exporterhelper.WithStart(fe.Start), exporterhelper.WithShutdown(fe.Shutdown), diff --git a/internal/otel_collector/exporter/fileexporter/file_exporter.go b/internal/otel_collector/exporter/fileexporter/file_exporter.go index 49c7018928d..77900bf5b9c 100644 --- a/internal/otel_collector/exporter/fileexporter/file_exporter.go +++ b/internal/otel_collector/exporter/fileexporter/file_exporter.go @@ -22,8 +22,8 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/otlp" + "go.opentelemetry.io/collector/model/otlp" + "go.opentelemetry.io/collector/model/pdata" ) // Marshaler configuration used for marhsaling Protobuf to JSON. @@ -44,7 +44,7 @@ func (e *fileExporter) Capabilities() consumer.Capabilities { } func (e *fileExporter) ConsumeTraces(_ context.Context, td pdata.Traces) error { - buf, err := tracesMarshaler.Marshal(td) + buf, err := tracesMarshaler.MarshalTraces(td) if err != nil { return err } @@ -52,7 +52,7 @@ func (e *fileExporter) ConsumeTraces(_ context.Context, td pdata.Traces) error { } func (e *fileExporter) ConsumeMetrics(_ context.Context, md pdata.Metrics) error { - buf, err := metricsMarshaler.Marshal(md) + buf, err := metricsMarshaler.MarshalMetrics(md) if err != nil { return err } @@ -60,7 +60,7 @@ func (e *fileExporter) ConsumeMetrics(_ context.Context, md pdata.Metrics) error } func (e *fileExporter) ConsumeLogs(_ context.Context, ld pdata.Logs) error { - buf, err := logsMarshaler.Marshal(ld) + buf, err := logsMarshaler.MarshalLogs(ld) if err != nil { return err } diff --git a/internal/otel_collector/exporter/jaegerexporter/doc.go b/internal/otel_collector/exporter/jaegerexporter/doc.go index e66a34822ad..aafcbf18c89 100644 --- a/internal/otel_collector/exporter/jaegerexporter/doc.go +++ b/internal/otel_collector/exporter/jaegerexporter/doc.go @@ -12,6 +12,5 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package jaegerexporter implements an exporter that sends trace data to -// a Jaeger collector gRPC endpoint. +// Package jaegerexporter sends trace data to a Jaeger Collector gRPC endpoint. 
package jaegerexporter diff --git a/internal/otel_collector/exporter/jaegerexporter/exporter.go b/internal/otel_collector/exporter/jaegerexporter/exporter.go index cedfee98366..796e849fbbe 100644 --- a/internal/otel_collector/exporter/jaegerexporter/exporter.go +++ b/internal/otel_collector/exporter/jaegerexporter/exporter.go @@ -32,18 +32,18 @@ import ( "go.opentelemetry.io/collector/config/configgrpc" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/exporter/exporterhelper" + "go.opentelemetry.io/collector/model/pdata" jaegertranslator "go.opentelemetry.io/collector/translator/trace/jaeger" ) // newTracesExporter returns a new Jaeger gRPC exporter. // The exporter name is the name to be used in the observability of the exporter. // The collectorEndpoint should be of the form "hostname:14250" (a gRPC target). -func newTracesExporter(cfg *Config, logger *zap.Logger) (component.TracesExporter, error) { - s := newProtoGRPCSender(cfg, logger) +func newTracesExporter(cfg *Config, set component.ExporterCreateSettings) (component.TracesExporter, error) { + s := newProtoGRPCSender(cfg, set.Logger) return exporterhelper.NewTracesExporter( - cfg, logger, s.pushTraces, + cfg, set, s.pushTraces, exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), exporterhelper.WithStart(s.start), exporterhelper.WithShutdown(s.shutdown), diff --git a/internal/otel_collector/exporter/jaegerexporter/factory.go b/internal/otel_collector/exporter/jaegerexporter/factory.go index 6c93712f579..3bc8e00acf6 100644 --- a/internal/otel_collector/exporter/jaegerexporter/factory.go +++ b/internal/otel_collector/exporter/jaegerexporter/factory.go @@ -64,5 +64,5 @@ func createTracesExporter( expCfg.ID().String()) } - return newTracesExporter(expCfg, set.Logger) + return newTracesExporter(expCfg, set) } diff --git a/internal/.otel_collector_mixin/otlptext/mixin.go b/internal/otel_collector/exporter/kafkaexporter/doc.go similarity index 69% rename from internal/.otel_collector_mixin/otlptext/mixin.go rename to internal/otel_collector/exporter/kafkaexporter/doc.go index 045c9578571..d41ea08fb37 100644 --- a/internal/.otel_collector_mixin/otlptext/mixin.go +++ b/internal/otel_collector/exporter/kafkaexporter/doc.go @@ -12,17 +12,5 @@ // See the License for the specific language governing permissions and // limitations under the License. -package otlptext - -import ( - "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/otlptext" -) - -func Traces(td pdata.Traces) string { - return otlptext.Traces(td) -} - -func Metrics(md pdata.Metrics) string { - return otlptext.Metrics(md) -} +// Package kafkaexporter exports trace data to Kafka. 
+package kafkaexporter diff --git a/internal/otel_collector/exporter/kafkaexporter/factory.go b/internal/otel_collector/exporter/kafkaexporter/factory.go index f42028e4cf8..79d41beada2 100644 --- a/internal/otel_collector/exporter/kafkaexporter/factory.go +++ b/internal/otel_collector/exporter/kafkaexporter/factory.go @@ -111,7 +111,7 @@ func (f *kafkaExporterFactory) createTracesExporter( } return exporterhelper.NewTracesExporter( cfg, - set.Logger, + set, exp.tracesPusher, exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), // Disable exporterhelper Timeout, because we cannot pass a Context to the Producer, @@ -137,7 +137,7 @@ func (f *kafkaExporterFactory) createMetricsExporter( } return exporterhelper.NewMetricsExporter( cfg, - set.Logger, + set, exp.metricsDataPusher, exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), // Disable exporterhelper Timeout, because we cannot pass a Context to the Producer, @@ -163,7 +163,7 @@ func (f *kafkaExporterFactory) createLogsExporter( } return exporterhelper.NewLogsExporter( cfg, - set.Logger, + set, exp.logsDataPusher, exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), // Disable exporterhelper Timeout, because we cannot pass a Context to the Producer, diff --git a/internal/otel_collector/exporter/kafkaexporter/jaeger_marshaler.go b/internal/otel_collector/exporter/kafkaexporter/jaeger_marshaler.go index 6943a6ff285..c118df8c89c 100644 --- a/internal/otel_collector/exporter/kafkaexporter/jaeger_marshaler.go +++ b/internal/otel_collector/exporter/kafkaexporter/jaeger_marshaler.go @@ -22,7 +22,7 @@ import ( jaegerproto "github.com/jaegertracing/jaeger/model" "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" jaegertranslator "go.opentelemetry.io/collector/translator/trace/jaeger" ) diff --git a/internal/otel_collector/exporter/kafkaexporter/kafka_exporter.go b/internal/otel_collector/exporter/kafkaexporter/kafka_exporter.go index 087dc75d713..50921e24434 100644 --- a/internal/otel_collector/exporter/kafkaexporter/kafka_exporter.go +++ b/internal/otel_collector/exporter/kafkaexporter/kafka_exporter.go @@ -23,7 +23,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) var errUnrecognizedEncoding = fmt.Errorf("unrecognized encoding") diff --git a/internal/otel_collector/exporter/kafkaexporter/marshaler.go b/internal/otel_collector/exporter/kafkaexporter/marshaler.go index 5598b370614..838d236f1b3 100644 --- a/internal/otel_collector/exporter/kafkaexporter/marshaler.go +++ b/internal/otel_collector/exporter/kafkaexporter/marshaler.go @@ -17,7 +17,8 @@ package kafkaexporter import ( "github.com/Shopify/sarama" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/otlp" + "go.opentelemetry.io/collector/model/pdata" ) // TracesMarshaler marshals traces into Message array. @@ -49,11 +50,11 @@ type LogsMarshaler interface { // tracesMarshalers returns map of supported encodings with TracesMarshaler. 
func tracesMarshalers() map[string]TracesMarshaler { - otlppb := &otlpTracesPbMarshaler{} + otlpPb := newPdataTracesMarshaler(otlp.NewProtobufTracesMarshaler(), defaultEncoding) jaegerProto := jaegerMarshaler{marshaler: jaegerProtoSpanMarshaler{}} jaegerJSON := jaegerMarshaler{marshaler: newJaegerJSONMarshaler()} return map[string]TracesMarshaler{ - otlppb.Encoding(): otlppb, + otlpPb.Encoding(): otlpPb, jaegerProto.Encoding(): jaegerProto, jaegerJSON.Encoding(): jaegerJSON, } @@ -61,16 +62,16 @@ func tracesMarshalers() map[string]TracesMarshaler { // metricsMarshalers returns map of supported encodings and MetricsMarshaler func metricsMarshalers() map[string]MetricsMarshaler { - otlppb := &otlpMetricsPbMarshaler{} + otlpPb := newPdataMetricsMarshaler(otlp.NewProtobufMetricsMarshaler(), defaultEncoding) return map[string]MetricsMarshaler{ - otlppb.Encoding(): otlppb, + otlpPb.Encoding(): otlpPb, } } // logsMarshalers returns map of supported encodings and LogsMarshaler func logsMarshalers() map[string]LogsMarshaler { - otlppb := &otlpLogsPbMarshaler{} + otlpPb := newPdataLogsMarshaler(otlp.NewProtobufLogsMarshaler(), defaultEncoding) return map[string]LogsMarshaler{ - otlppb.Encoding(): otlppb, + otlpPb.Encoding(): otlpPb, } } diff --git a/internal/otel_collector/exporter/kafkaexporter/otlp_marshaler.go b/internal/otel_collector/exporter/kafkaexporter/otlp_marshaler.go deleted file mode 100644 index 75b7f928595..00000000000 --- a/internal/otel_collector/exporter/kafkaexporter/otlp_marshaler.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2020 The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package kafkaexporter - -import ( - "github.com/Shopify/sarama" - - "go.opentelemetry.io/collector/consumer/pdata" -) - -var _ TracesMarshaler = (*otlpTracesPbMarshaler)(nil) -var _ MetricsMarshaler = (*otlpMetricsPbMarshaler)(nil) - -type otlpTracesPbMarshaler struct { -} - -func (m *otlpTracesPbMarshaler) Encoding() string { - return defaultEncoding -} - -func (m *otlpTracesPbMarshaler) Marshal(td pdata.Traces, topic string) ([]*sarama.ProducerMessage, error) { - bts, err := td.ToOtlpProtoBytes() - if err != nil { - return nil, err - } - return []*sarama.ProducerMessage{ - { - Topic: topic, - Value: sarama.ByteEncoder(bts), - }, - }, nil -} - -type otlpMetricsPbMarshaler struct { -} - -func (m *otlpMetricsPbMarshaler) Encoding() string { - return defaultEncoding -} - -func (m *otlpMetricsPbMarshaler) Marshal(md pdata.Metrics, topic string) ([]*sarama.ProducerMessage, error) { - bts, err := md.ToOtlpProtoBytes() - if err != nil { - return nil, err - } - return []*sarama.ProducerMessage{ - { - Topic: topic, - Value: sarama.ByteEncoder(bts), - }, - }, nil -} - -type otlpLogsPbMarshaler struct { -} - -func (m *otlpLogsPbMarshaler) Encoding() string { - return defaultEncoding -} - -func (m *otlpLogsPbMarshaler) Marshal(ld pdata.Logs, topic string) ([]*sarama.ProducerMessage, error) { - bts, err := ld.ToOtlpProtoBytes() - if err != nil { - return nil, err - } - return []*sarama.ProducerMessage{ - { - Topic: topic, - Value: sarama.ByteEncoder(bts), - }, - }, nil -} diff --git a/internal/otel_collector/exporter/kafkaexporter/pdata_marshaler.go b/internal/otel_collector/exporter/kafkaexporter/pdata_marshaler.go new file mode 100644 index 00000000000..2dbc69d5dc6 --- /dev/null +++ b/internal/otel_collector/exporter/kafkaexporter/pdata_marshaler.go @@ -0,0 +1,108 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kafkaexporter + +import ( + "github.com/Shopify/sarama" + + "go.opentelemetry.io/collector/model/pdata" +) + +type pdataLogsMarshaler struct { + marshaler pdata.LogsMarshaler + encoding string +} + +func (p pdataLogsMarshaler) Marshal(ld pdata.Logs, topic string) ([]*sarama.ProducerMessage, error) { + bts, err := p.marshaler.MarshalLogs(ld) + if err != nil { + return nil, err + } + return []*sarama.ProducerMessage{ + { + Topic: topic, + Value: sarama.ByteEncoder(bts), + }, + }, nil +} + +func (p pdataLogsMarshaler) Encoding() string { + return p.encoding +} + +func newPdataLogsMarshaler(marshaler pdata.LogsMarshaler, encoding string) LogsMarshaler { + return pdataLogsMarshaler{ + marshaler: marshaler, + encoding: encoding, + } +} + +type pdataMetricsMarshaler struct { + marshaler pdata.MetricsMarshaler + encoding string +} + +func (p pdataMetricsMarshaler) Marshal(ld pdata.Metrics, topic string) ([]*sarama.ProducerMessage, error) { + bts, err := p.marshaler.MarshalMetrics(ld) + if err != nil { + return nil, err + } + return []*sarama.ProducerMessage{ + { + Topic: topic, + Value: sarama.ByteEncoder(bts), + }, + }, nil +} + +func (p pdataMetricsMarshaler) Encoding() string { + return p.encoding +} + +func newPdataMetricsMarshaler(marshaler pdata.MetricsMarshaler, encoding string) MetricsMarshaler { + return pdataMetricsMarshaler{ + marshaler: marshaler, + encoding: encoding, + } +} + +type pdataTracesMarshaler struct { + marshaler pdata.TracesMarshaler + encoding string +} + +func (p pdataTracesMarshaler) Marshal(td pdata.Traces, topic string) ([]*sarama.ProducerMessage, error) { + bts, err := p.marshaler.MarshalTraces(td) + if err != nil { + return nil, err + } + return []*sarama.ProducerMessage{ + { + Topic: topic, + Value: sarama.ByteEncoder(bts), + }, + }, nil +} + +func (p pdataTracesMarshaler) Encoding() string { + return p.encoding +} + +func newPdataTracesMarshaler(marshaler pdata.TracesMarshaler, encoding string) TracesMarshaler { + return pdataTracesMarshaler{ + marshaler: marshaler, + encoding: encoding, + } +} diff --git a/internal/otel_collector/otlptext/mixin.go b/internal/otel_collector/exporter/loggingexporter/doc.go similarity index 69% rename from internal/otel_collector/otlptext/mixin.go rename to internal/otel_collector/exporter/loggingexporter/doc.go index 045c9578571..d6652178b27 100644 --- a/internal/otel_collector/otlptext/mixin.go +++ b/internal/otel_collector/exporter/loggingexporter/doc.go @@ -12,17 +12,5 @@ // See the License for the specific language governing permissions and // limitations under the License. -package otlptext - -import ( - "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/otlptext" -) - -func Traces(td pdata.Traces) string { - return otlptext.Traces(td) -} - -func Metrics(md pdata.Metrics) string { - return otlptext.Metrics(md) -} +// Package loggingexporter exports data to console as logs. 
+package loggingexporter diff --git a/internal/otel_collector/exporter/loggingexporter/factory.go b/internal/otel_collector/exporter/loggingexporter/factory.go index 0f667fa4030..d7a3bcf502f 100644 --- a/internal/otel_collector/exporter/loggingexporter/factory.go +++ b/internal/otel_collector/exporter/loggingexporter/factory.go @@ -51,7 +51,7 @@ func createDefaultConfig() config.Exporter { } } -func createTracesExporter(_ context.Context, _ component.ExporterCreateSettings, config config.Exporter) (component.TracesExporter, error) { +func createTracesExporter(_ context.Context, set component.ExporterCreateSettings, config config.Exporter) (component.TracesExporter, error) { cfg := config.(*Config) exporterLogger, err := createLogger(cfg) @@ -59,10 +59,10 @@ func createTracesExporter(_ context.Context, _ component.ExporterCreateSettings, return nil, err } - return newTracesExporter(config, cfg.LogLevel, exporterLogger) + return newTracesExporter(config, cfg.LogLevel, exporterLogger, set) } -func createMetricsExporter(_ context.Context, _ component.ExporterCreateSettings, config config.Exporter) (component.MetricsExporter, error) { +func createMetricsExporter(_ context.Context, set component.ExporterCreateSettings, config config.Exporter) (component.MetricsExporter, error) { cfg := config.(*Config) exporterLogger, err := createLogger(cfg) @@ -70,10 +70,10 @@ func createMetricsExporter(_ context.Context, _ component.ExporterCreateSettings return nil, err } - return newMetricsExporter(config, cfg.LogLevel, exporterLogger) + return newMetricsExporter(config, cfg.LogLevel, exporterLogger, set) } -func createLogsExporter(_ context.Context, _ component.ExporterCreateSettings, config config.Exporter) (component.LogsExporter, error) { +func createLogsExporter(_ context.Context, set component.ExporterCreateSettings, config config.Exporter) (component.LogsExporter, error) { cfg := config.(*Config) exporterLogger, err := createLogger(cfg) @@ -81,7 +81,7 @@ func createLogsExporter(_ context.Context, _ component.ExporterCreateSettings, c return nil, err } - return newLogsExporter(config, cfg.LogLevel, exporterLogger) + return newLogsExporter(config, cfg.LogLevel, exporterLogger, set) } func createLogger(cfg *Config) (*zap.Logger, error) { diff --git a/internal/otel_collector/exporter/loggingexporter/logging_exporter.go b/internal/otel_collector/exporter/loggingexporter/logging_exporter.go index cb9a0ae17ab..daf0e49a95a 100644 --- a/internal/otel_collector/exporter/loggingexporter/logging_exporter.go +++ b/internal/otel_collector/exporter/loggingexporter/logging_exporter.go @@ -24,58 +24,81 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/exporter/exporterhelper" "go.opentelemetry.io/collector/internal/otlptext" + "go.opentelemetry.io/collector/model/pdata" ) type loggingExporter struct { - logger *zap.Logger - debug bool + logger *zap.Logger + debug bool + logsMarshaler pdata.LogsMarshaler + metricsMarshaler pdata.MetricsMarshaler + tracesMarshaler pdata.TracesMarshaler } -func (s *loggingExporter) pushTraces( - _ context.Context, - td pdata.Traces, -) error { - +func (s *loggingExporter) pushTraces(_ context.Context, td pdata.Traces) error { s.logger.Info("TracesExporter", zap.Int("#spans", td.SpanCount())) if !s.debug { return nil } - s.logger.Debug(otlptext.Traces(td)) - + buf, err := s.tracesMarshaler.MarshalTraces(td) + if err != nil { + 
return err + } + s.logger.Debug(string(buf)) return nil } -func (s *loggingExporter) pushMetrics( - _ context.Context, - md pdata.Metrics, -) error { +func (s *loggingExporter) pushMetrics(_ context.Context, md pdata.Metrics) error { s.logger.Info("MetricsExporter", zap.Int("#metrics", md.MetricCount())) if !s.debug { return nil } - s.logger.Debug(otlptext.Metrics(md)) + buf, err := s.metricsMarshaler.MarshalMetrics(md) + if err != nil { + return err + } + s.logger.Debug(string(buf)) + return nil +} + +func (s *loggingExporter) pushLogs(_ context.Context, ld pdata.Logs) error { + s.logger.Info("LogsExporter", zap.Int("#logs", ld.LogRecordCount())) + + if !s.debug { + return nil + } + buf, err := s.logsMarshaler.MarshalLogs(ld) + if err != nil { + return err + } + s.logger.Debug(string(buf)) return nil } -// newTracesExporter creates an exporter.TracesExporter that just drops the -// received data and logs debugging messages. -func newTracesExporter(config config.Exporter, level string, logger *zap.Logger) (component.TracesExporter, error) { - s := &loggingExporter{ - debug: strings.ToLower(level) == "debug", - logger: logger, +func newLoggingExporter(level string, logger *zap.Logger) *loggingExporter { + return &loggingExporter{ + debug: strings.ToLower(level) == "debug", + logger: logger, + logsMarshaler: otlptext.NewTextLogsMarshaler(), + metricsMarshaler: otlptext.NewTextMetricsMarshaler(), + tracesMarshaler: otlptext.NewTextTracesMarshaler(), } +} +// newTracesExporter creates an exporter.TracesExporter that just drops the +// received data and logs debugging messages. +func newTracesExporter(config config.Exporter, level string, logger *zap.Logger, set component.ExporterCreateSettings) (component.TracesExporter, error) { + s := newLoggingExporter(level, logger) return exporterhelper.NewTracesExporter( config, - logger, + set, s.pushTraces, exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), // Disable Timeout/RetryOnFailure and SendingQueue @@ -88,15 +111,11 @@ func newTracesExporter(config config.Exporter, level string, logger *zap.Logger) // newMetricsExporter creates an exporter.MetricsExporter that just drops the // received data and logs debugging messages. -func newMetricsExporter(config config.Exporter, level string, logger *zap.Logger) (component.MetricsExporter, error) { - s := &loggingExporter{ - debug: strings.ToLower(level) == "debug", - logger: logger, - } - +func newMetricsExporter(config config.Exporter, level string, logger *zap.Logger, set component.ExporterCreateSettings) (component.MetricsExporter, error) { + s := newLoggingExporter(level, logger) return exporterhelper.NewMetricsExporter( config, - logger, + set, s.pushMetrics, exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), // Disable Timeout/RetryOnFailure and SendingQueue @@ -109,15 +128,11 @@ func newMetricsExporter(config config.Exporter, level string, logger *zap.Logger // newLogsExporter creates an exporter.LogsExporter that just drops the // received data and logs debugging messages. 
-func newLogsExporter(config config.Exporter, level string, logger *zap.Logger) (component.LogsExporter, error) { - s := &loggingExporter{ - debug: strings.ToLower(level) == "debug", - logger: logger, - } - +func newLogsExporter(config config.Exporter, level string, logger *zap.Logger, set component.ExporterCreateSettings) (component.LogsExporter, error) { + s := newLoggingExporter(level, logger) return exporterhelper.NewLogsExporter( config, - logger, + set, s.pushLogs, exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), // Disable Timeout/RetryOnFailure and SendingQueue @@ -128,21 +143,6 @@ func newLogsExporter(config config.Exporter, level string, logger *zap.Logger) ( ) } -func (s *loggingExporter) pushLogs( - _ context.Context, - ld pdata.Logs, -) error { - s.logger.Info("LogsExporter", zap.Int("#logs", ld.LogRecordCount())) - - if !s.debug { - return nil - } - - s.logger.Debug(otlptext.Logs(ld)) - - return nil -} - func loggerSync(logger *zap.Logger) func(context.Context) error { return func(context.Context) error { // Currently Sync() return a different error depending on the OS. diff --git a/internal/otel_collector/exporter/opencensusexporter/doc.go b/internal/otel_collector/exporter/opencensusexporter/doc.go new file mode 100644 index 00000000000..13cb23b5bb8 --- /dev/null +++ b/internal/otel_collector/exporter/opencensusexporter/doc.go @@ -0,0 +1,16 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package opencensusexporter exports data to an OpenCensus agent. 
+package opencensusexporter diff --git a/internal/otel_collector/exporter/opencensusexporter/factory.go b/internal/otel_collector/exporter/opencensusexporter/factory.go index 79689082a44..caea1ae9609 100644 --- a/internal/otel_collector/exporter/opencensusexporter/factory.go +++ b/internal/otel_collector/exporter/opencensusexporter/factory.go @@ -59,7 +59,7 @@ func createTracesExporter(ctx context.Context, set component.ExporterCreateSetti return exporterhelper.NewTracesExporter( cfg, - set.Logger, + set, oce.pushTraces, exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), exporterhelper.WithRetry(oCfg.RetrySettings), @@ -77,7 +77,7 @@ func createMetricsExporter(ctx context.Context, set component.ExporterCreateSett return exporterhelper.NewMetricsExporter( cfg, - set.Logger, + set, oce.pushMetrics, exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), exporterhelper.WithRetry(oCfg.RetrySettings), diff --git a/internal/otel_collector/exporter/opencensusexporter/opencensus.go b/internal/otel_collector/exporter/opencensusexporter/opencensus.go index ca1c28f3e7b..eeff52e177e 100644 --- a/internal/otel_collector/exporter/opencensusexporter/opencensus.go +++ b/internal/otel_collector/exporter/opencensusexporter/opencensus.go @@ -27,7 +27,7 @@ import ( "google.golang.org/grpc/metadata" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/translator/internaldata" ) diff --git a/internal/otel_collector/exporter/otlpexporter/doc.go b/internal/otel_collector/exporter/otlpexporter/doc.go new file mode 100644 index 00000000000..41d4b7662fa --- /dev/null +++ b/internal/otel_collector/exporter/otlpexporter/doc.go @@ -0,0 +1,16 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package otlpexporter exports data by using the OTLP format to a gRPC endpoint.
+package otlpexporter diff --git a/internal/otel_collector/exporter/otlpexporter/factory.go b/internal/otel_collector/exporter/otlpexporter/factory.go index 8a53242cb4f..bb8f89bbf48 100644 --- a/internal/otel_collector/exporter/otlpexporter/factory.go +++ b/internal/otel_collector/exporter/otlpexporter/factory.go @@ -65,7 +65,7 @@ func createTracesExporter( oCfg := cfg.(*Config) return exporterhelper.NewTracesExporter( cfg, - set.Logger, + set, oce.pushTraces, exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), exporterhelper.WithTimeout(oCfg.TimeoutSettings), @@ -87,7 +87,7 @@ func createMetricsExporter( oCfg := cfg.(*Config) return exporterhelper.NewMetricsExporter( cfg, - set.Logger, + set, oce.pushMetrics, exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), exporterhelper.WithTimeout(oCfg.TimeoutSettings), @@ -110,7 +110,7 @@ func createLogsExporter( oCfg := cfg.(*Config) return exporterhelper.NewLogsExporter( cfg, - set.Logger, + set, oce.pushLogs, exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), exporterhelper.WithTimeout(oCfg.TimeoutSettings), diff --git a/internal/otel_collector/exporter/otlpexporter/otlp.go b/internal/otel_collector/exporter/otlpexporter/otlp.go index 8502d8b397f..b592d790ceb 100644 --- a/internal/otel_collector/exporter/otlpexporter/otlp.go +++ b/internal/otel_collector/exporter/otlpexporter/otlp.go @@ -29,9 +29,9 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/internal/pdatagrpc" + "go.opentelemetry.io/collector/model/otlpgrpc" + "go.opentelemetry.io/collector/model/pdata" ) type exporter struct { @@ -86,9 +86,9 @@ func (e *exporter) pushLogs(ctx context.Context, ld pdata.Logs) error { type grpcSender struct { // gRPC clients and connection. - traceExporter pdatagrpc.TracesClient - metricExporter pdatagrpc.MetricsClient - logExporter pdatagrpc.LogsClient + traceExporter otlpgrpc.TracesClient + metricExporter otlpgrpc.MetricsClient + logExporter otlpgrpc.LogsClient clientConn *grpc.ClientConn metadata metadata.MD callOptions []grpc.CallOption @@ -106,9 +106,9 @@ func newGrpcSender(config *Config, ext map[config.ComponentID]component.Extensio } gs := &grpcSender{ - traceExporter: pdatagrpc.NewTracesClient(clientConn), - metricExporter: pdatagrpc.NewMetricsClient(clientConn), - logExporter: pdatagrpc.NewLogsClient(clientConn), + traceExporter: otlpgrpc.NewTracesClient(clientConn), + metricExporter: otlpgrpc.NewMetricsClient(clientConn), + logExporter: otlpgrpc.NewLogsClient(clientConn), clientConn: clientConn, metadata: metadata.New(config.GRPCClientSettings.Headers), callOptions: []grpc.CallOption{ diff --git a/internal/otel_collector/exporter/otlphttpexporter/doc.go b/internal/otel_collector/exporter/otlphttpexporter/doc.go new file mode 100644 index 00000000000..816df31ac35 --- /dev/null +++ b/internal/otel_collector/exporter/otlphttpexporter/doc.go @@ -0,0 +1,16 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package otlphttpexporter exports data by using the OTLP format to an HTTP endpoint. +package otlphttpexporter diff --git a/internal/otel_collector/exporter/otlphttpexporter/factory.go b/internal/otel_collector/exporter/otlphttpexporter/factory.go index 5353c5c3ad7..733714ea9db 100644 --- a/internal/otel_collector/exporter/otlphttpexporter/factory.go +++ b/internal/otel_collector/exporter/otlphttpexporter/factory.go @@ -90,7 +90,7 @@ func createTracesExporter( return exporterhelper.NewTracesExporter( cfg, - set.Logger, + set, oce.pushTraces, exporterhelper.WithStart(oce.start), exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), @@ -118,7 +118,7 @@ func createMetricsExporter( return exporterhelper.NewMetricsExporter( cfg, - set.Logger, + set, oce.pushMetrics, exporterhelper.WithStart(oce.start), exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), @@ -146,7 +146,7 @@ func createLogsExporter( return exporterhelper.NewLogsExporter( cfg, - set.Logger, + set, oce.pushLogs, exporterhelper.WithStart(oce.start), exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), diff --git a/internal/otel_collector/exporter/otlphttpexporter/otlp.go b/internal/otel_collector/exporter/otlphttpexporter/otlp.go index 6c3c8f8322a..53da6082acb 100644 --- a/internal/otel_collector/exporter/otlphttpexporter/otlp.go +++ b/internal/otel_collector/exporter/otlphttpexporter/otlp.go @@ -35,9 +35,10 @@ import ( "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/config/configgrpc" "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/exporter/exporterhelper" "go.opentelemetry.io/collector/internal/middleware" + "go.opentelemetry.io/collector/model/otlp" + "go.opentelemetry.io/collector/model/pdata" ) type exporter struct { @@ -50,6 +51,12 @@ type exporter struct { logger *zap.Logger } +var ( + tracesMarshaler = otlp.NewProtobufTracesMarshaler() + metricsMarshaler = otlp.NewProtobufMetricsMarshaler() + logsMarshaler = otlp.NewProtobufLogsMarshaler() +) + const ( headerRetryAfter = "Retry-After" maxHTTPResponseReadBytes = 64 * 1024 @@ -92,8 +99,8 @@ func (e *exporter) start(_ context.Context, host component.Host) error { return nil } -func (e *exporter) pushTraces(ctx context.Context, traces pdata.Traces) error { - request, err := traces.ToOtlpProtoBytes() +func (e *exporter) pushTraces(ctx context.Context, td pdata.Traces) error { + request, err := tracesMarshaler.MarshalTraces(td) if err != nil { return consumererror.Permanent(err) } @@ -101,16 +108,16 @@ func (e *exporter) pushTraces(ctx context.Context, traces pdata.Traces) error { return e.export(ctx, e.tracesURL, request) } -func (e *exporter) pushMetrics(ctx context.Context, metrics pdata.Metrics) error { - request, err := metrics.ToOtlpProtoBytes() +func (e *exporter) pushMetrics(ctx context.Context, md pdata.Metrics) error { + request, err := metricsMarshaler.MarshalMetrics(md) if err != nil { return consumererror.Permanent(err) } return e.export(ctx, e.metricsURL, request) } 
-func (e *exporter) pushLogs(ctx context.Context, logs pdata.Logs) error { - request, err := logs.ToOtlpProtoBytes() +func (e *exporter) pushLogs(ctx context.Context, ld pdata.Logs) error { + request, err := logsMarshaler.MarshalLogs(ld) if err != nil { return consumererror.Permanent(err) } diff --git a/internal/otel_collector/exporter/prometheusexporter/accumulator.go b/internal/otel_collector/exporter/prometheusexporter/accumulator.go index 3652d05f374..f3a509e5ace 100644 --- a/internal/otel_collector/exporter/prometheusexporter/accumulator.go +++ b/internal/otel_collector/exporter/prometheusexporter/accumulator.go @@ -22,7 +22,7 @@ import ( "go.uber.org/zap" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) type accumulatedValue struct { @@ -86,10 +86,10 @@ func (a *lastValueAccumulator) addMetric(metric pdata.Metric, il pdata.Instrumen return a.accumulateIntGauge(metric, il, now) case pdata.MetricDataTypeIntSum: return a.accumulateIntSum(metric, il, now) - case pdata.MetricDataTypeDoubleGauge: + case pdata.MetricDataTypeGauge: return a.accumulateDoubleGauge(metric, il, now) - case pdata.MetricDataTypeDoubleSum: - return a.accumulateDoubleSum(metric, il, now) + case pdata.MetricDataTypeSum: + return a.accumulateSum(metric, il, now) case pdata.MetricDataTypeIntHistogram: return a.accumulateIntHistogram(metric, il, now) case pdata.MetricDataTypeHistogram: @@ -162,7 +162,7 @@ func (a *lastValueAccumulator) accumulateIntGauge(metric pdata.Metric, il pdata. } func (a *lastValueAccumulator) accumulateDoubleGauge(metric pdata.Metric, il pdata.InstrumentationLibrary, now time.Time) (n int) { - dps := metric.DoubleGauge().DataPoints() + dps := metric.Gauge().DataPoints() for i := 0; i < dps.Len(); i++ { ip := dps.At(i) @@ -171,20 +171,20 @@ func (a *lastValueAccumulator) accumulateDoubleGauge(metric pdata.Metric, il pda v, ok := a.registeredMetrics.Load(signature) if !ok { m := createMetric(metric) - ip.CopyTo(m.DoubleGauge().DataPoints().AppendEmpty()) + ip.CopyTo(m.Gauge().DataPoints().AppendEmpty()) a.registeredMetrics.Store(signature, &accumulatedValue{value: m, instrumentationLibrary: il, updated: now}) n++ continue } mv := v.(*accumulatedValue) - if ip.Timestamp().AsTime().Before(mv.value.DoubleGauge().DataPoints().At(0).Timestamp().AsTime()) { + if ip.Timestamp().AsTime().Before(mv.value.Gauge().DataPoints().At(0).Timestamp().AsTime()) { // only keep datapoint with latest timestamp continue } m := createMetric(metric) - ip.CopyTo(m.DoubleGauge().DataPoints().AppendEmpty()) + ip.CopyTo(m.Gauge().DataPoints().AppendEmpty()) a.registeredMetrics.Store(signature, &accumulatedValue{value: m, instrumentationLibrary: il, updated: now}) n++ } @@ -232,8 +232,8 @@ func (a *lastValueAccumulator) accumulateIntSum(metric pdata.Metric, il pdata.In return } -func (a *lastValueAccumulator) accumulateDoubleSum(metric pdata.Metric, il pdata.InstrumentationLibrary, now time.Time) (n int) { - doubleSum := metric.DoubleSum() +func (a *lastValueAccumulator) accumulateSum(metric pdata.Metric, il pdata.InstrumentationLibrary, now time.Time) (n int) { + doubleSum := metric.Sum() // Drop metrics with non-cumulative aggregations if doubleSum.AggregationTemporality() != pdata.AggregationTemporalityCumulative { @@ -249,24 +249,24 @@ func (a *lastValueAccumulator) accumulateDoubleSum(metric pdata.Metric, il pdata v, ok := a.registeredMetrics.Load(signature) if !ok { m := createMetric(metric) - m.DoubleSum().SetIsMonotonic(metric.DoubleSum().IsMonotonic()) - 
m.DoubleSum().SetAggregationTemporality(pdata.AggregationTemporalityCumulative) - ip.CopyTo(m.DoubleSum().DataPoints().AppendEmpty()) + m.Sum().SetIsMonotonic(metric.Sum().IsMonotonic()) + m.Sum().SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + ip.CopyTo(m.Sum().DataPoints().AppendEmpty()) a.registeredMetrics.Store(signature, &accumulatedValue{value: m, instrumentationLibrary: il, updated: now}) n++ continue } mv := v.(*accumulatedValue) - if ip.Timestamp().AsTime().Before(mv.value.DoubleSum().DataPoints().At(0).Timestamp().AsTime()) { + if ip.Timestamp().AsTime().Before(mv.value.Sum().DataPoints().At(0).Timestamp().AsTime()) { // only keep datapoint with latest timestamp continue } m := createMetric(metric) - m.DoubleSum().SetIsMonotonic(metric.DoubleSum().IsMonotonic()) - m.DoubleSum().SetAggregationTemporality(pdata.AggregationTemporalityCumulative) - ip.CopyTo(m.DoubleSum().DataPoints().AppendEmpty()) + m.Sum().SetIsMonotonic(metric.Sum().IsMonotonic()) + m.Sum().SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + ip.CopyTo(m.Sum().DataPoints().AppendEmpty()) a.registeredMetrics.Store(signature, &accumulatedValue{value: m, instrumentationLibrary: il, updated: now}) n++ } diff --git a/internal/otel_collector/exporter/prometheusexporter/collector.go b/internal/otel_collector/exporter/prometheusexporter/collector.go index 921042180f1..572de9ae129 100644 --- a/internal/otel_collector/exporter/prometheusexporter/collector.go +++ b/internal/otel_collector/exporter/prometheusexporter/collector.go @@ -21,7 +21,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) type collector struct { @@ -62,10 +62,10 @@ func (c *collector) convertMetric(metric pdata.Metric) (prometheus.Metric, error return c.convertIntGauge(metric) case pdata.MetricDataTypeIntSum: return c.convertIntSum(metric) - case pdata.MetricDataTypeDoubleGauge: + case pdata.MetricDataTypeGauge: return c.convertDoubleGauge(metric) - case pdata.MetricDataTypeDoubleSum: - return c.convertDoubleSum(metric) + case pdata.MetricDataTypeSum: + return c.convertSum(metric) case pdata.MetricDataTypeIntHistogram: return c.convertIntHistogram(metric) case pdata.MetricDataTypeHistogram: @@ -118,7 +118,7 @@ func (c *collector) convertIntGauge(metric pdata.Metric) (prometheus.Metric, err } func (c *collector) convertDoubleGauge(metric pdata.Metric) (prometheus.Metric, error) { - ip := metric.DoubleGauge().DataPoints().At(0) + ip := metric.Gauge().DataPoints().At(0) desc, labels := c.getMetricMetadata(metric, ip.LabelsMap()) m, err := prometheus.NewConstMetric(desc, prometheus.GaugeValue, ip.Value(), labels...) 
@@ -152,11 +152,11 @@ func (c *collector) convertIntSum(metric pdata.Metric) (prometheus.Metric, error return m, nil } -func (c *collector) convertDoubleSum(metric pdata.Metric) (prometheus.Metric, error) { - ip := metric.DoubleSum().DataPoints().At(0) +func (c *collector) convertSum(metric pdata.Metric) (prometheus.Metric, error) { + ip := metric.Sum().DataPoints().At(0) metricType := prometheus.GaugeValue - if metric.DoubleSum().IsMonotonic() { + if metric.Sum().IsMonotonic() { metricType = prometheus.CounterValue } diff --git a/internal/otel_collector/exporter/prometheusexporter/doc.go b/internal/otel_collector/exporter/prometheusexporter/doc.go new file mode 100644 index 00000000000..d193c39fea4 --- /dev/null +++ b/internal/otel_collector/exporter/prometheusexporter/doc.go @@ -0,0 +1,16 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package prometheusexporter exports metrics data as a Prometheus pull handler. +package prometheusexporter diff --git a/internal/otel_collector/exporter/prometheusexporter/factory.go b/internal/otel_collector/exporter/prometheusexporter/factory.go index b7a836fe3e9..e571eb484c5 100644 --- a/internal/otel_collector/exporter/prometheusexporter/factory.go +++ b/internal/otel_collector/exporter/prometheusexporter/factory.go @@ -60,7 +60,7 @@ func createMetricsExporter( exporter, err := exporterhelper.NewMetricsExporter( cfg, - set.Logger, + set, prometheus.ConsumeMetrics, exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), exporterhelper.WithStart(prometheus.Start), diff --git a/internal/otel_collector/exporter/prometheusexporter/prometheus.go b/internal/otel_collector/exporter/prometheusexporter/prometheus.go index aa8bee2ede0..4d6d9b9c40e 100644 --- a/internal/otel_collector/exporter/prometheusexporter/prometheus.go +++ b/internal/otel_collector/exporter/prometheusexporter/prometheus.go @@ -27,7 +27,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configtelemetry" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/obsreport" ) diff --git a/internal/otel_collector/exporter/prometheusremotewriteexporter/doc.go b/internal/otel_collector/exporter/prometheusremotewriteexporter/doc.go new file mode 100644 index 00000000000..fc02fadc5c1 --- /dev/null +++ b/internal/otel_collector/exporter/prometheusremotewriteexporter/doc.go @@ -0,0 +1,16 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package prometheusremotewriteexporter sends metrics data to Prometheus Remote Write (PRW) endpoints. +package prometheusremotewriteexporter diff --git a/internal/otel_collector/exporter/prometheusremotewriteexporter/exporter.go b/internal/otel_collector/exporter/prometheusremotewriteexporter/exporter.go index f8d81e540a9..be41159d51a 100644 --- a/internal/otel_collector/exporter/prometheusremotewriteexporter/exporter.go +++ b/internal/otel_collector/exporter/prometheusremotewriteexporter/exporter.go @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package prometheusremotewriteexporter implements an exporter that sends Prometheus remote write requests. package prometheusremotewriteexporter import ( @@ -35,7 +34,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) const maxBatchByteSize = 3000000 @@ -130,8 +129,8 @@ func (prwe *PRWExporter) PushMetrics(ctx context.Context, md pdata.Metrics) erro // handle individual metric based on type switch metric.DataType() { - case pdata.MetricDataTypeDoubleGauge: - dataPoints := metric.DoubleGauge().DataPoints() + case pdata.MetricDataTypeGauge: + dataPoints := metric.Gauge().DataPoints() if err := prwe.addDoubleDataPointSlice(dataPoints, tsMap, resource, metric); err != nil { dropped++ errs = append(errs, err) @@ -142,8 +141,8 @@ func (prwe *PRWExporter) PushMetrics(ctx context.Context, md pdata.Metrics) erro dropped++ errs = append(errs, err) } - case pdata.MetricDataTypeDoubleSum: - dataPoints := metric.DoubleSum().DataPoints() + case pdata.MetricDataTypeSum: + dataPoints := metric.Sum().DataPoints() if err := prwe.addDoubleDataPointSlice(dataPoints, tsMap, resource, metric); err != nil { dropped++ errs = append(errs, err) diff --git a/internal/otel_collector/exporter/prometheusremotewriteexporter/factory.go b/internal/otel_collector/exporter/prometheusremotewriteexporter/factory.go index ea4f9af06da..fc36dfddafb 100644 --- a/internal/otel_collector/exporter/prometheusremotewriteexporter/factory.go +++ b/internal/otel_collector/exporter/prometheusremotewriteexporter/factory.go @@ -17,6 +17,7 @@ package prometheusremotewriteexporter import ( "context" "errors" + "time" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config" @@ -58,7 +59,7 @@ func createMetricsExporter(_ context.Context, set component.ExporterCreateSettin // "out of order samples" errors. return exporterhelper.NewMetricsExporter( cfg, - set.Logger, + set, prwe.PushMetrics, exporterhelper.WithTimeout(prwCfg.TimeoutSettings), exporterhelper.WithQueue(exporterhelper.QueueSettings{ @@ -79,7 +80,12 @@ func createDefaultConfig() config.Exporter { Namespace: "", ExternalLabels: map[string]string{}, TimeoutSettings: exporterhelper.DefaultTimeoutSettings(), - RetrySettings: exporterhelper.DefaultRetrySettings(), + RetrySettings: exporterhelper.RetrySettings{ + Enabled: true, + InitialInterval: 50 * time.Millisecond, + MaxInterval: 200 * time.Millisecond, + MaxElapsedTime: 1 * time.Minute, + }, HTTPClientSettings: confighttp.HTTPClientSettings{ Endpoint: "http://some.url:9411/api/prom/push", // We almost read 0 bytes, so no need to tune ReadBufferSize. 
diff --git a/internal/otel_collector/exporter/prometheusremotewriteexporter/helper.go b/internal/otel_collector/exporter/prometheusremotewriteexporter/helper.go index a3fd17f66b0..894eb4be864 100644 --- a/internal/otel_collector/exporter/prometheusremotewriteexporter/helper.go +++ b/internal/otel_collector/exporter/prometheusremotewriteexporter/helper.go @@ -26,7 +26,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/prompb" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) const ( @@ -51,12 +51,12 @@ func (a ByLabelName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } // matching metric type and field func validateMetrics(metric pdata.Metric) bool { switch metric.DataType() { - case pdata.MetricDataTypeDoubleGauge: - return metric.DoubleGauge().DataPoints().Len() != 0 + case pdata.MetricDataTypeGauge: + return metric.Gauge().DataPoints().Len() != 0 case pdata.MetricDataTypeIntGauge: return metric.IntGauge().DataPoints().Len() != 0 - case pdata.MetricDataTypeDoubleSum: - return metric.DoubleSum().DataPoints().Len() != 0 && metric.DoubleSum().AggregationTemporality() == pdata.AggregationTemporalityCumulative + case pdata.MetricDataTypeSum: + return metric.Sum().DataPoints().Len() != 0 && metric.Sum().AggregationTemporality() == pdata.AggregationTemporalityCumulative case pdata.MetricDataTypeIntSum: return metric.IntSum().DataPoints().Len() != 0 && metric.IntSum().AggregationTemporality() == pdata.AggregationTemporalityCumulative case pdata.MetricDataTypeHistogram: diff --git a/internal/otel_collector/exporter/zipkinexporter/doc.go b/internal/otel_collector/exporter/zipkinexporter/doc.go new file mode 100644 index 00000000000..488f0ca1209 --- /dev/null +++ b/internal/otel_collector/exporter/zipkinexporter/doc.go @@ -0,0 +1,16 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package zipkinexporter exports trace data to Zipkin. +package zipkinexporter diff --git a/internal/otel_collector/exporter/zipkinexporter/factory.go b/internal/otel_collector/exporter/zipkinexporter/factory.go index af4034f7a77..dab066a8d79 100644 --- a/internal/otel_collector/exporter/zipkinexporter/factory.go +++ b/internal/otel_collector/exporter/zipkinexporter/factory.go @@ -77,7 +77,7 @@ func createTracesExporter( } return exporterhelper.NewTracesExporter( zc, - set.Logger, + set, ze.pushTraces, exporterhelper.WithStart(ze.start), // explicitly disable since we rely on http.Client timeout logic. 
diff --git a/internal/otel_collector/exporter/zipkinexporter/zipkin.go b/internal/otel_collector/exporter/zipkinexporter/zipkin.go
index 7f220d486e7..90ed4466419 100644
--- a/internal/otel_collector/exporter/zipkinexporter/zipkin.go
+++ b/internal/otel_collector/exporter/zipkinexporter/zipkin.go
@@ -26,10 +26,12 @@ import (
 	"go.opentelemetry.io/collector/component"
 	"go.opentelemetry.io/collector/config/confighttp"
 	"go.opentelemetry.io/collector/consumer/consumererror"
-	"go.opentelemetry.io/collector/consumer/pdata"
-	"go.opentelemetry.io/collector/translator/trace/zipkin"
+	"go.opentelemetry.io/collector/model/pdata"
+	"go.opentelemetry.io/collector/translator/trace/zipkinv2"
 )
 
+var translator zipkinv2.FromTranslator
+
 // zipkinExporter is a multiplexing exporter that spawns a new OpenCensus-Go Zipkin
 // exporter per unique node encountered. This is because serviceNames per node define
 // unique services, alongside their IPs. Also it is useful to receive traffic from
@@ -71,12 +73,12 @@ func (ze *zipkinExporter) start(_ context.Context, host component.Host) (err err
 }
 
 func (ze *zipkinExporter) pushTraces(ctx context.Context, td pdata.Traces) error {
-	tbatch, err := zipkin.InternalTracesToZipkinSpans(td)
+	spans, err := translator.FromTraces(td)
 	if err != nil {
 		return consumererror.Permanent(fmt.Errorf("failed to push trace data via Zipkin exporter: %w", err))
 	}
 
-	body, err := ze.serializer.Serialize(tbatch)
+	body, err := ze.serializer.Serialize(spans)
 	if err != nil {
 		return consumererror.Permanent(fmt.Errorf("failed to push trace data via Zipkin exporter: %w", err))
 	}
diff --git a/internal/otel_collector/extension/ballastextension/README.md b/internal/otel_collector/extension/ballastextension/README.md
index 3fc5d93be0a..78018259681 100644
--- a/internal/otel_collector/extension/ballastextension/README.md
+++ b/internal/otel_collector/extension/ballastextension/README.md
@@ -6,12 +6,38 @@ Memory Ballast extension enables applications to configure memory ballast for th
 
 The following settings can be configured:
 
-- `size_mib` (default = 0, disabled): Is the memory ballast size, in MiB.
+- `size_mib` (default = 0, disabled): Is the memory ballast size, in MiB.
+  Takes higher priority than `size_in_percentage` if both are specified at the same time.
+- `size_in_percentage` (default = 0, disabled): Sets the memory ballast as a percentage of
+  the total memory; the valid value range is `1-100`.
+  It is supported in both containerized (e.g. Docker, Kubernetes) and physical host environments.
+
+**How the ballast size is calculated with the percentage configuration**
+When `size_in_percentage` is set to a value in `1-100`, the absolute `ballast_size` is calculated as
+`size_in_percentage * totalMemory / 100`. The `totalMemory` is determined for hosts and containers (Docker, Kubernetes, etc.) by the following steps:
+1. Look up the memory cgroup subsystem on the target host or container and check whether a total memory limit has been set for the running collector process.
+   The limit is read from the `memory.limit_in_bytes` file under the cgroup memory files (e.g. `/sys/fs/cgroup/memory/memory.limit_in_bytes`).
+
+2. If `memory.limit_in_bytes` is a positive value other than `9223372036854771712` (`0x7FFFFFFFFFFFF000`), the `ballast_size`
+   is calculated as `memory.limit_in_bytes * size_in_percentage / 100`.
+   If `memory.limit_in_bytes` is `9223372036854771712` (`0x7FFFFFFFFFFFF000`), no memory limit has
+   been set for the collector process or the running container in the cgroup, and the `totalMemory` is determined in the next step.
+
+3. If no memory limit is set in the cgroup for the collector process or the container where the collector is running, the total memory is
+   calculated by `github.com/shirou/gopsutil/mem`[[link]](https://github.com/shirou/gopsutil/) as `mem.VirtualMemory().total`, which is supported on multiple operating systems.
 
-Example:
+Example:
+Config that uses 64 MiB of memory for the ballast:
 ```yaml
 extensions:
   memory_ballast:
     size_mib: 64
 ```
+
+Config that uses 20% of the total memory for the ballast:
+```yaml
+extensions:
+  memory_ballast:
+    size_in_percentage: 20
+```
diff --git a/internal/otel_collector/extension/ballastextension/config.go b/internal/otel_collector/extension/ballastextension/config.go
index d9cc0f50aca..fac10933375 100644
--- a/internal/otel_collector/extension/ballastextension/config.go
+++ b/internal/otel_collector/extension/ballastextension/config.go
@@ -15,14 +15,29 @@
 package ballastextension
 
 import (
+	"fmt"
+
 	"go.opentelemetry.io/collector/config"
 )
 
-// Config has the configuration for the fluentbit extension.
+// Config has the configuration for the ballast extension.
 type Config struct {
 	config.ExtensionSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct
 
 	// SizeMiB is the size, in MiB, of the memory ballast
 	// to be created for this process.
-	SizeMiB uint32 `mapstructure:"size_mib"`
+	SizeMiB uint64 `mapstructure:"size_mib"`
+
+	// SizeInPercentage is the maximum amount of memory ballast, in %, targeted to be
+	// allocated. The fixed memory settings SizeMiB has a higher precedence.
+	SizeInPercentage uint64 `mapstructure:"size_in_percentage"`
+}
+
+// Validate checks if the extension configuration is valid
+func (cfg *Config) Validate() error {
+	// no need to validate less than 0 case for uint64
+	if cfg.SizeInPercentage > 100 {
+		return fmt.Errorf("size_in_percentage is not in range 0 to 100")
+	}
+	return nil
 }
diff --git a/internal/otel_collector/extension/ballastextension/factory.go b/internal/otel_collector/extension/ballastextension/factory.go
index d12ecadf776..ac69035d05e 100644
--- a/internal/otel_collector/extension/ballastextension/factory.go
+++ b/internal/otel_collector/extension/ballastextension/factory.go
@@ -20,6 +20,7 @@ import (
 	"go.opentelemetry.io/collector/component"
 	"go.opentelemetry.io/collector/config"
 	"go.opentelemetry.io/collector/extension/extensionhelper"
+	"go.opentelemetry.io/collector/internal/iruntime"
 )
 
 const (
@@ -27,6 +28,9 @@ const (
 	typeStr = "memory_ballast"
 )
 
+// memHandler returns the total memory of the target host/vm
+var memHandler = iruntime.TotalMemory
+
 // NewFactory creates a factory for FluentBit extension.
func NewFactory() component.ExtensionFactory { return extensionhelper.NewFactory( @@ -42,5 +46,5 @@ func createDefaultConfig() config.Extension { } func createExtension(_ context.Context, set component.ExtensionCreateSettings, cfg config.Extension) (component.Extension, error) { - return newMemoryBallast(cfg.(*Config), set.Logger), nil + return newMemoryBallast(cfg.(*Config), set.Logger, memHandler), nil } diff --git a/internal/otel_collector/extension/ballastextension/memory_ballast.go b/internal/otel_collector/extension/ballastextension/memory_ballast.go index 1203934ac2a..4de90a6c016 100644 --- a/internal/otel_collector/extension/ballastextension/memory_ballast.go +++ b/internal/otel_collector/extension/ballastextension/memory_ballast.go @@ -25,17 +25,32 @@ import ( const megaBytes = 1024 * 1024 type memoryBallast struct { - cfg *Config - logger *zap.Logger - ballast []byte + cfg *Config + logger *zap.Logger + ballast []byte + getTotalMem func() (uint64, error) } func (m *memoryBallast) Start(_ context.Context, _ component.Host) error { + var ballastSizeBytes uint64 + // absolute value supersedes percentage setting if m.cfg.SizeMiB > 0 { - ballastSizeBytes := uint64(m.cfg.SizeMiB) * megaBytes + ballastSizeBytes = m.cfg.SizeMiB * megaBytes + } else { + totalMemory, err := m.getTotalMem() + if err != nil { + return err + } + ballastPercentage := m.cfg.SizeInPercentage + ballastSizeBytes = ballastPercentage * totalMemory / 100 + } + + if ballastSizeBytes > 0 { m.ballast = make([]byte, ballastSizeBytes) - m.logger.Info("Using memory ballast", zap.Uint32("MiBs", m.cfg.SizeMiB)) } + + m.logger.Info("Setting memory ballast", zap.Uint32("MiBs", uint32(ballastSizeBytes/megaBytes))) + return nil } @@ -44,9 +59,10 @@ func (m *memoryBallast) Shutdown(_ context.Context) error { return nil } -func newMemoryBallast(cfg *Config, logger *zap.Logger) *memoryBallast { +func newMemoryBallast(cfg *Config, logger *zap.Logger, getTotalMem func() (uint64, error)) *memoryBallast { return &memoryBallast{ - cfg: cfg, - logger: logger, + cfg: cfg, + logger: logger, + getTotalMem: getTotalMem, } } diff --git a/internal/otel_collector/extension/ballastextension/testdata/config.yaml b/internal/otel_collector/extension/ballastextension/testdata/config.yaml index 3d57bd323c1..d7cf835602d 100644 --- a/internal/otel_collector/extension/ballastextension/testdata/config.yaml +++ b/internal/otel_collector/extension/ballastextension/testdata/config.yaml @@ -2,6 +2,7 @@ extensions: memory_ballast: memory_ballast/1: size_mib: 123 + size_in_percentage: 20 # Data pipeline is required to load the config. receivers: diff --git a/internal/otel_collector/extension/ballastextension/testdata/config_invalid.yaml b/internal/otel_collector/extension/ballastextension/testdata/config_invalid.yaml new file mode 100644 index 00000000000..c73a7a9975c --- /dev/null +++ b/internal/otel_collector/extension/ballastextension/testdata/config_invalid.yaml @@ -0,0 +1,19 @@ +extensions: + memory_ballast: + size_in_percentage: 200 + +# Data pipeline is required to load the config. 
+receivers: + nop: +processors: + nop: +exporters: + nop: + +service: + extensions: [memory_ballast] + pipelines: + traces: + receivers: [nop] + processors: [nop] + exporters: [nop] diff --git a/internal/otel_collector/go.mod b/internal/otel_collector/go.mod index c9bc12360a4..3f3b6e319ee 100644 --- a/internal/otel_collector/go.mod +++ b/internal/otel_collector/go.mod @@ -4,50 +4,61 @@ go 1.16 require ( contrib.go.opencensus.io/exporter/prometheus v0.3.0 - github.com/Shopify/sarama v1.29.0 + github.com/Shopify/sarama v1.29.1 github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46 // indirect github.com/antonmedv/expr v1.8.9 - github.com/apache/thrift v0.14.1 + github.com/apache/thrift v0.14.2 github.com/cenkalti/backoff/v4 v4.1.1 github.com/census-instrumentation/opencensus-proto v0.3.0 github.com/coreos/go-oidc v2.2.1+incompatible github.com/fatih/structtag v1.2.0 - github.com/go-kit/kit v0.10.0 + github.com/go-kit/kit v0.11.0 github.com/go-ole/go-ole v1.2.5 // indirect github.com/gogo/protobuf v1.3.2 - github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da github.com/golang/protobuf v1.5.2 - github.com/golang/snappy v0.0.3 + github.com/golang/snappy v0.0.4 github.com/google/go-cmp v0.5.6 github.com/google/uuid v1.2.0 github.com/gorilla/mux v1.8.0 github.com/grpc-ecosystem/grpc-gateway v1.16.0 - github.com/jaegertracing/jaeger v1.23.0 + github.com/jaegertracing/jaeger v1.24.0 + github.com/knadh/koanf v1.1.1 github.com/leoluk/perflib_exporter v0.1.0 + github.com/magiconair/properties v1.8.5 + github.com/mitchellh/mapstructure v1.4.1 github.com/openzipkin/zipkin-go v0.2.5 github.com/pquerna/cachecontrol v0.1.0 // indirect - github.com/prometheus/client_golang v1.10.0 + github.com/prometheus/client_golang v1.11.0 github.com/prometheus/client_model v0.2.0 - github.com/prometheus/common v0.25.0 - github.com/prometheus/prometheus v1.8.2-0.20210430082741-2a4b8e12bbf2 - github.com/rs/cors v1.7.0 - github.com/shirou/gopsutil v3.21.5+incompatible + github.com/prometheus/common v0.29.0 + github.com/prometheus/prometheus v1.8.2-0.20210621150501-ff58416a0b02 + github.com/prometheus/statsd_exporter v0.21.0 // indirect + github.com/rs/cors v1.8.0 + github.com/shirou/gopsutil v3.21.6+incompatible github.com/soheilhy/cmux v0.1.5 github.com/spf13/cast v1.3.1 - github.com/spf13/cobra v1.1.3 - github.com/spf13/viper v1.7.1 + github.com/spf13/cobra v1.2.1 + github.com/spf13/viper v1.8.1 github.com/stretchr/testify v1.7.0 - github.com/tklauser/go-sysconf v0.3.5 // indirect + github.com/tklauser/go-sysconf v0.3.6 // indirect github.com/uber/jaeger-lib v2.4.1+incompatible github.com/xdg-go/scram v1.0.2 go.opencensus.io v0.23.0 - go.uber.org/atomic v1.7.0 - go.uber.org/zap v1.17.0 - golang.org/x/sys v0.0.0-20210423082822-04245dca01da + go.opentelemetry.io/collector/model v0.30.0 + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.21.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.21.0 + go.opentelemetry.io/otel v1.0.0-RC1 + go.opentelemetry.io/otel/oteltest v1.0.0-RC1 + go.opentelemetry.io/otel/trace v1.0.0-RC1 + go.uber.org/atomic v1.8.0 + go.uber.org/zap v1.18.1 + golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 golang.org/x/text v0.3.6 - google.golang.org/genproto v0.0.0-20210312152112-fc591d9ea70f - google.golang.org/grpc v1.38.0 - google.golang.org/protobuf v1.26.0 - gopkg.in/square/go-jose.v2 v2.5.1 // indirect + google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08 
+ google.golang.org/grpc v1.39.0 + google.golang.org/protobuf v1.27.1 gopkg.in/yaml.v2 v2.4.0 ) + +replace go.opentelemetry.io/collector/model => ./model diff --git a/internal/otel_collector/go.sum b/internal/otel_collector/go.sum index 5a89a73b739..e56576336b8 100644 --- a/internal/otel_collector/go.sum +++ b/internal/otel_collector/go.sum @@ -18,8 +18,10 @@ cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHOb cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0 h1:oqqswrt4x6b9OGBnNqdssxBl1xf0rSUNjU2BR4BZar0= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0 h1:bAMqZidYkmIsUqe6PtkEPT7Q+vfizScn+jfNA6jwK9c= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -27,6 +29,7 @@ cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUM cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= +cloud.google.com/go/bigtable v1.3.0/go.mod h1:z5EyKrPE8OQmeg4h5MNdKvuSnI9CCT49Ki3f23aBzio= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= @@ -43,50 +46,78 @@ collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= contrib.go.opencensus.io/exporter/prometheus v0.3.0 h1:08FMdJYpItzsknogU6PiiNo7XQZg/25GjH236+YCwD0= contrib.go.opencensus.io/exporter/prometheus v0.3.0/go.mod h1:rpCPVQKhiyH8oomWgm34ZmgIdZa8OVYO5WAIygPbBBE= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= -github.com/Azure/azure-sdk-for-go v52.5.0+incompatible h1:/NLBWHCnIHtZyLPc1P7WIqi4Te4CC23kIQyK3Ep/7lA= -github.com/Azure/azure-sdk-for-go v52.5.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v41.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v55.2.0+incompatible h1:TL2/vJWJEPOrmv97nHcbvjXES0Ntlb9P95hqGA1J2dU= +github.com/Azure/azure-sdk-for-go v55.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest 
v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= +github.com/Azure/go-autorest/autorest v0.10.0/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= +github.com/Azure/go-autorest/autorest v0.10.1/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= -github.com/Azure/go-autorest/autorest v0.11.18 h1:90Y4srNYrwOtAgVo3ndrQkTYn6kf1Eg/AjTFJ8Is2aM= -github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= +github.com/Azure/go-autorest/autorest v0.11.19 h1:7/IqD2fEYVha1EPeaiytVKhzmPV223pfkRIQUGOK2IE= +github.com/Azure/go-autorest/autorest v0.11.19/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= -github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/adal v0.9.14 h1:G8hexQdV5D4khOXrWG2YuLCFKhWYmWD8bHYaXN5ophk= +github.com/Azure/go-autorest/autorest/adal v0.9.14/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= +github.com/Azure/go-autorest/autorest/validation 
v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/HdrHistogram/hdrhistogram-go v0.9.0/go.mod h1:nxrse8/Tzg2tg3DZcZjm6qEclQKK70g0KxO61gFFZD4= github.com/HdrHistogram/hdrhistogram-go v1.0.1 h1:GX8GAYDuhlFQnI2fRDHQhTlkHMz8bEn0jTI6LJU0mpw= github.com/HdrHistogram/hdrhistogram-go v1.0.1/go.mod h1:BWJ+nMSHY3L41Zj7CA3uXnloDp7xxV0YvstAE7nKTaM= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/sprig v2.16.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/cFDk= github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/SAP/go-hdb v0.14.1/go.mod h1:7fdQLVC2lER3urZLjZCm0AuMQfApof92n3aylBPEkMo= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/sarama v1.29.0 h1:ARid8o8oieau9XrHI55f/L3EoRAhm9px6sonbD7yuUE= -github.com/Shopify/sarama v1.29.0/go.mod h1:2QpgD79wpdAESqNQMxNc0KYMkycd4slxGdV3TWSVqrU= +github.com/Shopify/sarama v1.29.1 
h1:wBAacXbYVLmWieEA/0X/JagDdCZ8NVFOfS6l6+2u5S0= +github.com/Shopify/sarama v1.29.1/go.mod h1:mdtqvCSg8JOxk8PmpTNGyo6wzd4BMm4QXSfDnTXmgkE= github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46 h1:5sXbqlSomvdjlRbWyNqkPsJ3Fg+tQZCbgeX1VGljbQY= @@ -97,7 +128,6 @@ github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -105,18 +135,22 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15 h1:AUNCr9CiJuwrRYS3XieqF+Z9B9gNxo/eANAJCF2eiN4= github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antonmedv/expr v1.8.9 h1:O9stiHmHHww9b4ozhPx7T6BK7fXfOCHJ8ybxf0833zw= github.com/antonmedv/expr v1.8.9/go.mod h1:5qsM3oLGDND7sDmQGDXHkYfkjYMUX14qsgqmHhwGEk8= +github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= +github.com/apache/arrow/go/arrow v0.0.0-20200923215132-ac86123a3f01/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apache/thrift v0.14.1 h1:Yh8v0hpCj63p5edXOLaqTJW0IJ1p+eMW6+YSOqw1d6s= -github.com/apache/thrift v0.14.1/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.14.2 h1:hY4rAyg7Eqbb27GB6gkhUKrRAuc8xRjlNtJq+LseKeY= +github.com/apache/thrift v0.14.2/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.3.3 
h1:a9F4rlj7EWWrbj7BYw8J8+x+ZZkJeqzNyRk8hdPF+ro= +github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= @@ -128,26 +162,40 @@ github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:W github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.29.16/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg= +github.com/aws/aws-sdk-go v1.30.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= -github.com/aws/aws-sdk-go v1.38.3 h1:QCL/le04oAz2jELMRSuJVjGT7H+4hhoQc66eMPCfU/k= -github.com/aws/aws-sdk-go v1.38.3/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.38.60/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.38.68 h1:aOG8geU4SohNp659eKBHRBgbqSrZ6jNZlfimIuJAwL8= +github.com/aws/aws-sdk-go v1.38.68/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/aws/aws-sdk-go-v2 v1.7.0/go.mod h1:tb9wi5s61kTDA5qCkcDbt3KRVV74GGslQkl/DRdX/P4= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.5.0/go.mod h1:acH3+MQoiMzozT/ivU+DbRg7Ooo2298RdRaWcOv+4vM= +github.com/aws/smithy-go v1.5.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/immutable v0.2.1/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI= +github.com/benbjohnson/tmpl v1.0.0/go.mod h1:igT620JFIi44B6awvU9IsDhR77IXWtFigTLil/RPdps= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/bonitoo-io/go-sql-bigquery v0.3.4-1.4.0/go.mod h1:J4Y6YJm0qTWB9aFziB7cPeSyc6dOZFyJdteSeybVpXQ= 
github.com/bsm/sarama-cluster v2.1.13+incompatible/go.mod h1:r7ao+4tTNXvWm+VRpRJchr2kQhqxgmAp2iEX5W96gMM= github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= +github.com/cactus/go-statsd-client/statsd v0.0.0-20191106001114-12b4e2b38748/go.mod h1:l/bIBLeOl9eX+wxJAzxS4TveKRtAqlyDpHjhkfO0MEI= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/casbin/casbin/v2 v2.31.6/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg= +github.com/cenkalti/backoff v0.0.0-20181003080854-62661b46c409/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= +github.com/cenkalti/backoff/v4 v4.1.0/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -155,23 +203,26 @@ github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnd github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/containerd/containerd v1.4.3 h1:ijQT13JedHSHrQGWFcGEwzcNKrAGIiZ+jSD5QQG07SY= github.com/containerd/containerd v1.4.3/go.mod 
h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-oidc v2.2.1+incompatible h1:mh48q/BqXqgjVHpy2ZY7WnWAbenxRjsz9N1i1YxjHAk= github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= @@ -179,6 +230,7 @@ github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= @@ -193,27 +245,33 @@ github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= -github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/dgraph-io/badger/v3 v3.2103.0/go.mod h1:GHMCYxuDWyzbHkh4k3yyg4PM61tJPFfEGSMbE3Vd5QE= +github.com/dgraph-io/ristretto v0.0.4-0.20210309073149-3836124cdc5a/go.mod h1:MIonLggsKgZLUSt414ExgwNtlOL5MuEoAJP514mwGe8= +github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1/go.mod h1:+hnT3ywWDTAFrW5aE+u2Sa/wT555ZqwoCS+pk3p6ry4= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/digitalocean/godo v1.58.0 h1:Iy8ULTvgCAxH8dlxZ54qRYpm5uTEb2deUqijywLH7Lo= -github.com/digitalocean/godo v1.58.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= 
+github.com/digitalocean/godo v1.62.0 h1:7Gw2KFsWkxl36qJa0s50tgXaE0Cgm51JdRP+MFQvNnM= +github.com/digitalocean/godo v1.62.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= +github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v20.10.5+incompatible h1:o5WL5onN4awYGwrW7+oTn5x9AF2prw7V0Ox8ZEkoCdg= -github.com/docker/docker v20.10.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.7+incompatible h1:Z6O9Nhsjv+ayUEeI1IojKbYcsGdgYSNqxe1s2MYzUhQ= +github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -226,6 +284,7 @@ github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= @@ -235,21 +294,26 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v4.2.0+incompatible/go.mod 
h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= -github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW4o= +github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/foxcpp/go-mockdns v0.0.0-20201212160233-ede2f9158d15/go.mod h1:tPg4cp4nseejPd+UKxtCVQ2hUxNTZ7qQZJa7CLriIeo= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= @@ -259,18 +323,26 @@ github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= github.com/gdamore/tcell v1.3.0/go.mod h1:Hjvr+Ofd+gLglo7RYKxxnzCBmev3BzsS67MebKS4zMM= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= +github.com/go-chi/chi v4.1.0+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw 
v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-kit/kit v0.11.0 h1:IGmIEl7aHTYh6E2HlT+ptILBotjo4xl8PMDl852etiI= +github.com/go-kit/kit v0.11.0/go.mod h1:73/6Ixaufkvb5Osvkls8C79vuQ49Ba1rUEUYNSf+FUw= +github.com/go-kit/log v0.1.0 h1:DGJh0Sm43HbOeYDNnVZFl8BvcYVvjD5bqYJvp0REbwQ= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= @@ -300,11 +372,13 @@ github.com/go-openapi/errors v0.19.7/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpX github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.20.0/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= @@ -328,11 +402,13 @@ github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2g github.com/go-openapi/runtime v0.19.16/go.mod h1:5P9104EJgYcizotuXhEuUrzVc+j1RiSjahULvYmlv98= github.com/go-openapi/runtime v0.19.24/go.mod h1:Lm9YGCeecBnUUkFTxPC4s1+lwrkJ0pthx8YvyjCfkgk= github.com/go-openapi/runtime v0.19.28/go.mod h1:BvrQtn6iVb2QmiVXRsFAm6ZCAZBpbVKFfN6QWCp582M= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/spec v0.19.6/go.mod 
h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/spec v0.19.7/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= github.com/go-openapi/spec v0.19.15/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= github.com/go-openapi/spec v0.20.0/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= @@ -348,6 +424,7 @@ github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk github.com/go-openapi/strfmt v0.19.11/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= github.com/go-openapi/strfmt v0.20.0/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= github.com/go-openapi/strfmt v0.20.1/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= @@ -367,11 +444,16 @@ github.com/go-openapi/validate v0.19.12/go.mod h1:Rzou8hA/CBw8donlS6WNEUQupNvUZ0 github.com/go-openapi/validate v0.19.15/go.mod h1:tbn/fdOwYHgrhPBzidZfJC2MIVvs9GA7monOmWBbeCI= github.com/go-openapi/validate v0.20.1/go.mod h1:b60iJT+xNNLfaQJUqLI7946tYiFEOuE9E4k54HpKcJ0= github.com/go-openapi/validate v0.20.2/go.mod h1:e7OJoKNgd0twXZwIn0A43tHbvIcr/rZIVCbJBpTUoY0= +github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= +github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= +github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48 h1:JVrqSeQfdhYRFk24TvhTZWU0q8lfCojxZQFi3Ou7+uY= +github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48/go.mod h1:dZGr0i9PLlaaTD4H/hoZIDjQ+r6xq8mgbRzHZf7f2J8= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/go-zookeeper/zk v1.0.2 h1:4mx0EYENAdX/B/rbunjlt5+4RTA/a9SMHBRuSKdGxPM= github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= @@ -399,16 +481,21 @@ github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGt github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= github.com/gocql/gocql v0.0.0-20200228163523-cd4b606dd2fb/go.mod h1:DL0ekTmBSTdlNF25Orwt/JMzqIq3EJ4MVa/J/uK64OY= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v4.0.0+incompatible/go.mod 
h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -416,8 +503,9 @@ github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -426,6 +514,7 @@ github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -441,17 +530,20 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD 
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v1.12.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -473,18 +565,21 @@ github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200417002340-c6e0a841f49a/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof 
v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210323184331-8eee2492667d/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -494,10 +589,14 @@ github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.4.0/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= -github.com/gophercloud/gophercloud v0.16.0 h1:sWjPfypuzxRxjVbk3/MsU4H8jS0NNlyauZtIUl78BPU= -github.com/gophercloud/gophercloud v0.16.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4= +github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gophercloud/gophercloud v0.10.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= +github.com/gophercloud/gophercloud v0.18.0 h1:V6hcuMPmjXg+js9flU8T3RIHDCjV7F5CG5GD0MRhP/w= +github.com/gophercloud/gophercloud v0.18.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= @@ -520,35 +619,47 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.14.4/go.mod h1:6CwZWGDSPRJidgKAtJVvND6soZe6fT7iteq8wDPdhb0= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= 
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/api v1.4.0/go.mod h1:xc8u05kyMa3Wjr9eEAsIAo3dg8+LywT5E/Cl7cNS5nU= github.com/hashicorp/consul/api v1.8.1 h1:BOEQaMWoGMhmQ29fC26bi0qb7/rId9JzZP2V0Xmx7m8= github.com/hashicorp/consul/api v1.8.1/go.mod h1:sDjTOq0yUyv5G4h+BqSea7Fn6BU+XbolEz1952UB+mk= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.4.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= github.com/hashicorp/consul/sdk v0.7.0 h1:H6R9d008jDcHPQPAqPNuydAshJ4v5/8URdFnUvK/+sc= github.com/hashicorp/consul/sdk v0.7.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.12.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.16.1 h1:IVQwpTGNRRIHafnTs2dQLIk4ENtneRIEEJWOVDqz99o= github.com/hashicorp/go-hclog v0.16.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.2.0 h1:l6UW37iCXwZkZoAbEYnptSHVE/cQ5bOTPYG5W3vf9+8= +github.com/hashicorp/go-immutable-radix v1.2.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= github.com/hashicorp/go-plugin v1.4.2/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod 
h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= @@ -559,6 +670,7 @@ github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -571,33 +683,51 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/memberlist v0.2.2 h1:5+RffWKwqJ71YPu9mWsF7ZOscZmwfasdA8kbdC7AO2g= +github.com/hashicorp/memberlist v0.1.4/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/memberlist v0.2.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/memberlist v0.2.3 h1:BwZa5IjREr75J0am7nblP+X5i95Rmp8EEbMI5vkUWdA= +github.com/hashicorp/memberlist v0.2.3/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hashicorp/serf v0.9.0/go.mod h1:YL0HO+FifKOW2u1ke99DGVu1zhcpZzNwrLIqBC7vbYU= github.com/hashicorp/serf v0.9.5 h1:EBWvyu9tcRszt3Bxp3KNssBMP1KuHWyO51lz9+786iM= github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= +github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoIospckxBxk6Q= +github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20190923154419-df201c70410d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/hetznercloud/hcloud-go v1.24.0 h1:/CeHDzhH3Fhm83pjxvE3xNNLbvACl0Lu1/auJ83gG5U= -github.com/hetznercloud/hcloud-go v1.24.0/go.mod h1:3YmyK8yaZZ48syie6xpm3dt26rtB6s65AisBHylXYFA= +github.com/hetznercloud/hcloud-go v1.26.2 h1:fI8BXAGJI4EFeCDd2a/I4EhqyK32cDdxGeWfYMGUi50= +github.com/hetznercloud/hcloud-go v1.26.2/go.mod h1:2C5uMtBiMoFr3m7lBFPf7wXTdh33CevmZpQIIDPGYJI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod 
h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY= -github.com/influxdata/influxdb v1.8.4/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI= +github.com/influxdata/flux v0.65.0/go.mod h1:BwN2XG2lMszOoquQaFdPET8FRQfrXiZsWmcMO9rkaVY= +github.com/influxdata/flux v0.113.0/go.mod h1:3TJtvbm/Kwuo5/PEo5P6HUzwVg4bXWkb2wPQHPtQdlU= +github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69/go.mod h1:pwymjR6SrP3gD3pRj9RJwdl1j5s3doEEV8gS4X9qSzA= +github.com/influxdata/influxdb v1.8.0/go.mod h1:SIzcnsjaHRFpmlxpJ4S3NT64qtEKYweNTUMb/vh0OMQ= +github.com/influxdata/influxdb v1.9.2/go.mod h1:UEe3MeD9AaP5rlPIes102IhYua3FhIWZuOXNHxDjSrI= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= +github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/influxdata/influxql v1.1.0/go.mod h1:KpVI7okXjK6PRi3Z5B+mtKZli+R1DnZgb3N+tzevNgo= +github.com/influxdata/influxql v1.1.1-0.20210223160523-b6ab99450c93/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= +github.com/influxdata/pkg-config v0.2.6/go.mod h1:EMS7Ll0S4qkzDk53XS3Z72/egBsPInt+BeRxb0WeSwk= +github.com/influxdata/pkg-config v0.2.7/go.mod h1:EMS7Ll0S4qkzDk53XS3Z72/egBsPInt+BeRxb0WeSwk= github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8= github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= +github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b/go.mod h1:Z0kXnxzbTC2qrx4NaIzYkE1k66+6oEDQTvL95hQFh5Y= github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= -github.com/jaegertracing/jaeger v1.23.0 h1:jdv6xzB7esPVIbXXZ5GWkFwX0cGwfbGJVf//xYnV0v8= -github.com/jaegertracing/jaeger v1.23.0/go.mod h1:gB6Qc+Kjd/IX1G82oGTArbHI3ZRO//iUkaMW+gzL9uw= +github.com/jaegertracing/jaeger v1.24.0 h1:wbzvajFSsV3j5843nIlyUa70+uQevKsT3l7MV29jlxU= +github.com/jaegertracing/jaeger v1.24.0/go.mod h1:mqdtFDA447va5j0UewDaAWyNlGreGQyhGxXVhbF58gQ= github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= @@ -611,12 +741,15 @@ github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aW github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= github.com/jcmturner/rpc/v2 v2.0.3/go.mod 
h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= @@ -626,11 +759,13 @@ github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= +github.com/jsternberg/zap-logfmt v1.2.0/go.mod h1:kz+1CUmCutPWABnNkOu9hOHKdT2q3TDYCcsFy9hpqb0= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= @@ -645,15 +780,18 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.12.2 h1:2KCfW3I9M7nSc5wOqXAlW2v2U6v+w6cbjvbfp+OykW8= github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod 
h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/knadh/koanf v1.1.1 h1:doO5UBvSXcmngdr/u54HKe+Uz4ZZw0/YHVzSsnE3vD4= +github.com/knadh/koanf v1.1.1/go.mod h1:xpPTwMhsA/aaQLAilyCCqfpEiY1gpa160AiCuWHJUjY= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -665,19 +803,24 @@ github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= github.com/leoluk/perflib_exporter v0.1.0 h1:fXe/mDaf9jR+Zk8FjFlcCSksACuIj2VNN4GyKHmQqtA= github.com/leoluk/perflib_exporter v0.1.0/go.mod h1:rpV0lYj7lemdTm31t7zpCqYqPnw7xs86f+BaaNBVYFM= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/linode/linodego v0.28.5 h1:JaCziTxHJ7a01MYyjHqskRc8zXQxXOddwrDeoQ2rBnw= +github.com/linode/linodego v0.28.5/go.mod h1:BR0gVkCJffEdIGJSl6bHR80Ty+Uvg/2jkjmrWaFectM= github.com/lucasb-eyer/go-colorful v1.0.2/go.mod h1:0MS4r+7BZKSJ5mw4/S5MPN+qHFF1fYclkSPilDOKW0s= github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls= +github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= 
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -695,6 +838,7 @@ github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= @@ -708,11 +852,18 @@ github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4f github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.22/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= -github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY= -github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/miekg/dns v1.1.29/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/dns v1.1.42 h1:gWGe42RGaIqXQZ+r3WUGEKBEtvPHY2SXo4dqixDNxuY= +github.com/miekg/dns v1.1.42/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= +github.com/mileusna/useragent v0.0.0-20190129205925-3e331f0949a5/go.mod h1:JWhYAp2EXqUtsxTKdeGlY8Wp44M7VxThC9FEoNGi2IE= +github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -724,11 +875,15 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4 github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.3.3/go.mod 
h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mjibson/esc v0.2.0/go.mod h1:9Hw9gxxfHulMF5OJKCyhYD7PzlSdhzXyaGEBRPH1OPs= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk= @@ -751,10 +906,16 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q= +github.com/nats-io/jwt/v2 v2.0.2/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY= github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats-server/v2 v2.2.6/go.mod h1:sEnFaxqe09cDmfMgACxZbziXnhQFhwk+aKkZjBBRYrI= github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nats.go v1.11.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= +github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= @@ -768,6 +929,7 @@ github.com/olivere/elastic v6.2.35+incompatible/go.mod h1:J+q1zQJTgAz9woqsbVRqGe github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= @@ -797,14 +959,16 @@ github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnh github.com/openzipkin/zipkin-go v0.2.5 h1:UwtQQx2pyPIgWYHRg+epgdx1/HnBQTgN3/oIYEJTQzU= github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE= 
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= -github.com/pelletier/go-toml v1.7.0 h1:7utD74fnzVc/cpcyy8sjrlFr5vYpypUixARcHIMIGuI= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ= +github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= @@ -813,11 +977,13 @@ github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0 github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.6.0+incompatible h1:Ix9yFKn1nSPBLFl/yZknTp8TU5G4Ps0JDmguYK6iH1A= github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -826,19 +992,23 @@ github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndr github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc= github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= -github.com/prometheus/alertmanager v0.21.0/go.mod h1:h7tJ81NA0VLWvWEayi1QltevFkLF3KxmC/malTcT8Go= +github.com/prometheus/alertmanager v0.20.0/go.mod h1:9g2i48FAyZW6BtbsnvHtMHQXl2aVtrORKwKVCQ+nbrg= +github.com/prometheus/alertmanager v0.22.2/go.mod h1:rYinOWxFuCnNssc3iOjn2oMTlhLaPcUuqV5yk5JKUAE= 
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= -github.com/prometheus/client_golang v1.10.0 h1:/o0BDeWzLWXNZ+4q5gXltUvaMpJqckTa+jTNoB+z4cg= github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= +github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -858,29 +1028,35 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8 github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.23.0/go.mod h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMDk7UAI2qm7Q= -github.com/prometheus/common v0.25.0 h1:IjJYZJCI8HZYtqA3xYwGyDzSCy1r4CA2GRh+4vdOmtE= -github.com/prometheus/common v0.25.0/go.mod h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMDk7UAI2qm7Q= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.29.0 h1:3jqPBvKT4OHAbje2Ql7KeaaSicDBCxMYwEJU1zRJceE= +github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/exporter-toolkit v0.5.1/go.mod h1:OCkM4805mmisBhLmVFw858QYi3v0wKdY6/UxrT0pZVg= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod 
h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/prometheus v1.8.2-0.20210430082741-2a4b8e12bbf2 h1:AHi2TGs09Mv4v688/bjcY2PfAcu9+p4aPvsgVQ4nYDk= -github.com/prometheus/prometheus v1.8.2-0.20210430082741-2a4b8e12bbf2/go.mod h1:5aBj+GpLB+V5MCnrKm5+JAqEJwzDiLugOmDhgt7sDec= -github.com/prometheus/statsd_exporter v0.20.0 h1:M0hQphnq2WyWKS5CefQL8PqWwBOBPhiAkyLo5l4ZYvE= +github.com/prometheus/prometheus v0.0.0-20200609090129-a6600f564e3c/go.mod h1:S5n0C6tSgdnwWshBUceRx5G1OsjLv/EeZ9t3wIfEtsY= +github.com/prometheus/prometheus v1.8.2-0.20210621150501-ff58416a0b02 h1:waKRn/b6LBaXHjQ3dlZd+0li1nIykM34r5XEYr4lTBM= +github.com/prometheus/prometheus v1.8.2-0.20210621150501-ff58416a0b02/go.mod h1:fC6ROpjS/2o+MQTO7X8NSZLhLBSNlDzxaeDMqQm+TUM= github.com/prometheus/statsd_exporter v0.20.0/go.mod h1:YL3FWCG8JBBtaUSxAg4Gz2ZYu22bS84XM89ZQXXTWmQ= +github.com/prometheus/statsd_exporter v0.21.0 h1:hA05Q5RFeIjgwKIYEdFd59xu5Wwaznf33yKI+pyX6T8= +github.com/prometheus/statsd_exporter v0.21.0/go.mod h1:rbT83sZq2V+p73lHhPZfMc3MLCHmSHelCh9hSGYNLTQ= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= +github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= github.com/rivo/tview v0.0.0-20200219210816-cd38d7432498/go.mod h1:6lkG1x+13OShEf0EaOCaTQYyB7d5nSbb181KtjlS+84= github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= @@ -888,15 +1064,19 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= +github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/cors v1.8.0 h1:P2KMzcFwrPoSjkF1WLRPsp3UMLyql8L4v9hQpVeK5so= +github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= 
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sanity-io/litter v1.2.0/go.mod h1:JF6pZUFgu2Q0sBZ+HSV35P8TVPI1TTzEwyu9FXAw2W4= -github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/satori/go.uuid v0.0.0-20160603004225-b111a074d5ef/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44 h1:3egqo0Vut6daANFm7tOXdNAa8v5/uLU+sgCJrc88Meo= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44/go.mod h1:CJJ5VAbozOl0yEw7nHB9+7BXTJbIn6h7W+f6Gau5IP8= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= @@ -905,8 +1085,8 @@ github.com/securego/gosec v0.0.0-20200203094520-d13bb6d2420c/go.mod h1:gp0gaHj0W github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/shirou/gopsutil v3.21.5+incompatible h1:OloQyEerMi7JUrXiNzy8wQ5XN+baemxSl12QgIzt0jc= -github.com/shirou/gopsutil v3.21.5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/gopsutil v3.21.6+incompatible h1:mmZtAlWSd8U2HeRTjswbnDLPxqsEoK01NK+GZ1P+nEM= +github.com/shirou/gopsutil v3.21.6+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= @@ -915,12 +1095,14 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod 
h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/snowflakedb/gosnowflake v1.3.4/go.mod h1:NsRq2QeiMUuoNUJhp5Q6xGC4uBrsS9g6LwZVEkTWgsE= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= @@ -928,16 +1110,17 @@ github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v0.0.7/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= -github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M= -github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= +github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw= +github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= @@ -948,12 +1131,13 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk= -github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.8.1 h1:Kq1fyeebqsBfbjZj4EL7gj2IO0mMaiyjYUWcUsl2O44= +github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v1.0.0/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e/go.mod 
h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= @@ -971,26 +1155,33 @@ github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= -github.com/tklauser/go-sysconf v0.3.5 h1:uu3Xl4nkLzQfXNsWn15rPc/HQCJKObbt1dKJeWp3vU4= -github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= +github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tklauser/go-sysconf v0.3.6 h1:oc1sJWvKkmvIxhDHeKWvZS4f6AW+YcoguSfRF2/Hmo4= +github.com/tklauser/go-sysconf v0.3.6/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= github.com/tklauser/numcpus v0.2.2 h1:oyhllyrScuYI6g+h/zUvNXNp1wy7x8qQy3t/piefldA= github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/uber-go/tally v3.3.15+incompatible/go.mod h1:YDTIBxdXyOU/sCWilKB4bgyufu1cEi0jdVnRdxvjnmU= +github.com/uber/athenadriver v1.1.4/go.mod h1:tQjho4NzXw55LGfSZEcETuYydpY1vtmixUabHkC1K/E= +github.com/uber/jaeger-client-go v2.23.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.29.1+incompatible h1:R9ec3zO3sGpzs0abd43Y+fBZRJ9uiH6lXyR/+u6brW4= github.com/uber/jaeger-client-go v2.29.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= -github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/vektra/mockery v0.0.0-20181123154057-e78b021dcbb5/go.mod h1:ppEjwdhyy7Y31EnHRDm1JkChoC7LXIJ7Ex0VYLWtZtQ= github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad/go.mod h1:Hy8o65+MXnS6EwGElrSRjUzQDLXreJlzYLlWiHtt8hM= 
github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.0.2 h1:akYIkZ28e6A96dkWNJQu3nmCzH3YfwMPQExUYDaRv7w= @@ -1003,20 +1194,26 @@ github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHM github.com/xdg/stringprep v1.0.3/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= -github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg= +github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= +go.mongodb.org/mongo-driver v1.3.2/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= go.mongodb.org/mongo-driver v1.3.4/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= go.mongodb.org/mongo-driver v1.4.4/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= @@ -1033,29 +1230,57 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/contrib v0.21.0 h1:RMJ6GlUVzLYp/zmItxTTdAmr1gnpO/HHMFmvjAhvJQM= +go.opentelemetry.io/contrib v0.21.0/go.mod 
h1:EH4yDYeNoaTqn/8yCWQmfNB78VHfGX2Jt2bvnvzBlGM= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.21.0 h1:68WZYF6CrnsXIVDYc51cR9VmTX2IM7y0svo7s4lu5kQ= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.21.0/go.mod h1:Vm5u/mtkj1OMhtao0v+BGo2LUoLCgHYXvRmj0jWITlE= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.21.0 h1:G1vNyNfKknFvrKVC8ga8EYIECy0s5D/QPW4QPRSMhwc= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.21.0/go.mod h1:JQAtechjxLEL81EjmbRwxBq/XEzGaHcsPuDHAx54hg4= +go.opentelemetry.io/otel v1.0.0-RC1 h1:4CeoX93DNTWt8awGK9JmNXzF9j7TyOu9upscEdtcdXc= +go.opentelemetry.io/otel v1.0.0-RC1/go.mod h1:x9tRa9HK4hSSq7jf2TKbqFbtt58/TGk0f9XiEYISI1I= +go.opentelemetry.io/otel/internal/metric v0.21.0 h1:gZlIBo5O51hZOOZz8vEcuRx/l5dnADadKfpT70AELoo= +go.opentelemetry.io/otel/internal/metric v0.21.0/go.mod h1:iOfAaY2YycsXfYD4kaRSbLx2LKmfpKObWBEv9QK5zFo= +go.opentelemetry.io/otel/metric v0.21.0 h1:ZtcJlHqVE4l8Su0WOLOd9fEPheJuYEiQ0wr9wv2p25I= +go.opentelemetry.io/otel/metric v0.21.0/go.mod h1:JWCt1bjivC4iCrz/aCrM1GSw+ZcvY44KCbaeeRhzHnc= +go.opentelemetry.io/otel/oteltest v1.0.0-RC1 h1:G685iP3XiskCwk/z0eIabL55XUl2gk0cljhGk9sB0Yk= +go.opentelemetry.io/otel/oteltest v1.0.0-RC1/go.mod h1:+eoIG0gdEOaPNftuy1YScLr1Gb4mL/9lpDkZ0JjMRq4= +go.opentelemetry.io/otel/trace v1.0.0-RC1 h1:jrjqKJZEibFrDz+umEASeU3LvdVyWKlnTh7XEfwrT58= +go.opentelemetry.io/otel/trace v1.0.0-RC1/go.mod h1:86UHmyHWFEtWjfWPSbu0+d0Pf9Q6e1U+3ViBOc+NXAg= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.8.0 h1:CUhrE4N1rqSE6FM9ecihEjRkLQu8cDfgDyoOs83mEY4= +go.uber.org/atomic v1.8.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/automaxprocs v1.4.0/go.mod h1:/mTEdr7LvHhs0v7mjdxDreTz1OG5zdZGqgOnhWiR/+Q= go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U= +go.uber.org/zap v1.14.0/go.mod 
h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.18.1 h1:CSUJ2mjFszzEWt4CdKISEuChVIXGBn3lAPwkRGyVrc4= +go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +golang.org/x/crypto v0.0.0-20180505025534-4ec37c66abab/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -1063,17 +1288,24 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200422194213-44a606286825/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= 
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b h1:7mWr3k41Qtv8XlltBkDkl8LoP3mpSgBW8BUoxtEdbXg= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e h1:gsTQYXdTw2Gq7RBsWvlQ91b+aEQ6bXFUngBGuR8sPpI= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1100,8 +1332,9 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 h1:2M3HP5CCK1Si9FQhwnzYhXdG6DXeebvUHFpre8QvbyI= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -1112,6 +1345,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1139,6 +1374,9 @@ golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1146,6 +1384,7 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= @@ -1163,10 +1402,14 @@ golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210324051636-2c4c8ecb7826/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210427231257-85d9c07bbe3a/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210510120150-4163338589ed h1:p9UgmWI9wKpfYmgaV/IZKGdXc5qEK45tDwwwDyjS26I= -golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1178,8 +1421,9 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 
v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210323180902-22b0adad7558 h1:D7nTwh4J0i+5mW4Zjzn5omvlr6YBcWywE6KOcatyNxY= -golang.org/x/oauth2 v0.0.0-20210323180902-22b0adad7558/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c h1:pkQiBZBvdos9qq4wBAHqlzuZHEXo07pqV06ef90u1WI= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1193,6 +1437,7 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1202,6 +1447,9 @@ golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1218,8 +1466,8 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626150813-e07cf5db2756/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1227,8 +1475,10 @@ golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1266,19 +1516,29 @@ golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210314195730-07df6a141424/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da h1:b3NXsE2LusjYGGjL5bxEVZZORm/YEFFrWFjR8eFrw/c= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210611083646-a4fc73990273/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -1289,12 +1549,15 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6 h1:Vv0JUPWTyeqUq42B2WJ1FeIDjjvGKoA2Ss+Ts0lAVbs= +golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181112210238-4b1f3b6b1646/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1318,6 +1581,7 @@ 
golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190813034749-528a2984e271/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1331,6 +1595,8 @@ golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191203134012-c197fd4bf371/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -1344,15 +1610,17 @@ golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304024140-c4206d458c3f/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200422205258-72e4a01eba43/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200513201620-d5fe73897c97/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200721032237-77f530d86f9a/go.mod 
h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= @@ -1362,8 +1630,11 @@ golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3 h1:L69ShwSZEyCsLKoAxDKeMvLDZkumEe8gXUZAjab0tX8= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1396,8 +1667,11 @@ google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.42.0 h1:uqATLkpxiBrhrvFoebXUjvyzE9nQf+pVyy0Z0IHE+fc= -google.golang.org/api v0.42.0/go.mod h1:+Oj4s6ch2SEGtPjGqfUfZonBH0GjQH89gTeKKAEGZKI= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0 h1:RDAPWfNFY06dffEXfn7hZF5Fr1ZbnChzfQZAPyBd1+I= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1410,6 +1684,7 @@ google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto 
v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1418,6 +1693,7 @@ google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= @@ -1432,6 +1708,7 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200420144010-e5e8543f8aeb/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -1450,23 +1727,31 @@ google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210312152112-fc591d9ea70f h1:YRBxgxUW6GFi+AKsn8WGA9k1SZohK+gGuEqdeT5aoNQ= -google.golang.org/genproto v0.0.0-20210312152112-fc591d9ea70f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08 h1:pc16UedxnxXXtGxHCSUhafAoVHQZ0yXl8ZelMH4EETc= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc 
v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.0/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= @@ -1476,8 +1761,13 @@ google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0 h1:Klz8I9kdtkIN6EpHHUOMLCYhTn/2WAe5a0s1hcBkdTI= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1489,10 +1779,11 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod 
h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1505,14 +1796,15 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy gopkg.in/fsnotify/fsnotify.v1 v1.4.7 h1:XNNYLJHt73EyYiCZi6+xjupS9CpvmiDgjPTAjrBlQbo= gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.52.0 h1:j+Lt/M1oPPejkniCg1TkWE2J3Eh1oZTsHSXzMTzUXn4= -gopkg.in/ini.v1 v1.52.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU= +gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= -gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= @@ -1521,6 +1813,7 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= @@ -1542,26 +1835,37 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod 
h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.2.0/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= -k8s.io/api v0.21.0 h1:gu5iGF4V6tfVCQ/R+8Hc0h7H1JuEhzyEi9S4R5LM8+Y= -k8s.io/api v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU= -k8s.io/apimachinery v0.21.0 h1:3Fx+41if+IRavNcKOz09FwEXDBG6ORh6iMsTSelhkMA= -k8s.io/apimachinery v0.21.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= -k8s.io/client-go v0.21.0 h1:n0zzzJsAQmJngpC0IhgFcApZyoGXPrDIAD601HD09ag= -k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA= +k8s.io/api v0.17.5/go.mod h1:0zV5/ungglgy2Rlm3QK8fbxkXVs+BSJWpJP/+8gUVLY= +k8s.io/api v0.21.1 h1:94bbZ5NTjdINJEdzOkpS4vdPhkb1VFpTYC9zh43f75c= +k8s.io/api v0.21.1/go.mod h1:FstGROTmsSHBarKc8bylzXih8BLNYTiS3TZcsoEDg2s= +k8s.io/apimachinery v0.17.5/go.mod h1:ioIo1G/a+uONV7Tv+ZmCbMG1/a3kVw5YcDdncd8ugQ0= +k8s.io/apimachinery v0.21.1 h1:Q6XuHGlj2xc+hlMCvqyYfbv3H7SRGn2c8NycxJquDVs= +k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= +k8s.io/client-go v0.17.5/go.mod h1:S8uZpBpjJJdEH/fEyxcqg7Rn0P5jH+ilkgBHjriSmNo= +k8s.io/client-go v0.21.1 h1:bhblWYLZKUu+pm50plvQF8WpY6TXdRRtcS/K9WauOj4= +k8s.io/client-go v0.21.1/go.mod h1:/kEw4RgW+3xnBGzvp9IWxKSNA+lXn3A7AuH3gdOAzLs= +k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM= +k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/kube-openapi v0.0.0-20200316234421-82d701f24f9d/go.mod h1:F+5wygcW0wmRTnM3cOgIqGivxkwSWIWT5YdsDbeAOaU= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 h1:vEx13qjvaZ4yfObSSXW7BrMc/KQBBT/Jyee8XtLf4x0= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= +k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20200414100711-2df71ebbae66/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/structured-merge-diff/v2 v2.0.1/go.mod h1:Wb7vfKAodbKgf6tn1Kl0VvGj7mRH6DGaRcixXEJXTsE= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod 
h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.0 h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8= sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= diff --git a/internal/otel_collector/internal/buildscripts/compare-apidiff.sh b/internal/otel_collector/internal/buildscripts/compare-apidiff.sh index c74e7347d4f..c60098fb4bf 100644 --- a/internal/otel_collector/internal/buildscripts/compare-apidiff.sh +++ b/internal/otel_collector/internal/buildscripts/compare-apidiff.sh @@ -5,6 +5,7 @@ usage() { echo "Usage: $0" echo + echo "-c Check-incompatibility mode. Script will fail if an incompatible change is found. Default: 'false'" echo "-p Package to generate API state snapshot of. Default: ''" echo "-d directory where prior states will be read from. Default: './internal/data/apidiff'" exit 1 @@ -12,10 +13,14 @@ usage() { package="" input_dir="./internal/data/apidiff" +check_only=false -while getopts "p:d:" o; do +while getopts "cp:d:" o; do case "${o}" in + c) + check_only=true + ;; p) package=$OPTARG ;; @@ -35,10 +40,17 @@ fi set -e -if [ -d $input_dir/$package ]; then +if [ -e $input_dir/$package/apidiff.state ]; then changes=$(apidiff $input_dir/$package/apidiff.state $package) if [ ! -z "$changes" -a "$changes"!=" " ]; then - echo "Changes found in $package:" - echo "$changes" + SUB='Incompatible changes:' + if [ $check_only = true ] && [[ "$changes" =~ .*"$SUB".* ]]; then + echo "Incompatible Changes Found." + echo "Check the logs in the GitHub Action log group: 'Compare-States'." + exit 1 + else + echo "Changes found in $package:" + echo "$changes" + fi fi -fi \ No newline at end of file +fi diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/cgroup.go b/internal/otel_collector/internal/cgroups/cgroup.go similarity index 100% rename from internal/otel_collector/processor/memorylimiter/internal/cgroups/cgroup.go rename to internal/otel_collector/internal/cgroups/cgroup.go diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/cgroups.go b/internal/otel_collector/internal/cgroups/cgroups.go similarity index 94% rename from internal/otel_collector/processor/memorylimiter/internal/cgroups/cgroups.go rename to internal/otel_collector/internal/cgroups/cgroups.go index 4545114c692..f51cb2f2c41 100644 --- a/internal/otel_collector/processor/memorylimiter/internal/cgroups/cgroups.go +++ b/internal/otel_collector/internal/cgroups/cgroups.go @@ -107,14 +107,14 @@ func NewCGroupsForCurrentProcess() (CGroups, error) { // MemoryQuota returns the total memory a // It is a result of `memory.limit_in_bytes`. If the value of -// `memory.limit_in_bytes` was not set (-1), the method returns `(-1, false, nil)`. +// `memory.limit_in_bytes` was not set (-1) or (9223372036854771712), the method returns `(-1, false, nil)`. 
func (cg CGroups) MemoryQuota() (int64, bool, error) { - cpuCGroup, exists := cg[_cgroupSubsysMemory] + memCGroup, exists := cg[_cgroupSubsysMemory] if !exists { return -1, false, nil } - memLimitBytes, err := cpuCGroup.readInt(_cgroupMemoryLimitBytes) + memLimitBytes, err := memCGroup.readInt(_cgroupMemoryLimitBytes) if defined := memLimitBytes > 0; err != nil || !defined { return -1, defined, err } diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/doc.go b/internal/otel_collector/internal/cgroups/doc.go similarity index 100% rename from internal/otel_collector/processor/memorylimiter/internal/cgroups/doc.go rename to internal/otel_collector/internal/cgroups/doc.go diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/errors.go b/internal/otel_collector/internal/cgroups/errors.go similarity index 100% rename from internal/otel_collector/processor/memorylimiter/internal/cgroups/errors.go rename to internal/otel_collector/internal/cgroups/errors.go diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/mountpoint.go b/internal/otel_collector/internal/cgroups/mountpoint.go similarity index 100% rename from internal/otel_collector/processor/memorylimiter/internal/cgroups/mountpoint.go rename to internal/otel_collector/internal/cgroups/mountpoint.go diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/subsys.go b/internal/otel_collector/internal/cgroups/subsys.go similarity index 100% rename from internal/otel_collector/processor/memorylimiter/internal/cgroups/subsys.go rename to internal/otel_collector/internal/cgroups/subsys.go diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/cgroups/cpu/cpu.cfs_period_us b/internal/otel_collector/internal/cgroups/testdata/cgroups/cpu/cpu.cfs_period_us similarity index 100% rename from internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/cgroups/cpu/cpu.cfs_period_us rename to internal/otel_collector/internal/cgroups/testdata/cgroups/cpu/cpu.cfs_period_us diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/cgroups/cpu/cpu.cfs_quota_us b/internal/otel_collector/internal/cgroups/testdata/cgroups/cpu/cpu.cfs_quota_us similarity index 100% rename from internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/cgroups/cpu/cpu.cfs_quota_us rename to internal/otel_collector/internal/cgroups/testdata/cgroups/cpu/cpu.cfs_quota_us diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/cgroups/empty/cpu.cfs_quota_us b/internal/otel_collector/internal/cgroups/testdata/cgroups/empty/cpu.cfs_quota_us similarity index 100% rename from internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/cgroups/empty/cpu.cfs_quota_us rename to internal/otel_collector/internal/cgroups/testdata/cgroups/empty/cpu.cfs_quota_us diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/cgroups/invalid/cpu.cfs_quota_us b/internal/otel_collector/internal/cgroups/testdata/cgroups/invalid/cpu.cfs_quota_us similarity index 100% rename from internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/cgroups/invalid/cpu.cfs_quota_us rename to internal/otel_collector/internal/cgroups/testdata/cgroups/invalid/cpu.cfs_quota_us diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/cgroups/undefined-period/cpu.cfs_quota_us 
b/internal/otel_collector/internal/cgroups/testdata/cgroups/undefined-period/cpu.cfs_quota_us similarity index 100% rename from internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/cgroups/undefined-period/cpu.cfs_quota_us rename to internal/otel_collector/internal/cgroups/testdata/cgroups/undefined-period/cpu.cfs_quota_us diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/cgroups/undefined/cpu.cfs_period_us b/internal/otel_collector/internal/cgroups/testdata/cgroups/undefined/cpu.cfs_period_us similarity index 100% rename from internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/cgroups/undefined/cpu.cfs_period_us rename to internal/otel_collector/internal/cgroups/testdata/cgroups/undefined/cpu.cfs_period_us diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/cgroups/undefined/cpu.cfs_quota_us b/internal/otel_collector/internal/cgroups/testdata/cgroups/undefined/cpu.cfs_quota_us similarity index 100% rename from internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/cgroups/undefined/cpu.cfs_quota_us rename to internal/otel_collector/internal/cgroups/testdata/cgroups/undefined/cpu.cfs_quota_us diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/proc/cgroups/cgroup b/internal/otel_collector/internal/cgroups/testdata/proc/cgroups/cgroup similarity index 100% rename from internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/proc/cgroups/cgroup rename to internal/otel_collector/internal/cgroups/testdata/proc/cgroups/cgroup diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/proc/cgroups/mountinfo b/internal/otel_collector/internal/cgroups/testdata/proc/cgroups/mountinfo similarity index 100% rename from internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/proc/cgroups/mountinfo rename to internal/otel_collector/internal/cgroups/testdata/proc/cgroups/mountinfo diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/proc/invalid-cgroup/cgroup b/internal/otel_collector/internal/cgroups/testdata/proc/invalid-cgroup/cgroup similarity index 100% rename from internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/proc/invalid-cgroup/cgroup rename to internal/otel_collector/internal/cgroups/testdata/proc/invalid-cgroup/cgroup diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/proc/invalid-mountinfo/mountinfo b/internal/otel_collector/internal/cgroups/testdata/proc/invalid-mountinfo/mountinfo similarity index 100% rename from internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/proc/invalid-mountinfo/mountinfo rename to internal/otel_collector/internal/cgroups/testdata/proc/invalid-mountinfo/mountinfo diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/proc/untranslatable/cgroup b/internal/otel_collector/internal/cgroups/testdata/proc/untranslatable/cgroup similarity index 100% rename from internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/proc/untranslatable/cgroup rename to internal/otel_collector/internal/cgroups/testdata/proc/untranslatable/cgroup diff --git a/internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/proc/untranslatable/mountinfo b/internal/otel_collector/internal/cgroups/testdata/proc/untranslatable/mountinfo similarity index 100% rename from 
internal/otel_collector/processor/memorylimiter/internal/cgroups/testdata/proc/untranslatable/mountinfo rename to internal/otel_collector/internal/cgroups/testdata/proc/untranslatable/mountinfo diff --git a/internal/otel_collector/internal/data/.gitignore b/internal/otel_collector/internal/data/.gitignore deleted file mode 100644 index 980a4a35c71..00000000000 --- a/internal/otel_collector/internal/data/.gitignore +++ /dev/null @@ -1 +0,0 @@ -.patched-otlp-proto \ No newline at end of file diff --git a/internal/otel_collector/internal/data/bytesid.go b/internal/otel_collector/internal/data/bytesid.go deleted file mode 100644 index 5b02eaa6a45..00000000000 --- a/internal/otel_collector/internal/data/bytesid.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package data - -import ( - "encoding/hex" - "errors" - "fmt" -) - -// marshalJSON converts trace id into a hex string enclosed in quotes. -// Called by Protobuf JSON deserialization. -func marshalJSON(id []byte) ([]byte, error) { - if len(id) == 0 { - return []byte(`""`), nil - } - - // 2 chars per byte plus 2 quote chars at the start and end. - hexLen := 2*len(id) + 2 - - b := make([]byte, hexLen) - hex.Encode(b[1:hexLen-1], id) - b[0], b[hexLen-1] = '"', '"' - - return b, nil -} - -// unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes. -// Called by Protobuf JSON deserialization. -func unmarshalJSON(dst []byte, src []byte) error { - if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' { - src = src[1 : l-1] - } - nLen := len(src) - if nLen == 0 { - return nil - } - - if len(dst) != hex.DecodedLen(nLen) { - return errors.New("invalid length for ID") - } - - _, err := hex.Decode(dst, src) - if err != nil { - return fmt.Errorf("cannot unmarshal ID from string '%s': %w", string(src), err) - } - return nil -} - -func marshalBytes(dst []byte, src []byte) (n int, err error) { - if len(dst) < len(src) { - return 0, errors.New("buffer is too short") - } - return copy(dst, src), nil -} diff --git a/internal/otel_collector/internal/data/protogen/collector/logs/v1/logs_service.pb.go b/internal/otel_collector/internal/data/protogen/collector/logs/v1/logs_service.pb.go deleted file mode 100644 index ccf286ef470..00000000000 --- a/internal/otel_collector/internal/data/protogen/collector/logs/v1/logs_service.pb.go +++ /dev/null @@ -1,552 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: opentelemetry/proto/collector/logs/v1/logs_service.proto - -package v1 - -import ( - context "context" - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - - v1 "go.opentelemetry.io/collector/internal/data/protogen/logs/v1" -) - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type ExportLogsServiceRequest struct { - // An array of ResourceLogs. - // For data coming from a single resource this array will typically contain one - // element. Intermediary nodes (such as OpenTelemetry Collector) that receive - // data from multiple origins typically batch the data before forwarding further and - // in that case this array will contain multiple elements. - ResourceLogs []*v1.ResourceLogs `protobuf:"bytes,1,rep,name=resource_logs,json=resourceLogs,proto3" json:"resource_logs,omitempty"` -} - -func (m *ExportLogsServiceRequest) Reset() { *m = ExportLogsServiceRequest{} } -func (m *ExportLogsServiceRequest) String() string { return proto.CompactTextString(m) } -func (*ExportLogsServiceRequest) ProtoMessage() {} -func (*ExportLogsServiceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_8e3bf87aaa43acd4, []int{0} -} -func (m *ExportLogsServiceRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExportLogsServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExportLogsServiceRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ExportLogsServiceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportLogsServiceRequest.Merge(m, src) -} -func (m *ExportLogsServiceRequest) XXX_Size() int { - return m.Size() -} -func (m *ExportLogsServiceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ExportLogsServiceRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportLogsServiceRequest proto.InternalMessageInfo - -func (m *ExportLogsServiceRequest) GetResourceLogs() []*v1.ResourceLogs { - if m != nil { - return m.ResourceLogs - } - return nil -} - -type ExportLogsServiceResponse struct { -} - -func (m *ExportLogsServiceResponse) Reset() { *m = ExportLogsServiceResponse{} } -func (m *ExportLogsServiceResponse) String() string { return proto.CompactTextString(m) } -func (*ExportLogsServiceResponse) ProtoMessage() {} -func (*ExportLogsServiceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_8e3bf87aaa43acd4, []int{1} -} -func (m *ExportLogsServiceResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExportLogsServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExportLogsServiceResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ExportLogsServiceResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportLogsServiceResponse.Merge(m, src) -} -func (m *ExportLogsServiceResponse) XXX_Size() int { - return m.Size() -} -func (m *ExportLogsServiceResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ExportLogsServiceResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportLogsServiceResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*ExportLogsServiceRequest)(nil), 
"opentelemetry.proto.collector.logs.v1.ExportLogsServiceRequest") - proto.RegisterType((*ExportLogsServiceResponse)(nil), "opentelemetry.proto.collector.logs.v1.ExportLogsServiceResponse") -} - -func init() { - proto.RegisterFile("opentelemetry/proto/collector/logs/v1/logs_service.proto", fileDescriptor_8e3bf87aaa43acd4) -} - -var fileDescriptor_8e3bf87aaa43acd4 = []byte{ - // 299 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0xc8, 0x2f, 0x48, 0xcd, - 0x2b, 0x49, 0xcd, 0x49, 0xcd, 0x4d, 0x2d, 0x29, 0xaa, 0xd4, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, - 0x4f, 0xce, 0xcf, 0xc9, 0x49, 0x4d, 0x2e, 0xc9, 0x2f, 0xd2, 0xcf, 0xc9, 0x4f, 0x2f, 0xd6, 0x2f, - 0x33, 0x04, 0xd3, 0xf1, 0xc5, 0xa9, 0x45, 0x65, 0x99, 0xc9, 0xa9, 0x7a, 0x60, 0x45, 0x42, 0xaa, - 0x28, 0x3a, 0x21, 0x82, 0x7a, 0x70, 0x9d, 0x7a, 0x20, 0x1d, 0x7a, 0x65, 0x86, 0x52, 0x22, 0xe9, - 0xf9, 0xe9, 0xf9, 0x10, 0x63, 0x41, 0x2c, 0x88, 0x3a, 0x29, 0x35, 0x6c, 0xd6, 0x22, 0x5b, 0x06, - 0x51, 0xa7, 0x94, 0xc5, 0x25, 0xe1, 0x5a, 0x51, 0x90, 0x5f, 0x54, 0xe2, 0x93, 0x9f, 0x5e, 0x1c, - 0x0c, 0xb1, 0x3f, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0xc8, 0x8f, 0x8b, 0xb7, 0x28, 0xb5, - 0x38, 0xbf, 0xb4, 0x28, 0x39, 0x35, 0x1e, 0xa4, 0x45, 0x82, 0x51, 0x81, 0x59, 0x83, 0xdb, 0x48, - 0x53, 0x0f, 0x9b, 0xc3, 0xa0, 0xce, 0xd1, 0x0b, 0x82, 0xea, 0x00, 0x99, 0x17, 0xc4, 0x53, 0x84, - 0xc4, 0x53, 0x92, 0xe6, 0x92, 0xc4, 0x62, 0x57, 0x71, 0x41, 0x7e, 0x5e, 0x71, 0xaa, 0xd1, 0x5c, - 0x46, 0x2e, 0x6e, 0x24, 0x71, 0xa1, 0x5e, 0x46, 0x2e, 0x36, 0x88, 0x6a, 0x21, 0x7b, 0x3d, 0xa2, - 0x42, 0x42, 0x0f, 0x97, 0x47, 0xa4, 0x1c, 0xc8, 0x37, 0x00, 0xe2, 0x3a, 0x25, 0x06, 0xa7, 0x79, - 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x84, 0xc7, - 0x72, 0x0c, 0x17, 0x1e, 0xcb, 0x31, 0xdc, 0x78, 0x2c, 0xc7, 0xc0, 0xa5, 0x91, 0x99, 0x4f, 0x9c, - 0x05, 0x4e, 0x02, 0x48, 0x66, 0x07, 0x80, 0xd4, 0x04, 0x30, 0x46, 0xb9, 0xa5, 0xa3, 0xeb, 0xce, - 0x44, 0x4e, 0x20, 0x99, 0x79, 0x25, 0xa9, 0x45, 0x79, 0x89, 0x39, 0xfa, 0x29, 0x89, 0x25, 0x89, - 0x90, 0x78, 0x4c, 0x4f, 0xcd, 0xc3, 0x4c, 0x41, 0x49, 0x6c, 0x60, 0x39, 0x63, 0x40, 0x00, 0x00, - 0x00, 0xff, 0xff, 0xaa, 0x3c, 0xaf, 0xaf, 0x71, 0x02, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// LogsServiceClient is the client API for LogsService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type LogsServiceClient interface { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. - Export(ctx context.Context, in *ExportLogsServiceRequest, opts ...grpc.CallOption) (*ExportLogsServiceResponse, error) -} - -type logsServiceClient struct { - cc *grpc.ClientConn -} - -func NewLogsServiceClient(cc *grpc.ClientConn) LogsServiceClient { - return &logsServiceClient{cc} -} - -func (c *logsServiceClient) Export(ctx context.Context, in *ExportLogsServiceRequest, opts ...grpc.CallOption) (*ExportLogsServiceResponse, error) { - out := new(ExportLogsServiceResponse) - err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.logs.v1.LogsService/Export", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -// LogsServiceServer is the server API for LogsService service. -type LogsServiceServer interface { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. - Export(context.Context, *ExportLogsServiceRequest) (*ExportLogsServiceResponse, error) -} - -// UnimplementedLogsServiceServer can be embedded to have forward compatible implementations. -type UnimplementedLogsServiceServer struct { -} - -func (*UnimplementedLogsServiceServer) Export(ctx context.Context, req *ExportLogsServiceRequest) (*ExportLogsServiceResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Export not implemented") -} - -func RegisterLogsServiceServer(s *grpc.Server, srv LogsServiceServer) { - s.RegisterService(&_LogsService_serviceDesc, srv) -} - -func _LogsService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ExportLogsServiceRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LogsServiceServer).Export(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/opentelemetry.proto.collector.logs.v1.LogsService/Export", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LogsServiceServer).Export(ctx, req.(*ExportLogsServiceRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _LogsService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "opentelemetry.proto.collector.logs.v1.LogsService", - HandlerType: (*LogsServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Export", - Handler: _LogsService_Export_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "opentelemetry/proto/collector/logs/v1/logs_service.proto", -} - -func (m *ExportLogsServiceRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExportLogsServiceRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExportLogsServiceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ResourceLogs) > 0 { - for iNdEx := len(m.ResourceLogs) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ResourceLogs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLogsService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ExportLogsServiceResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExportLogsServiceResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExportLogsServiceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func encodeVarintLogsService(dAtA []byte, offset int, v uint64) int { - offset -= sovLogsService(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base 
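The encodeVarintLogsService and sovLogsService helpers above implement protobuf's base-128 varint framing: a value occupies (bits.Len64(x|1)+6)/7 bytes, each byte carries 7 payload bits with the continuation bit set on all but the last, and the 0xa key written before each ResourceLogs element is the tag for field 1 with wire type 2 (1<<3 | 2). Below is a small forward-appending sketch of the same wire format; the generated code writes backwards into a pre-sized buffer, which is only an optimization, and the names sov/appendVarint are illustrative.

package main

import (
    "fmt"
    "math/bits"
)

// sov reports how many bytes the base-128 varint encoding of x occupies,
// the same formula as the deleted sovLogsService helper.
func sov(x uint64) int {
    return (bits.Len64(x|1) + 6) / 7
}

// appendVarint appends the varint encoding of v: low 7 bits first, with the
// continuation bit set on every byte except the last.
func appendVarint(b []byte, v uint64) []byte {
    for v >= 1<<7 {
        b = append(b, byte(v&0x7f|0x80))
        v >>= 7
    }
    return append(b, byte(v))
}

func main() {
    for _, v := range []uint64{1, 127, 128, 300} {
        fmt.Printf("v=%d size=%d bytes=% x\n", v, sov(v), appendVarint(nil, v))
    }
    // The key byte preceding each ResourceLogs element is the tag for
    // field 1 with wire type 2 (length-delimited): 1<<3 | 2 == 0x0a.
    fmt.Printf("tag(field=1, wiretype=2) = %#x\n", 1<<3|2)
}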
-} -func (m *ExportLogsServiceRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ResourceLogs) > 0 { - for _, e := range m.ResourceLogs { - l = e.Size() - n += 1 + l + sovLogsService(uint64(l)) - } - } - return n -} - -func (m *ExportLogsServiceResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func sovLogsService(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozLogsService(x uint64) (n int) { - return sovLogsService(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *ExportLogsServiceRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogsService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExportLogsServiceRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExportLogsServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceLogs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogsService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLogsService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLogsService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceLogs = append(m.ResourceLogs, &v1.ResourceLogs{}) - if err := m.ResourceLogs[len(m.ResourceLogs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLogsService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLogsService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExportLogsServiceResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogsService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExportLogsServiceResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExportLogsServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipLogsService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLogsService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipLogsService(dAtA []byte) (n int, err error) { - l := 
len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLogsService - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLogsService - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLogsService - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthLogsService - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupLogsService - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthLogsService - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthLogsService = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowLogsService = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupLogsService = fmt.Errorf("proto: unexpected end of group") -) diff --git a/internal/otel_collector/internal/data/protogen/collector/logs/v1/logs_service.pb.gw.go b/internal/otel_collector/internal/data/protogen/collector/logs/v1/logs_service.pb.gw.go deleted file mode 100644 index b40b49905fa..00000000000 --- a/internal/otel_collector/internal/data/protogen/collector/logs/v1/logs_service.pb.gw.go +++ /dev/null @@ -1,169 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: opentelemetry/proto/collector/logs/v1/logs_service.proto - -/* -Package v1 is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. 
-*/ -package v1 - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage -var _ = metadata.Join - -func request_LogsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client LogsServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ExportLogsServiceRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Export(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_LogsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, server LogsServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ExportLogsServiceRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Export(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterLogsServiceHandlerServer registers the http handlers for service LogsService to "mux". -// UnaryRPC :call LogsServiceServer directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterLogsServiceHandlerFromEndpoint instead. 
-func RegisterLogsServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server LogsServiceServer) error { - - mux.Handle("POST", pattern_LogsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_LogsService_Export_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_LogsService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterLogsServiceHandlerFromEndpoint is same as RegisterLogsServiceHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterLogsServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterLogsServiceHandler(ctx, mux, conn) -} - -// RegisterLogsServiceHandler registers the http handlers for service LogsService to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterLogsServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterLogsServiceHandlerClient(ctx, mux, NewLogsServiceClient(conn)) -} - -// RegisterLogsServiceHandlerClient registers the http handlers for service LogsService -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "LogsServiceClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "LogsServiceClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "LogsServiceClient" to call the correct interceptors. 
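RegisterLogsServiceHandlerFromEndpoint above is the usual entry point for this gateway: it dials the gRPC endpoint and proxies POST /v1/logs requests into LogsService/Export. Below is a minimal wiring sketch, assuming the grpc-gateway v1 runtime package shown in this file's imports; the listen address and the OTLP endpoint localhost:4317 are placeholders, and the protogen import path mirrors where this file lived (as an internal package it is only importable from inside the collector module).

package main

import (
    "context"
    "log"
    "net/http"

    "github.com/grpc-ecosystem/grpc-gateway/runtime"
    "google.golang.org/grpc"

    logsv1 "go.opentelemetry.io/collector/internal/data/protogen/collector/logs/v1"
)

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    // Translate JSON POSTs to /v1/logs into LogsService/Export gRPC calls.
    mux := runtime.NewServeMux()
    opts := []grpc.DialOption{grpc.WithInsecure()} // plaintext dial; placeholder for real credentials
    if err := logsv1.RegisterLogsServiceHandlerFromEndpoint(ctx, mux, "localhost:4317", opts); err != nil {
        log.Fatal(err)
    }

    log.Fatal(http.ListenAndServe(":8080", mux)) // placeholder listen address
}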
-func RegisterLogsServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client LogsServiceClient) error { - - mux.Handle("POST", pattern_LogsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_LogsService_Export_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_LogsService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_LogsService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "logs"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_LogsService_Export_0 = runtime.ForwardResponseMessage -) diff --git a/internal/otel_collector/internal/data/protogen/collector/metrics/v1/metrics_service.pb.go b/internal/otel_collector/internal/data/protogen/collector/metrics/v1/metrics_service.pb.go deleted file mode 100644 index e193ca32a6a..00000000000 --- a/internal/otel_collector/internal/data/protogen/collector/metrics/v1/metrics_service.pb.go +++ /dev/null @@ -1,552 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: opentelemetry/proto/collector/metrics/v1/metrics_service.proto - -package v1 - -import ( - context "context" - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - - v1 "go.opentelemetry.io/collector/internal/data/protogen/metrics/v1" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type ExportMetricsServiceRequest struct { - // An array of ResourceMetrics. - // For data coming from a single resource this array will typically contain one - // element. Intermediary nodes (such as OpenTelemetry Collector) that receive - // data from multiple origins typically batch the data before forwarding further and - // in that case this array will contain multiple elements. 
- ResourceMetrics []*v1.ResourceMetrics `protobuf:"bytes,1,rep,name=resource_metrics,json=resourceMetrics,proto3" json:"resource_metrics,omitempty"` -} - -func (m *ExportMetricsServiceRequest) Reset() { *m = ExportMetricsServiceRequest{} } -func (m *ExportMetricsServiceRequest) String() string { return proto.CompactTextString(m) } -func (*ExportMetricsServiceRequest) ProtoMessage() {} -func (*ExportMetricsServiceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_75fb6015e6e64798, []int{0} -} -func (m *ExportMetricsServiceRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExportMetricsServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExportMetricsServiceRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ExportMetricsServiceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportMetricsServiceRequest.Merge(m, src) -} -func (m *ExportMetricsServiceRequest) XXX_Size() int { - return m.Size() -} -func (m *ExportMetricsServiceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ExportMetricsServiceRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportMetricsServiceRequest proto.InternalMessageInfo - -func (m *ExportMetricsServiceRequest) GetResourceMetrics() []*v1.ResourceMetrics { - if m != nil { - return m.ResourceMetrics - } - return nil -} - -type ExportMetricsServiceResponse struct { -} - -func (m *ExportMetricsServiceResponse) Reset() { *m = ExportMetricsServiceResponse{} } -func (m *ExportMetricsServiceResponse) String() string { return proto.CompactTextString(m) } -func (*ExportMetricsServiceResponse) ProtoMessage() {} -func (*ExportMetricsServiceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_75fb6015e6e64798, []int{1} -} -func (m *ExportMetricsServiceResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExportMetricsServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExportMetricsServiceResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ExportMetricsServiceResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportMetricsServiceResponse.Merge(m, src) -} -func (m *ExportMetricsServiceResponse) XXX_Size() int { - return m.Size() -} -func (m *ExportMetricsServiceResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ExportMetricsServiceResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportMetricsServiceResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*ExportMetricsServiceRequest)(nil), "opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceRequest") - proto.RegisterType((*ExportMetricsServiceResponse)(nil), "opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceResponse") -} - -func init() { - proto.RegisterFile("opentelemetry/proto/collector/metrics/v1/metrics_service.proto", fileDescriptor_75fb6015e6e64798) -} - -var fileDescriptor_75fb6015e6e64798 = []byte{ - // 299 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0xcb, 0x2f, 0x48, 0xcd, - 0x2b, 0x49, 0xcd, 0x49, 0xcd, 0x4d, 0x2d, 0x29, 0xaa, 0xd4, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, - 0x4f, 0xce, 0xcf, 0xc9, 0x49, 0x4d, 0x2e, 0xc9, 0x2f, 0xd2, 
0x07, 0x89, 0x66, 0x26, 0x17, 0xeb, - 0x97, 0x19, 0xc2, 0x98, 0xf1, 0xc5, 0xa9, 0x45, 0x65, 0x99, 0xc9, 0xa9, 0x7a, 0x60, 0xa5, 0x42, - 0x1a, 0x28, 0xfa, 0x21, 0x82, 0x7a, 0x70, 0xfd, 0x7a, 0x50, 0x4d, 0x7a, 0x65, 0x86, 0x52, 0x22, - 0xe9, 0xf9, 0xe9, 0xf9, 0x10, 0xf3, 0x41, 0x2c, 0x88, 0x52, 0x29, 0x1d, 0x6c, 0xf6, 0x63, 0xda, - 0x0a, 0x51, 0xad, 0x54, 0xc9, 0x25, 0xed, 0x5a, 0x51, 0x90, 0x5f, 0x54, 0xe2, 0x0b, 0x11, 0x0e, - 0x86, 0xb8, 0x25, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x28, 0x8a, 0x4b, 0xa0, 0x28, 0xb5, - 0x38, 0xbf, 0xb4, 0x28, 0x39, 0x35, 0x1e, 0xaa, 0x51, 0x82, 0x51, 0x81, 0x59, 0x83, 0xdb, 0x48, - 0x5f, 0x0f, 0x9b, 0x3b, 0x11, 0xae, 0xd3, 0x0b, 0x82, 0xea, 0x83, 0x1a, 0x1c, 0xc4, 0x5f, 0x84, - 0x2a, 0xa0, 0x24, 0xc7, 0x25, 0x83, 0xdd, 0xea, 0xe2, 0x82, 0xfc, 0xbc, 0xe2, 0x54, 0xa3, 0x35, - 0x8c, 0x5c, 0x7c, 0xa8, 0x52, 0x42, 0x33, 0x19, 0xb9, 0xd8, 0x20, 0x7a, 0x84, 0x5c, 0xf5, 0x88, - 0x0d, 0x27, 0x3d, 0x3c, 0x1e, 0x94, 0x72, 0xa3, 0xd4, 0x18, 0x88, 0x63, 0x95, 0x18, 0x9c, 0x96, - 0x33, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x13, 0x1e, - 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x03, 0x97, 0x76, 0x66, 0x3e, 0xd1, - 0xd6, 0x38, 0x09, 0xa3, 0xda, 0x10, 0x00, 0x52, 0x19, 0xc0, 0x18, 0xe5, 0x99, 0x8e, 0x6e, 0x46, - 0x26, 0x72, 0xb2, 0xca, 0xcc, 0x2b, 0x49, 0x2d, 0xca, 0x4b, 0xcc, 0xd1, 0x4f, 0x49, 0x2c, 0x49, - 0x84, 0x44, 0x7a, 0x7a, 0x6a, 0x1e, 0xd6, 0x74, 0x97, 0xc4, 0x06, 0x96, 0x36, 0x06, 0x04, 0x00, - 0x00, 0xff, 0xff, 0x1e, 0x0e, 0xdd, 0xdd, 0xaa, 0x02, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// MetricsServiceClient is the client API for MetricsService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type MetricsServiceClient interface { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. - Export(ctx context.Context, in *ExportMetricsServiceRequest, opts ...grpc.CallOption) (*ExportMetricsServiceResponse, error) -} - -type metricsServiceClient struct { - cc *grpc.ClientConn -} - -func NewMetricsServiceClient(cc *grpc.ClientConn) MetricsServiceClient { - return &metricsServiceClient{cc} -} - -func (c *metricsServiceClient) Export(ctx context.Context, in *ExportMetricsServiceRequest, opts ...grpc.CallOption) (*ExportMetricsServiceResponse, error) { - out := new(ExportMetricsServiceResponse) - err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// MetricsServiceServer is the server API for MetricsService service. -type MetricsServiceServer interface { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. - Export(context.Context, *ExportMetricsServiceRequest) (*ExportMetricsServiceResponse, error) -} - -// UnimplementedMetricsServiceServer can be embedded to have forward compatible implementations. 
-type UnimplementedMetricsServiceServer struct { -} - -func (*UnimplementedMetricsServiceServer) Export(ctx context.Context, req *ExportMetricsServiceRequest) (*ExportMetricsServiceResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Export not implemented") -} - -func RegisterMetricsServiceServer(s *grpc.Server, srv MetricsServiceServer) { - s.RegisterService(&_MetricsService_serviceDesc, srv) -} - -func _MetricsService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ExportMetricsServiceRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MetricsServiceServer).Export(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MetricsServiceServer).Export(ctx, req.(*ExportMetricsServiceRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _MetricsService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "opentelemetry.proto.collector.metrics.v1.MetricsService", - HandlerType: (*MetricsServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Export", - Handler: _MetricsService_Export_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "opentelemetry/proto/collector/metrics/v1/metrics_service.proto", -} - -func (m *ExportMetricsServiceRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExportMetricsServiceRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExportMetricsServiceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ResourceMetrics) > 0 { - for iNdEx := len(m.ResourceMetrics) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ResourceMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetricsService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ExportMetricsServiceResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExportMetricsServiceResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExportMetricsServiceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func encodeVarintMetricsService(dAtA []byte, offset int, v uint64) int { - offset -= sovMetricsService(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ExportMetricsServiceRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ResourceMetrics) > 0 { - for _, e := range m.ResourceMetrics { - l = e.Size() - n += 1 + l + sovMetricsService(uint64(l)) - } - } - return n -} - -func (m *ExportMetricsServiceResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - 
_ = l - return n -} - -func sovMetricsService(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozMetricsService(x uint64) (n int) { - return sovMetricsService(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *ExportMetricsServiceRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetricsService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExportMetricsServiceRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExportMetricsServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetrics", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetricsService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetricsService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetricsService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceMetrics = append(m.ResourceMetrics, &v1.ResourceMetrics{}) - if err := m.ResourceMetrics[len(m.ResourceMetrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetricsService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetricsService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExportMetricsServiceResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetricsService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExportMetricsServiceResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExportMetricsServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipMetricsService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetricsService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipMetricsService(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetricsService - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 
0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetricsService - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetricsService - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthMetricsService - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupMetricsService - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthMetricsService - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthMetricsService = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowMetricsService = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupMetricsService = fmt.Errorf("proto: unexpected end of group") -) diff --git a/internal/otel_collector/internal/data/protogen/collector/metrics/v1/metrics_service.pb.gw.go b/internal/otel_collector/internal/data/protogen/collector/metrics/v1/metrics_service.pb.gw.go deleted file mode 100644 index f0dc06e5f5e..00000000000 --- a/internal/otel_collector/internal/data/protogen/collector/metrics/v1/metrics_service.pb.gw.go +++ /dev/null @@ -1,169 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: opentelemetry/proto/collector/metrics/v1/metrics_service.proto - -/* -Package v1 is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. 
-*/ -package v1 - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage -var _ = metadata.Join - -func request_MetricsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client MetricsServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ExportMetricsServiceRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Export(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_MetricsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, server MetricsServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ExportMetricsServiceRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Export(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterMetricsServiceHandlerServer registers the http handlers for service MetricsService to "mux". -// UnaryRPC :call MetricsServiceServer directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterMetricsServiceHandlerFromEndpoint instead. 
-func RegisterMetricsServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server MetricsServiceServer) error { - - mux.Handle("POST", pattern_MetricsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_MetricsService_Export_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_MetricsService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterMetricsServiceHandlerFromEndpoint is same as RegisterMetricsServiceHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterMetricsServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterMetricsServiceHandler(ctx, mux, conn) -} - -// RegisterMetricsServiceHandler registers the http handlers for service MetricsService to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterMetricsServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterMetricsServiceHandlerClient(ctx, mux, NewMetricsServiceClient(conn)) -} - -// RegisterMetricsServiceHandlerClient registers the http handlers for service MetricsService -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "MetricsServiceClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "MetricsServiceClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "MetricsServiceClient" to call the correct interceptors. 
-func RegisterMetricsServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client MetricsServiceClient) error { - - mux.Handle("POST", pattern_MetricsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_MetricsService_Export_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_MetricsService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_MetricsService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "metrics"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_MetricsService_Export_0 = runtime.ForwardResponseMessage -) diff --git a/internal/otel_collector/internal/data/protogen/collector/trace/v1/trace_config.pb.go b/internal/otel_collector/internal/data/protogen/collector/trace/v1/trace_config.pb.go deleted file mode 100644 index 3a906f56f7f..00000000000 --- a/internal/otel_collector/internal/data/protogen/collector/trace/v1/trace_config.pb.go +++ /dev/null @@ -1,1249 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: opentelemetry/proto/trace/v1/trace_config.proto - -package v1 - -import ( - encoding_binary "encoding/binary" - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// How spans should be sampled: -// - Always off -// - Always on -// - Always follow the parent Span's decision (off if no parent). -type ConstantSampler_ConstantDecision int32 - -const ( - ConstantSampler_ALWAYS_OFF ConstantSampler_ConstantDecision = 0 - ConstantSampler_ALWAYS_ON ConstantSampler_ConstantDecision = 1 - ConstantSampler_ALWAYS_PARENT ConstantSampler_ConstantDecision = 2 -) - -var ConstantSampler_ConstantDecision_name = map[int32]string{ - 0: "ALWAYS_OFF", - 1: "ALWAYS_ON", - 2: "ALWAYS_PARENT", -} - -var ConstantSampler_ConstantDecision_value = map[string]int32{ - "ALWAYS_OFF": 0, - "ALWAYS_ON": 1, - "ALWAYS_PARENT": 2, -} - -func (x ConstantSampler_ConstantDecision) String() string { - return proto.EnumName(ConstantSampler_ConstantDecision_name, int32(x)) -} - -func (ConstantSampler_ConstantDecision) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_5936aa8fa6443e6f, []int{1, 0} -} - -// Global configuration of the trace service. All fields must be specified, or -// the default (zero) values will be used for each type. -type TraceConfig struct { - // The global default sampler used to make decisions on span sampling. 
- // - // Types that are valid to be assigned to Sampler: - // *TraceConfig_ConstantSampler - // *TraceConfig_TraceIdRatioBased - // *TraceConfig_RateLimitingSampler - Sampler isTraceConfig_Sampler `protobuf_oneof:"sampler"` - // The global default max number of attributes per span. - MaxNumberOfAttributes int64 `protobuf:"varint,4,opt,name=max_number_of_attributes,json=maxNumberOfAttributes,proto3" json:"max_number_of_attributes,omitempty"` - // The global default max number of annotation events per span. - MaxNumberOfTimedEvents int64 `protobuf:"varint,5,opt,name=max_number_of_timed_events,json=maxNumberOfTimedEvents,proto3" json:"max_number_of_timed_events,omitempty"` - // The global default max number of attributes per timed event. - MaxNumberOfAttributesPerTimedEvent int64 `protobuf:"varint,6,opt,name=max_number_of_attributes_per_timed_event,json=maxNumberOfAttributesPerTimedEvent,proto3" json:"max_number_of_attributes_per_timed_event,omitempty"` - // The global default max number of link entries per span. - MaxNumberOfLinks int64 `protobuf:"varint,7,opt,name=max_number_of_links,json=maxNumberOfLinks,proto3" json:"max_number_of_links,omitempty"` - // The global default max number of attributes per span. - MaxNumberOfAttributesPerLink int64 `protobuf:"varint,8,opt,name=max_number_of_attributes_per_link,json=maxNumberOfAttributesPerLink,proto3" json:"max_number_of_attributes_per_link,omitempty"` -} - -func (m *TraceConfig) Reset() { *m = TraceConfig{} } -func (m *TraceConfig) String() string { return proto.CompactTextString(m) } -func (*TraceConfig) ProtoMessage() {} -func (*TraceConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_5936aa8fa6443e6f, []int{0} -} -func (m *TraceConfig) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TraceConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TraceConfig.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *TraceConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_TraceConfig.Merge(m, src) -} -func (m *TraceConfig) XXX_Size() int { - return m.Size() -} -func (m *TraceConfig) XXX_DiscardUnknown() { - xxx_messageInfo_TraceConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_TraceConfig proto.InternalMessageInfo - -type isTraceConfig_Sampler interface { - isTraceConfig_Sampler() - MarshalTo([]byte) (int, error) - Size() int -} - -type TraceConfig_ConstantSampler struct { - ConstantSampler *ConstantSampler `protobuf:"bytes,1,opt,name=constant_sampler,json=constantSampler,proto3,oneof" json:"constant_sampler,omitempty"` -} -type TraceConfig_TraceIdRatioBased struct { - TraceIdRatioBased *TraceIdRatioBased `protobuf:"bytes,2,opt,name=trace_id_ratio_based,json=traceIdRatioBased,proto3,oneof" json:"trace_id_ratio_based,omitempty"` -} -type TraceConfig_RateLimitingSampler struct { - RateLimitingSampler *RateLimitingSampler `protobuf:"bytes,3,opt,name=rate_limiting_sampler,json=rateLimitingSampler,proto3,oneof" json:"rate_limiting_sampler,omitempty"` -} - -func (*TraceConfig_ConstantSampler) isTraceConfig_Sampler() {} -func (*TraceConfig_TraceIdRatioBased) isTraceConfig_Sampler() {} -func (*TraceConfig_RateLimitingSampler) isTraceConfig_Sampler() {} - -func (m *TraceConfig) GetSampler() isTraceConfig_Sampler { - if m != nil { - return m.Sampler - } - return nil -} - -func (m *TraceConfig) GetConstantSampler() *ConstantSampler { - if x, ok := 
m.GetSampler().(*TraceConfig_ConstantSampler); ok { - return x.ConstantSampler - } - return nil -} - -func (m *TraceConfig) GetTraceIdRatioBased() *TraceIdRatioBased { - if x, ok := m.GetSampler().(*TraceConfig_TraceIdRatioBased); ok { - return x.TraceIdRatioBased - } - return nil -} - -func (m *TraceConfig) GetRateLimitingSampler() *RateLimitingSampler { - if x, ok := m.GetSampler().(*TraceConfig_RateLimitingSampler); ok { - return x.RateLimitingSampler - } - return nil -} - -func (m *TraceConfig) GetMaxNumberOfAttributes() int64 { - if m != nil { - return m.MaxNumberOfAttributes - } - return 0 -} - -func (m *TraceConfig) GetMaxNumberOfTimedEvents() int64 { - if m != nil { - return m.MaxNumberOfTimedEvents - } - return 0 -} - -func (m *TraceConfig) GetMaxNumberOfAttributesPerTimedEvent() int64 { - if m != nil { - return m.MaxNumberOfAttributesPerTimedEvent - } - return 0 -} - -func (m *TraceConfig) GetMaxNumberOfLinks() int64 { - if m != nil { - return m.MaxNumberOfLinks - } - return 0 -} - -func (m *TraceConfig) GetMaxNumberOfAttributesPerLink() int64 { - if m != nil { - return m.MaxNumberOfAttributesPerLink - } - return 0 -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*TraceConfig) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*TraceConfig_ConstantSampler)(nil), - (*TraceConfig_TraceIdRatioBased)(nil), - (*TraceConfig_RateLimitingSampler)(nil), - } -} - -// Sampler that always makes a constant decision on span sampling. -type ConstantSampler struct { - Decision ConstantSampler_ConstantDecision `protobuf:"varint,1,opt,name=decision,proto3,enum=opentelemetry.proto.trace.v1.ConstantSampler_ConstantDecision" json:"decision,omitempty"` -} - -func (m *ConstantSampler) Reset() { *m = ConstantSampler{} } -func (m *ConstantSampler) String() string { return proto.CompactTextString(m) } -func (*ConstantSampler) ProtoMessage() {} -func (*ConstantSampler) Descriptor() ([]byte, []int) { - return fileDescriptor_5936aa8fa6443e6f, []int{1} -} -func (m *ConstantSampler) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ConstantSampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ConstantSampler.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ConstantSampler) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConstantSampler.Merge(m, src) -} -func (m *ConstantSampler) XXX_Size() int { - return m.Size() -} -func (m *ConstantSampler) XXX_DiscardUnknown() { - xxx_messageInfo_ConstantSampler.DiscardUnknown(m) -} - -var xxx_messageInfo_ConstantSampler proto.InternalMessageInfo - -func (m *ConstantSampler) GetDecision() ConstantSampler_ConstantDecision { - if m != nil { - return m.Decision - } - return ConstantSampler_ALWAYS_OFF -} - -// Sampler that tries to uniformly sample traces with a given ratio. -// The ratio of sampling a trace is equal to that of the specified ratio. -type TraceIdRatioBased struct { - // The desired ratio of sampling. Must be within [0.0, 1.0]. 
- SamplingRatio float64 `protobuf:"fixed64,1,opt,name=samplingRatio,proto3" json:"samplingRatio,omitempty"` -} - -func (m *TraceIdRatioBased) Reset() { *m = TraceIdRatioBased{} } -func (m *TraceIdRatioBased) String() string { return proto.CompactTextString(m) } -func (*TraceIdRatioBased) ProtoMessage() {} -func (*TraceIdRatioBased) Descriptor() ([]byte, []int) { - return fileDescriptor_5936aa8fa6443e6f, []int{2} -} -func (m *TraceIdRatioBased) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TraceIdRatioBased) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TraceIdRatioBased.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *TraceIdRatioBased) XXX_Merge(src proto.Message) { - xxx_messageInfo_TraceIdRatioBased.Merge(m, src) -} -func (m *TraceIdRatioBased) XXX_Size() int { - return m.Size() -} -func (m *TraceIdRatioBased) XXX_DiscardUnknown() { - xxx_messageInfo_TraceIdRatioBased.DiscardUnknown(m) -} - -var xxx_messageInfo_TraceIdRatioBased proto.InternalMessageInfo - -func (m *TraceIdRatioBased) GetSamplingRatio() float64 { - if m != nil { - return m.SamplingRatio - } - return 0 -} - -// Sampler that tries to sample with a rate per time window. -type RateLimitingSampler struct { - // Rate per second. - Qps int64 `protobuf:"varint,1,opt,name=qps,proto3" json:"qps,omitempty"` -} - -func (m *RateLimitingSampler) Reset() { *m = RateLimitingSampler{} } -func (m *RateLimitingSampler) String() string { return proto.CompactTextString(m) } -func (*RateLimitingSampler) ProtoMessage() {} -func (*RateLimitingSampler) Descriptor() ([]byte, []int) { - return fileDescriptor_5936aa8fa6443e6f, []int{3} -} -func (m *RateLimitingSampler) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RateLimitingSampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RateLimitingSampler.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RateLimitingSampler) XXX_Merge(src proto.Message) { - xxx_messageInfo_RateLimitingSampler.Merge(m, src) -} -func (m *RateLimitingSampler) XXX_Size() int { - return m.Size() -} -func (m *RateLimitingSampler) XXX_DiscardUnknown() { - xxx_messageInfo_RateLimitingSampler.DiscardUnknown(m) -} - -var xxx_messageInfo_RateLimitingSampler proto.InternalMessageInfo - -func (m *RateLimitingSampler) GetQps() int64 { - if m != nil { - return m.Qps - } - return 0 -} - -func init() { - proto.RegisterEnum("opentelemetry.proto.trace.v1.ConstantSampler_ConstantDecision", ConstantSampler_ConstantDecision_name, ConstantSampler_ConstantDecision_value) - proto.RegisterType((*TraceConfig)(nil), "opentelemetry.proto.trace.v1.TraceConfig") - proto.RegisterType((*ConstantSampler)(nil), "opentelemetry.proto.trace.v1.ConstantSampler") - proto.RegisterType((*TraceIdRatioBased)(nil), "opentelemetry.proto.trace.v1.TraceIdRatioBased") - proto.RegisterType((*RateLimitingSampler)(nil), "opentelemetry.proto.trace.v1.RateLimitingSampler") -} - -func init() { - proto.RegisterFile("opentelemetry/proto/trace/v1/trace_config.proto", fileDescriptor_5936aa8fa6443e6f) -} - -var fileDescriptor_5936aa8fa6443e6f = []byte{ - // 560 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x94, 
0x4f, 0x6f, 0xda, 0x30, - 0x18, 0xc6, 0x93, 0xb2, 0xfe, 0x7b, 0xab, 0xb6, 0xc1, 0xb4, 0x53, 0x54, 0x55, 0x59, 0x17, 0x4d, - 0x1a, 0x97, 0x12, 0xd1, 0x1d, 0xa6, 0xed, 0x30, 0x09, 0xfa, 0x77, 0x12, 0xa2, 0x28, 0x45, 0x9a, - 0xc6, 0xc5, 0x32, 0x89, 0x89, 0xac, 0x25, 0x36, 0x73, 0x5c, 0xd4, 0xdd, 0xf7, 0x01, 0x76, 0xda, - 0x17, 0xd9, 0x97, 0xd8, 0xb1, 0xc7, 0x1d, 0x27, 0xf8, 0x22, 0x53, 0x1c, 0x4a, 0x81, 0xb6, 0x48, - 0xbb, 0xd9, 0xcf, 0xe3, 0xe7, 0xf7, 0xbe, 0x86, 0x37, 0x06, 0x4f, 0xf4, 0x29, 0x57, 0x34, 0xa6, - 0x09, 0x55, 0xf2, 0x9b, 0xd7, 0x97, 0x42, 0x09, 0x4f, 0x49, 0x12, 0x50, 0x6f, 0x50, 0xcd, 0x17, - 0x38, 0x10, 0xbc, 0xc7, 0xa2, 0x8a, 0xf6, 0xd0, 0xfe, 0x4c, 0x20, 0x17, 0x2b, 0xfa, 0x5c, 0x65, - 0x50, 0xdd, 0xdb, 0x89, 0x44, 0x24, 0x72, 0x48, 0xb6, 0xca, 0x6d, 0xf7, 0xfb, 0x32, 0x6c, 0xb4, - 0xb3, 0x23, 0xc7, 0x9a, 0x84, 0x3a, 0x60, 0x05, 0x82, 0xa7, 0x8a, 0x70, 0x85, 0x53, 0x92, 0xf4, - 0x63, 0x2a, 0x6d, 0xf3, 0xc0, 0x2c, 0x6f, 0x1c, 0x1d, 0x56, 0x16, 0xe1, 0x2b, 0xc7, 0xe3, 0xd4, - 0x55, 0x1e, 0xba, 0x30, 0xfc, 0xed, 0x60, 0x56, 0x42, 0x5d, 0xd8, 0xc9, 0xbb, 0x66, 0x21, 0x96, - 0x44, 0x31, 0x81, 0xbb, 0x24, 0xa5, 0xa1, 0xbd, 0xa4, 0xf9, 0xde, 0x62, 0xbe, 0x6e, 0xf2, 0x63, - 0xe8, 0x67, 0xb9, 0x7a, 0x16, 0xbb, 0x30, 0xfc, 0xa2, 0x9a, 0x17, 0x51, 0x04, 0xbb, 0x92, 0x28, - 0x8a, 0x63, 0x96, 0x30, 0xc5, 0x78, 0x34, 0xb9, 0x44, 0x41, 0x17, 0xa9, 0x2e, 0x2e, 0xe2, 0x13, - 0x45, 0x1b, 0xe3, 0xe4, 0xfd, 0x45, 0x4a, 0xf2, 0xa1, 0x8c, 0xde, 0x82, 0x9d, 0x90, 0x1b, 0xcc, - 0xaf, 0x93, 0x2e, 0x95, 0x58, 0xf4, 0x30, 0x51, 0x4a, 0xb2, 0xee, 0xb5, 0xa2, 0xa9, 0xfd, 0xec, - 0xc0, 0x2c, 0x17, 0xfc, 0xdd, 0x84, 0xdc, 0x34, 0xb5, 0x7d, 0xd9, 0xab, 0x4d, 0x4c, 0xf4, 0x1e, - 0xf6, 0x66, 0x83, 0x8a, 0x25, 0x34, 0xc4, 0x74, 0x40, 0xb9, 0x4a, 0xed, 0x65, 0x1d, 0x7d, 0x3e, - 0x15, 0x6d, 0x67, 0xf6, 0xa9, 0x76, 0x51, 0x1b, 0xca, 0x4f, 0x15, 0xc5, 0x7d, 0x2a, 0xa7, 0x51, - 0xf6, 0x8a, 0x26, 0xb9, 0x8f, 0x36, 0xd1, 0xa2, 0xf2, 0x1e, 0x8b, 0x0e, 0xa1, 0x34, 0x4b, 0x8d, - 0x19, 0xff, 0x92, 0xda, 0xab, 0x1a, 0x60, 0x4d, 0x01, 0x1a, 0x99, 0x8e, 0xce, 0xe1, 0xe5, 0xc2, - 0x26, 0xb2, 0xb4, 0xbd, 0xa6, 0xc3, 0xfb, 0x4f, 0x55, 0xcf, 0x48, 0xf5, 0x75, 0x58, 0x1d, 0xff, - 0x3b, 0xee, 0x2f, 0x13, 0xb6, 0xe7, 0x26, 0x08, 0x75, 0x60, 0x2d, 0xa4, 0x01, 0x4b, 0x99, 0xe0, - 0x7a, 0x04, 0xb7, 0x8e, 0x3e, 0xfc, 0xd7, 0x08, 0x4e, 0xf6, 0x27, 0x63, 0x8a, 0x3f, 0xe1, 0xb9, - 0x27, 0x60, 0xcd, 0xbb, 0x68, 0x0b, 0xa0, 0xd6, 0xf8, 0x54, 0xfb, 0x7c, 0x85, 0x2f, 0xcf, 0xce, - 0x2c, 0x03, 0x6d, 0xc2, 0xfa, 0xdd, 0xbe, 0x69, 0x99, 0xa8, 0x08, 0x9b, 0xe3, 0x6d, 0xab, 0xe6, - 0x9f, 0x36, 0xdb, 0xd6, 0x92, 0xfb, 0x0e, 0x8a, 0x0f, 0xc6, 0x12, 0xbd, 0x82, 0x4d, 0x7d, 0x2b, - 0xc6, 0x23, 0xad, 0xea, 0xde, 0x4d, 0x7f, 0x56, 0x74, 0x5f, 0x43, 0xe9, 0x91, 0x61, 0x43, 0x16, - 0x14, 0xbe, 0xf6, 0x53, 0x1d, 0x29, 0xf8, 0xd9, 0xb2, 0xfe, 0xd3, 0xfc, 0x3d, 0x74, 0xcc, 0xdb, - 0xa1, 0x63, 0xfe, 0x1d, 0x3a, 0xe6, 0x8f, 0x91, 0x63, 0xdc, 0x8e, 0x1c, 0xe3, 0xcf, 0xc8, 0x31, - 0xe0, 0x05, 0x13, 0x0b, 0x7f, 0x90, 0xba, 0x35, 0xf5, 0x65, 0xb7, 0x32, 0xab, 0x65, 0x76, 0xce, - 0xa3, 0xf9, 0x10, 0x13, 0x5e, 0x20, 0xe2, 0x98, 0x06, 0x4a, 0x48, 0x8f, 0x71, 0x45, 0x25, 0x27, - 0xb1, 0x17, 0x12, 0x45, 0xf2, 0x37, 0x27, 0xa2, 0x7c, 0xea, 0xc0, 0xdd, 0x03, 0xd4, 0x5d, 0xd1, - 0xe6, 0x9b, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xac, 0x76, 0xce, 0xc4, 0xa7, 0x04, 0x00, 0x00, -} - -func (m *TraceConfig) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - 
} - return dAtA[:n], nil -} - -func (m *TraceConfig) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TraceConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.MaxNumberOfAttributesPerLink != 0 { - i = encodeVarintTraceConfig(dAtA, i, uint64(m.MaxNumberOfAttributesPerLink)) - i-- - dAtA[i] = 0x40 - } - if m.MaxNumberOfLinks != 0 { - i = encodeVarintTraceConfig(dAtA, i, uint64(m.MaxNumberOfLinks)) - i-- - dAtA[i] = 0x38 - } - if m.MaxNumberOfAttributesPerTimedEvent != 0 { - i = encodeVarintTraceConfig(dAtA, i, uint64(m.MaxNumberOfAttributesPerTimedEvent)) - i-- - dAtA[i] = 0x30 - } - if m.MaxNumberOfTimedEvents != 0 { - i = encodeVarintTraceConfig(dAtA, i, uint64(m.MaxNumberOfTimedEvents)) - i-- - dAtA[i] = 0x28 - } - if m.MaxNumberOfAttributes != 0 { - i = encodeVarintTraceConfig(dAtA, i, uint64(m.MaxNumberOfAttributes)) - i-- - dAtA[i] = 0x20 - } - if m.Sampler != nil { - { - size := m.Sampler.Size() - i -= size - if _, err := m.Sampler.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - return len(dAtA) - i, nil -} - -func (m *TraceConfig_ConstantSampler) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TraceConfig_ConstantSampler) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.ConstantSampler != nil { - { - size, err := m.ConstantSampler.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTraceConfig(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} -func (m *TraceConfig_TraceIdRatioBased) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TraceConfig_TraceIdRatioBased) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.TraceIdRatioBased != nil { - { - size, err := m.TraceIdRatioBased.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTraceConfig(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - return len(dAtA) - i, nil -} -func (m *TraceConfig_RateLimitingSampler) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TraceConfig_RateLimitingSampler) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.RateLimitingSampler != nil { - { - size, err := m.RateLimitingSampler.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTraceConfig(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - return len(dAtA) - i, nil -} -func (m *ConstantSampler) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ConstantSampler) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ConstantSampler) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Decision != 0 { - i = encodeVarintTraceConfig(dAtA, i, uint64(m.Decision)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *TraceIdRatioBased) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return 
nil, err - } - return dAtA[:n], nil -} - -func (m *TraceIdRatioBased) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TraceIdRatioBased) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.SamplingRatio != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.SamplingRatio)))) - i-- - dAtA[i] = 0x9 - } - return len(dAtA) - i, nil -} - -func (m *RateLimitingSampler) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RateLimitingSampler) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RateLimitingSampler) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Qps != 0 { - i = encodeVarintTraceConfig(dAtA, i, uint64(m.Qps)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintTraceConfig(dAtA []byte, offset int, v uint64) int { - offset -= sovTraceConfig(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *TraceConfig) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Sampler != nil { - n += m.Sampler.Size() - } - if m.MaxNumberOfAttributes != 0 { - n += 1 + sovTraceConfig(uint64(m.MaxNumberOfAttributes)) - } - if m.MaxNumberOfTimedEvents != 0 { - n += 1 + sovTraceConfig(uint64(m.MaxNumberOfTimedEvents)) - } - if m.MaxNumberOfAttributesPerTimedEvent != 0 { - n += 1 + sovTraceConfig(uint64(m.MaxNumberOfAttributesPerTimedEvent)) - } - if m.MaxNumberOfLinks != 0 { - n += 1 + sovTraceConfig(uint64(m.MaxNumberOfLinks)) - } - if m.MaxNumberOfAttributesPerLink != 0 { - n += 1 + sovTraceConfig(uint64(m.MaxNumberOfAttributesPerLink)) - } - return n -} - -func (m *TraceConfig_ConstantSampler) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ConstantSampler != nil { - l = m.ConstantSampler.Size() - n += 1 + l + sovTraceConfig(uint64(l)) - } - return n -} -func (m *TraceConfig_TraceIdRatioBased) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.TraceIdRatioBased != nil { - l = m.TraceIdRatioBased.Size() - n += 1 + l + sovTraceConfig(uint64(l)) - } - return n -} -func (m *TraceConfig_RateLimitingSampler) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.RateLimitingSampler != nil { - l = m.RateLimitingSampler.Size() - n += 1 + l + sovTraceConfig(uint64(l)) - } - return n -} -func (m *ConstantSampler) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Decision != 0 { - n += 1 + sovTraceConfig(uint64(m.Decision)) - } - return n -} - -func (m *TraceIdRatioBased) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.SamplingRatio != 0 { - n += 9 - } - return n -} - -func (m *RateLimitingSampler) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Qps != 0 { - n += 1 + sovTraceConfig(uint64(m.Qps)) - } - return n -} - -func sovTraceConfig(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozTraceConfig(x uint64) (n int) { - return sovTraceConfig(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *TraceConfig) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 
- for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTraceConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TraceConfig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TraceConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConstantSampler", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTraceConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTraceConfig - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTraceConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &ConstantSampler{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Sampler = &TraceConfig_ConstantSampler{v} - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TraceIdRatioBased", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTraceConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTraceConfig - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTraceConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &TraceIdRatioBased{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Sampler = &TraceConfig_TraceIdRatioBased{v} - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RateLimitingSampler", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTraceConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTraceConfig - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTraceConfig - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &RateLimitingSampler{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Sampler = &TraceConfig_RateLimitingSampler{v} - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxNumberOfAttributes", wireType) - } - m.MaxNumberOfAttributes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTraceConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxNumberOfAttributes |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxNumberOfTimedEvents", wireType) - } - m.MaxNumberOfTimedEvents = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTraceConfig - } - 
if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxNumberOfTimedEvents |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxNumberOfAttributesPerTimedEvent", wireType) - } - m.MaxNumberOfAttributesPerTimedEvent = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTraceConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxNumberOfAttributesPerTimedEvent |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxNumberOfLinks", wireType) - } - m.MaxNumberOfLinks = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTraceConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxNumberOfLinks |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxNumberOfAttributesPerLink", wireType) - } - m.MaxNumberOfAttributesPerLink = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTraceConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxNumberOfAttributesPerLink |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipTraceConfig(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTraceConfig - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConstantSampler) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTraceConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConstantSampler: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConstantSampler: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Decision", wireType) - } - m.Decision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTraceConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Decision |= ConstantSampler_ConstantDecision(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipTraceConfig(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTraceConfig - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TraceIdRatioBased) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTraceConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - 
b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TraceIdRatioBased: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TraceIdRatioBased: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field SamplingRatio", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.SamplingRatio = float64(math.Float64frombits(v)) - default: - iNdEx = preIndex - skippy, err := skipTraceConfig(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTraceConfig - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RateLimitingSampler) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTraceConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RateLimitingSampler: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RateLimitingSampler: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Qps", wireType) - } - m.Qps = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTraceConfig - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Qps |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipTraceConfig(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTraceConfig - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipTraceConfig(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTraceConfig - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTraceConfig - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTraceConfig - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthTraceConfig - } - iNdEx += length - case 3: - depth++ - case 
4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupTraceConfig - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthTraceConfig - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthTraceConfig = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowTraceConfig = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupTraceConfig = fmt.Errorf("proto: unexpected end of group") -) diff --git a/internal/otel_collector/internal/data/protogen/collector/trace/v1/trace_service.pb.go b/internal/otel_collector/internal/data/protogen/collector/trace/v1/trace_service.pb.go deleted file mode 100644 index f481fa560c7..00000000000 --- a/internal/otel_collector/internal/data/protogen/collector/trace/v1/trace_service.pb.go +++ /dev/null @@ -1,552 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: opentelemetry/proto/collector/trace/v1/trace_service.proto - -package v1 - -import ( - context "context" - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - - v1 "go.opentelemetry.io/collector/internal/data/protogen/trace/v1" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type ExportTraceServiceRequest struct { - // An array of ResourceSpans. - // For data coming from a single resource this array will typically contain one - // element. Intermediary nodes (such as OpenTelemetry Collector) that receive - // data from multiple origins typically batch the data before forwarding further and - // in that case this array will contain multiple elements. 
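The request type deleted above documents that ResourceSpans carries one element per originating resource and several once an intermediary has batched data from multiple origins. A minimal sketch of that batching, assuming it sits alongside the generated code; batchRequest is an illustrative helper, not part of the collector.

// batchRequest merges spans gathered from several origins into a single
// export request, mirroring what an intermediary such as the OpenTelemetry
// Collector does before forwarding data further.
func batchRequest(batches ...[]*v1.ResourceSpans) *ExportTraceServiceRequest {
	req := &ExportTraceServiceRequest{}
	for _, rs := range batches {
		// A single-resource producer would append exactly one element here;
		// a batching node appends one slice per origin.
		req.ResourceSpans = append(req.ResourceSpans, rs...)
	}
	return req
}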
- ResourceSpans []*v1.ResourceSpans `protobuf:"bytes,1,rep,name=resource_spans,json=resourceSpans,proto3" json:"resource_spans,omitempty"` -} - -func (m *ExportTraceServiceRequest) Reset() { *m = ExportTraceServiceRequest{} } -func (m *ExportTraceServiceRequest) String() string { return proto.CompactTextString(m) } -func (*ExportTraceServiceRequest) ProtoMessage() {} -func (*ExportTraceServiceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_192a962890318cf4, []int{0} -} -func (m *ExportTraceServiceRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExportTraceServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExportTraceServiceRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ExportTraceServiceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportTraceServiceRequest.Merge(m, src) -} -func (m *ExportTraceServiceRequest) XXX_Size() int { - return m.Size() -} -func (m *ExportTraceServiceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ExportTraceServiceRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportTraceServiceRequest proto.InternalMessageInfo - -func (m *ExportTraceServiceRequest) GetResourceSpans() []*v1.ResourceSpans { - if m != nil { - return m.ResourceSpans - } - return nil -} - -type ExportTraceServiceResponse struct { -} - -func (m *ExportTraceServiceResponse) Reset() { *m = ExportTraceServiceResponse{} } -func (m *ExportTraceServiceResponse) String() string { return proto.CompactTextString(m) } -func (*ExportTraceServiceResponse) ProtoMessage() {} -func (*ExportTraceServiceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_192a962890318cf4, []int{1} -} -func (m *ExportTraceServiceResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExportTraceServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExportTraceServiceResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ExportTraceServiceResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportTraceServiceResponse.Merge(m, src) -} -func (m *ExportTraceServiceResponse) XXX_Size() int { - return m.Size() -} -func (m *ExportTraceServiceResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ExportTraceServiceResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportTraceServiceResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*ExportTraceServiceRequest)(nil), "opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest") - proto.RegisterType((*ExportTraceServiceResponse)(nil), "opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse") -} - -func init() { - proto.RegisterFile("opentelemetry/proto/collector/trace/v1/trace_service.proto", fileDescriptor_192a962890318cf4) -} - -var fileDescriptor_192a962890318cf4 = []byte{ - // 299 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x52, 0xcd, 0x4a, 0xc4, 0x30, - 0x10, 0x6e, 0x10, 0xf6, 0x10, 0x7f, 0xc0, 0xe2, 0x41, 0x8b, 0x04, 0xd9, 0x83, 0xac, 0x08, 0x29, - 0xbb, 0xde, 0xbc, 0x59, 0x10, 0xaf, 0x4b, 0xd7, 0x93, 0x17, 0xa9, 0x75, 0x28, 0x85, 0x9a, 0xa9, - 0x93, 0x6c, 0xd1, 0xb7, 0xd0, 0x47, 0xd0, 0xa7, 0xf1, 
0xb8, 0x47, 0x8f, 0xd2, 0xbe, 0x88, 0xa4, - 0x51, 0xe9, 0x4a, 0x85, 0x05, 0x6f, 0x93, 0x99, 0xef, 0x2f, 0xc9, 0xf0, 0x53, 0x2c, 0x41, 0x19, - 0x28, 0xe0, 0x0e, 0x0c, 0x3d, 0x86, 0x25, 0xa1, 0xc1, 0x30, 0xc5, 0xa2, 0x80, 0xd4, 0x20, 0x85, - 0x86, 0x92, 0x14, 0xc2, 0x6a, 0xec, 0x8a, 0x6b, 0x0d, 0x54, 0xe5, 0x29, 0xc8, 0x16, 0xe6, 0x1f, - 0x2e, 0x71, 0x5d, 0x53, 0xfe, 0x70, 0x65, 0x4b, 0x91, 0xd5, 0x38, 0xd8, 0xc9, 0x30, 0x43, 0xa7, - 0x6c, 0x2b, 0x07, 0x0c, 0x46, 0x7d, 0xce, 0xcb, 0x7e, 0x0e, 0x39, 0x44, 0xbe, 0x77, 0xfe, 0x50, - 0x22, 0x99, 0x4b, 0xdb, 0x9c, 0xb9, 0x0c, 0x31, 0xdc, 0xcf, 0x41, 0x1b, 0x3f, 0xe6, 0x5b, 0x04, - 0x1a, 0xe7, 0x64, 0xe3, 0x95, 0x89, 0xd2, 0xbb, 0xec, 0x60, 0x6d, 0xb4, 0x3e, 0x39, 0x96, 0x7d, - 0xe9, 0xbe, 0x33, 0xc9, 0xf8, 0x8b, 0x33, 0xb3, 0x94, 0x78, 0x93, 0xba, 0xc7, 0xe1, 0x3e, 0x0f, - 0xfa, 0x0c, 0x75, 0x89, 0x4a, 0xc3, 0xe4, 0x95, 0xf1, 0x8d, 0xee, 0xc0, 0x7f, 0x66, 0x7c, 0xe0, - 0xf0, 0xfe, 0x99, 0x5c, 0xed, 0x4d, 0xe4, 0x9f, 0x17, 0x0a, 0xa2, 0xff, 0x48, 0xb8, 0x88, 0x43, - 0x2f, 0x7a, 0x61, 0x6f, 0xb5, 0x60, 0x8b, 0x5a, 0xb0, 0x8f, 0x5a, 0xb0, 0xa7, 0x46, 0x78, 0x8b, - 0x46, 0x78, 0xef, 0x8d, 0xf0, 0xf8, 0x51, 0x8e, 0x2b, 0x5a, 0x44, 0xdb, 0x5d, 0xf5, 0xa9, 0x45, - 0x4d, 0xd9, 0xd5, 0x45, 0xf6, 0x9b, 0x9f, 0x77, 0x57, 0x26, 0x57, 0x06, 0x48, 0x25, 0x45, 0x78, - 0x9b, 0x98, 0xc4, 0x7d, 0x6b, 0x06, 0xaa, 0x67, 0xa7, 0x6e, 0x06, 0xed, 0xf0, 0xe4, 0x33, 0x00, - 0x00, 0xff, 0xff, 0x53, 0x91, 0xbb, 0xe7, 0x84, 0x02, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// TraceServiceClient is the client API for TraceService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type TraceServiceClient interface { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. - Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error) -} - -type traceServiceClient struct { - cc *grpc.ClientConn -} - -func NewTraceServiceClient(cc *grpc.ClientConn) TraceServiceClient { - return &traceServiceClient{cc} -} - -func (c *traceServiceClient) Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error) { - out := new(ExportTraceServiceResponse) - err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// TraceServiceServer is the server API for TraceService service. -type TraceServiceServer interface { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. - Export(context.Context, *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) -} - -// UnimplementedTraceServiceServer can be embedded to have forward compatible implementations. 
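The deleted interfaces spell out the intended usage: create one TraceServiceClient and keep it for the life of the application, and embed UnimplementedTraceServiceServer on the server side for forward compatibility. A minimal server-side sketch, under the assumption that it lives in the same package as the generated code; tracesReceiver and register are invented names.

// tracesReceiver embeds UnimplementedTraceServiceServer so that new RPCs
// added to the service later do not break this implementation.
type tracesReceiver struct {
	UnimplementedTraceServiceServer
}

// Export overrides the unimplemented stub and acknowledges the batch.
func (r *tracesReceiver) Export(ctx context.Context, req *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) {
	// Consume req.GetResourceSpans() here; an empty response signals success.
	return &ExportTraceServiceResponse{}, nil
}

// register wires the receiver into a gRPC server owned by the caller.
func register(s *grpc.Server) {
	RegisterTraceServiceServer(s, &tracesReceiver{})
}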
-type UnimplementedTraceServiceServer struct { -} - -func (*UnimplementedTraceServiceServer) Export(ctx context.Context, req *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Export not implemented") -} - -func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) { - s.RegisterService(&_TraceService_serviceDesc, srv) -} - -func _TraceService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ExportTraceServiceRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TraceServiceServer).Export(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/opentelemetry.proto.collector.trace.v1.TraceService/Export", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TraceServiceServer).Export(ctx, req.(*ExportTraceServiceRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _TraceService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "opentelemetry.proto.collector.trace.v1.TraceService", - HandlerType: (*TraceServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Export", - Handler: _TraceService_Export_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "opentelemetry/proto/collector/trace/v1/trace_service.proto", -} - -func (m *ExportTraceServiceRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExportTraceServiceRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExportTraceServiceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ResourceSpans) > 0 { - for iNdEx := len(m.ResourceSpans) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ResourceSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTraceService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ExportTraceServiceResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExportTraceServiceResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExportTraceServiceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func encodeVarintTraceService(dAtA []byte, offset int, v uint64) int { - offset -= sovTraceService(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ExportTraceServiceRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ResourceSpans) > 0 { - for _, e := range m.ResourceSpans { - l = e.Size() - n += 1 + l + sovTraceService(uint64(l)) - } - } - return n -} - -func (m *ExportTraceServiceResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func sovTraceService(x uint64) (n int) { - return 
(math_bits.Len64(x|1) + 6) / 7 -} -func sozTraceService(x uint64) (n int) { - return sovTraceService(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *ExportTraceServiceRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTraceService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExportTraceServiceRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExportTraceServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceSpans", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTraceService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTraceService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTraceService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceSpans = append(m.ResourceSpans, &v1.ResourceSpans{}) - if err := m.ResourceSpans[len(m.ResourceSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTraceService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTraceService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExportTraceServiceResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTraceService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExportTraceServiceResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExportTraceServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipTraceService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTraceService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipTraceService(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTraceService - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTraceService - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTraceService - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthTraceService - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupTraceService - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthTraceService - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthTraceService = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowTraceService = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupTraceService = fmt.Errorf("proto: unexpected end of group") -) diff --git a/internal/otel_collector/internal/data/protogen/collector/trace/v1/trace_service.pb.gw.go b/internal/otel_collector/internal/data/protogen/collector/trace/v1/trace_service.pb.gw.go deleted file mode 100644 index 18dff3d03e7..00000000000 --- a/internal/otel_collector/internal/data/protogen/collector/trace/v1/trace_service.pb.gw.go +++ /dev/null @@ -1,169 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: opentelemetry/proto/collector/trace/v1/trace_service.proto - -/* -Package v1 is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. 
-*/ -package v1 - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage -var _ = metadata.Join - -func request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client TraceServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ExportTraceServiceRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Export(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, server TraceServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ExportTraceServiceRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Export(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterTraceServiceHandlerServer registers the http handlers for service TraceService to "mux". -// UnaryRPC :call TraceServiceServer directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterTraceServiceHandlerFromEndpoint instead. 
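The note above recommends RegisterTraceServiceHandlerFromEndpoint over the direct server registration. A rough usage sketch, assuming it runs next to the generated gateway code; serveGateway, the listen address, and the insecure dial option are illustrative only.

// serveGateway stands up the HTTP/JSON reverse proxy in front of a running
// gRPC endpoint, forwarding decoded Export requests over a client connection.
func serveGateway(ctx context.Context) error {
	mux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithInsecure()}
	if err := RegisterTraceServiceHandlerFromEndpoint(ctx, mux, "localhost:4317", opts); err != nil {
		return err
	}
	// Trace exports now arrive as HTTP POSTs and are proxied to the gRPC server.
	return http.ListenAndServe(":8080", mux)
}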
-func RegisterTraceServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server TraceServiceServer) error { - - mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_TraceService_Export_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_TraceService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterTraceServiceHandlerFromEndpoint is same as RegisterTraceServiceHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterTraceServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterTraceServiceHandler(ctx, mux, conn) -} - -// RegisterTraceServiceHandler registers the http handlers for service TraceService to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterTraceServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterTraceServiceHandlerClient(ctx, mux, NewTraceServiceClient(conn)) -} - -// RegisterTraceServiceHandlerClient registers the http handlers for service TraceService -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "TraceServiceClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "TraceServiceClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "TraceServiceClient" to call the correct interceptors. 
-func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client TraceServiceClient) error { - - mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_TraceService_Export_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_TraceService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "trace"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_TraceService_Export_0 = runtime.ForwardResponseMessage -) diff --git a/internal/otel_collector/internal/data/protogen/collector/trace/v1/trace_service_gateway_aliases.go b/internal/otel_collector/internal/data/protogen/collector/trace/v1/trace_service_gateway_aliases.go deleted file mode 100644 index 21dfb731e9e..00000000000 --- a/internal/otel_collector/internal/data/protogen/collector/trace/v1/trace_service_gateway_aliases.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import ( - context "context" - "net/http" - - "github.com/grpc-ecosystem/grpc-gateway/runtime" -) - -// The aliases in this file are necessary to fix the bug: -// https://github.com/open-telemetry/opentelemetry-collector/issues/1968 - -// patternTraceServiceExport0Alias is an alias for the incorrect pattern -// pattern_TraceService_Export_0 defined in trace_service.pb.gw.go. -// -// The path in the pattern_TraceService_Export_0 pattern is incorrect because it is -// composed from the historical name of the package v1.trace used in the Protobuf -// declarations in trace_service.proto file and results in the path of /v1/trace. -// -// This is incorrect since the OTLP spec requires the default path to be /v1/traces, -// see https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/protocol/otlp.md#request. -// -// We set the correct path in this alias. -var patternTraceServiceExport0Alias = runtime.MustPattern( - runtime.NewPattern( - 1, - []int{2, 0, 2, 1}, - []string{"v1", "traces"}, // Patch the path to be /v1/traces. - "", - runtime.AssumeColonVerbOpt(true)), -) - -// RegisterTraceServiceHandlerServerAlias registers the http handlers for service TraceService to "mux". -// UnaryRPC :call TraceServiceServer directly. 
-// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// -// RegisterTraceServiceHandlerServerAlias is the alias version of -// RegisterTraceServiceHandlerServer, and uses patternTraceServiceExport0Alias -// instead of pattern_TraceService_Export_0. -func RegisterTraceServiceHandlerServerAlias(ctx context.Context, mux *runtime.ServeMux, server TraceServiceServer) error { - - // pattern_TraceService_Export_0 is replaced by patternTraceServiceExport0Alias - // in the following line. This is the only change in this func compared to - // RegisterTraceServiceHandlerServer. - mux.Handle("POST", patternTraceServiceExport0Alias, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_TraceService_Export_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_TraceService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} diff --git a/internal/otel_collector/internal/data/protogen/common/v1/common.pb.go b/internal/otel_collector/internal/data/protogen/common/v1/common.pb.go deleted file mode 100644 index 24307a8f129..00000000000 --- a/internal/otel_collector/internal/data/protogen/common/v1/common.pb.go +++ /dev/null @@ -1,1762 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: opentelemetry/proto/common/v1/common.proto - -package v1 - -import ( - encoding_binary "encoding/binary" - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// AnyValue is used to represent any type of attribute value. AnyValue may contain a -// primitive value such as a string or integer or it may contain an arbitrary nested -// object containing arrays, key-value lists and primitives. -type AnyValue struct { - // The value is one of the listed fields. It is valid for all values to be unspecified - // in which case this AnyValue is considered to be "null". 
- // - // Types that are valid to be assigned to Value: - // *AnyValue_StringValue - // *AnyValue_BoolValue - // *AnyValue_IntValue - // *AnyValue_DoubleValue - // *AnyValue_ArrayValue - // *AnyValue_KvlistValue - Value isAnyValue_Value `protobuf_oneof:"value"` -} - -func (m *AnyValue) Reset() { *m = AnyValue{} } -func (m *AnyValue) String() string { return proto.CompactTextString(m) } -func (*AnyValue) ProtoMessage() {} -func (*AnyValue) Descriptor() ([]byte, []int) { - return fileDescriptor_62ba46dcb97aa817, []int{0} -} -func (m *AnyValue) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AnyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AnyValue.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AnyValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_AnyValue.Merge(m, src) -} -func (m *AnyValue) XXX_Size() int { - return m.Size() -} -func (m *AnyValue) XXX_DiscardUnknown() { - xxx_messageInfo_AnyValue.DiscardUnknown(m) -} - -var xxx_messageInfo_AnyValue proto.InternalMessageInfo - -type isAnyValue_Value interface { - isAnyValue_Value() - MarshalTo([]byte) (int, error) - Size() int -} - -type AnyValue_StringValue struct { - StringValue string `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof" json:"string_value,omitempty"` -} -type AnyValue_BoolValue struct { - BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof" json:"bool_value,omitempty"` -} -type AnyValue_IntValue struct { - IntValue int64 `protobuf:"varint,3,opt,name=int_value,json=intValue,proto3,oneof" json:"int_value,omitempty"` -} -type AnyValue_DoubleValue struct { - DoubleValue float64 `protobuf:"fixed64,4,opt,name=double_value,json=doubleValue,proto3,oneof" json:"double_value,omitempty"` -} -type AnyValue_ArrayValue struct { - ArrayValue *ArrayValue `protobuf:"bytes,5,opt,name=array_value,json=arrayValue,proto3,oneof" json:"array_value,omitempty"` -} -type AnyValue_KvlistValue struct { - KvlistValue *KeyValueList `protobuf:"bytes,6,opt,name=kvlist_value,json=kvlistValue,proto3,oneof" json:"kvlist_value,omitempty"` -} - -func (*AnyValue_StringValue) isAnyValue_Value() {} -func (*AnyValue_BoolValue) isAnyValue_Value() {} -func (*AnyValue_IntValue) isAnyValue_Value() {} -func (*AnyValue_DoubleValue) isAnyValue_Value() {} -func (*AnyValue_ArrayValue) isAnyValue_Value() {} -func (*AnyValue_KvlistValue) isAnyValue_Value() {} - -func (m *AnyValue) GetValue() isAnyValue_Value { - if m != nil { - return m.Value - } - return nil -} - -func (m *AnyValue) GetStringValue() string { - if x, ok := m.GetValue().(*AnyValue_StringValue); ok { - return x.StringValue - } - return "" -} - -func (m *AnyValue) GetBoolValue() bool { - if x, ok := m.GetValue().(*AnyValue_BoolValue); ok { - return x.BoolValue - } - return false -} - -func (m *AnyValue) GetIntValue() int64 { - if x, ok := m.GetValue().(*AnyValue_IntValue); ok { - return x.IntValue - } - return 0 -} - -func (m *AnyValue) GetDoubleValue() float64 { - if x, ok := m.GetValue().(*AnyValue_DoubleValue); ok { - return x.DoubleValue - } - return 0 -} - -func (m *AnyValue) GetArrayValue() *ArrayValue { - if x, ok := m.GetValue().(*AnyValue_ArrayValue); ok { - return x.ArrayValue - } - return nil -} - -func (m *AnyValue) GetKvlistValue() *KeyValueList { - if x, ok := m.GetValue().(*AnyValue_KvlistValue); ok { - return x.KvlistValue - } - 
return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*AnyValue) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*AnyValue_StringValue)(nil), - (*AnyValue_BoolValue)(nil), - (*AnyValue_IntValue)(nil), - (*AnyValue_DoubleValue)(nil), - (*AnyValue_ArrayValue)(nil), - (*AnyValue_KvlistValue)(nil), - } -} - -// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message -// since oneof in AnyValue does not allow repeated fields. -type ArrayValue struct { - // Array of values. The array may be empty (contain 0 elements). - Values []AnyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values"` -} - -func (m *ArrayValue) Reset() { *m = ArrayValue{} } -func (m *ArrayValue) String() string { return proto.CompactTextString(m) } -func (*ArrayValue) ProtoMessage() {} -func (*ArrayValue) Descriptor() ([]byte, []int) { - return fileDescriptor_62ba46dcb97aa817, []int{1} -} -func (m *ArrayValue) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ArrayValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ArrayValue.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ArrayValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_ArrayValue.Merge(m, src) -} -func (m *ArrayValue) XXX_Size() int { - return m.Size() -} -func (m *ArrayValue) XXX_DiscardUnknown() { - xxx_messageInfo_ArrayValue.DiscardUnknown(m) -} - -var xxx_messageInfo_ArrayValue proto.InternalMessageInfo - -func (m *ArrayValue) GetValues() []AnyValue { - if m != nil { - return m.Values - } - return nil -} - -// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message -// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need -// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to -// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches -// are semantically equivalent. -type KeyValueList struct { - // A collection of key/value pairs of key-value pairs. The list may be empty (may - // contain 0 elements). - Values []KeyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values"` -} - -func (m *KeyValueList) Reset() { *m = KeyValueList{} } -func (m *KeyValueList) String() string { return proto.CompactTextString(m) } -func (*KeyValueList) ProtoMessage() {} -func (*KeyValueList) Descriptor() ([]byte, []int) { - return fileDescriptor_62ba46dcb97aa817, []int{2} -} -func (m *KeyValueList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *KeyValueList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_KeyValueList.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *KeyValueList) XXX_Merge(src proto.Message) { - xxx_messageInfo_KeyValueList.Merge(m, src) -} -func (m *KeyValueList) XXX_Size() int { - return m.Size() -} -func (m *KeyValueList) XXX_DiscardUnknown() { - xxx_messageInfo_KeyValueList.DiscardUnknown(m) -} - -var xxx_messageInfo_KeyValueList proto.InternalMessageInfo - -func (m *KeyValueList) GetValues() []KeyValue { - if m != nil { - return m.Values - } - return nil -} - -// KeyValue is a key-value pair that is used to store Span attributes, Link -// attributes, etc. 
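The comments above explain why ArrayValue and KeyValueList exist: the oneof in AnyValue cannot hold a repeated field, so nested collections are wrapped in dedicated messages. A small sketch of a nested attribute built from these types, assuming it sits in the same package as the generated code; the attribute keys and values are arbitrary examples.

// exampleNestedAttribute builds a KeyValue whose value is itself a list of
// key-value pairs, which requires the KeyValueList wrapper inside the oneof.
func exampleNestedAttribute() KeyValue {
	return KeyValue{
		Key: "client",
		Value: AnyValue{
			Value: &AnyValue_KvlistValue{KvlistValue: &KeyValueList{
				Values: []KeyValue{
					{Key: "address", Value: AnyValue{Value: &AnyValue_StringValue{StringValue: "10.0.0.1"}}},
					{Key: "port", Value: AnyValue{Value: &AnyValue_IntValue{IntValue: 443}}},
				},
			}},
		},
	}
}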
-type KeyValue struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value AnyValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value"` -} - -func (m *KeyValue) Reset() { *m = KeyValue{} } -func (m *KeyValue) String() string { return proto.CompactTextString(m) } -func (*KeyValue) ProtoMessage() {} -func (*KeyValue) Descriptor() ([]byte, []int) { - return fileDescriptor_62ba46dcb97aa817, []int{3} -} -func (m *KeyValue) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *KeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_KeyValue.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *KeyValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_KeyValue.Merge(m, src) -} -func (m *KeyValue) XXX_Size() int { - return m.Size() -} -func (m *KeyValue) XXX_DiscardUnknown() { - xxx_messageInfo_KeyValue.DiscardUnknown(m) -} - -var xxx_messageInfo_KeyValue proto.InternalMessageInfo - -func (m *KeyValue) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *KeyValue) GetValue() AnyValue { - if m != nil { - return m.Value - } - return AnyValue{} -} - -// StringKeyValue is a pair of key/value strings. This is the simpler (and faster) version -// of KeyValue that only supports string values. -type StringKeyValue struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *StringKeyValue) Reset() { *m = StringKeyValue{} } -func (m *StringKeyValue) String() string { return proto.CompactTextString(m) } -func (*StringKeyValue) ProtoMessage() {} -func (*StringKeyValue) Descriptor() ([]byte, []int) { - return fileDescriptor_62ba46dcb97aa817, []int{4} -} -func (m *StringKeyValue) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StringKeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StringKeyValue.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StringKeyValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_StringKeyValue.Merge(m, src) -} -func (m *StringKeyValue) XXX_Size() int { - return m.Size() -} -func (m *StringKeyValue) XXX_DiscardUnknown() { - xxx_messageInfo_StringKeyValue.DiscardUnknown(m) -} - -var xxx_messageInfo_StringKeyValue proto.InternalMessageInfo - -func (m *StringKeyValue) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *StringKeyValue) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - -// InstrumentationLibrary is a message representing the instrumentation library information -// such as the fully qualified name and version. 
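StringKeyValue and InstrumentationLibrary, described above, are plain string pairs rather than oneof-based values. A brief sketch, again assuming the generated package; the literal values are placeholders.

// An instrumentation library identity and a string-only attribute, the
// lighter-weight alternative to KeyValue when only string values are needed.
var (
	exampleLibrary = InstrumentationLibrary{Name: "example/instrumentation", Version: "1.0.0"}
	exampleTag     = StringKeyValue{Key: "db.system", Value: "postgresql"}
)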
-type InstrumentationLibrary struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` -} - -func (m *InstrumentationLibrary) Reset() { *m = InstrumentationLibrary{} } -func (m *InstrumentationLibrary) String() string { return proto.CompactTextString(m) } -func (*InstrumentationLibrary) ProtoMessage() {} -func (*InstrumentationLibrary) Descriptor() ([]byte, []int) { - return fileDescriptor_62ba46dcb97aa817, []int{5} -} -func (m *InstrumentationLibrary) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *InstrumentationLibrary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_InstrumentationLibrary.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *InstrumentationLibrary) XXX_Merge(src proto.Message) { - xxx_messageInfo_InstrumentationLibrary.Merge(m, src) -} -func (m *InstrumentationLibrary) XXX_Size() int { - return m.Size() -} -func (m *InstrumentationLibrary) XXX_DiscardUnknown() { - xxx_messageInfo_InstrumentationLibrary.DiscardUnknown(m) -} - -var xxx_messageInfo_InstrumentationLibrary proto.InternalMessageInfo - -func (m *InstrumentationLibrary) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *InstrumentationLibrary) GetVersion() string { - if m != nil { - return m.Version - } - return "" -} - -func init() { - proto.RegisterType((*AnyValue)(nil), "opentelemetry.proto.common.v1.AnyValue") - proto.RegisterType((*ArrayValue)(nil), "opentelemetry.proto.common.v1.ArrayValue") - proto.RegisterType((*KeyValueList)(nil), "opentelemetry.proto.common.v1.KeyValueList") - proto.RegisterType((*KeyValue)(nil), "opentelemetry.proto.common.v1.KeyValue") - proto.RegisterType((*StringKeyValue)(nil), "opentelemetry.proto.common.v1.StringKeyValue") - proto.RegisterType((*InstrumentationLibrary)(nil), "opentelemetry.proto.common.v1.InstrumentationLibrary") -} - -func init() { - proto.RegisterFile("opentelemetry/proto/common/v1/common.proto", fileDescriptor_62ba46dcb97aa817) -} - -var fileDescriptor_62ba46dcb97aa817 = []byte{ - // 464 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0x4d, 0x6b, 0x13, 0x41, - 0x18, 0xde, 0x69, 0xd2, 0x34, 0x79, 0x37, 0x88, 0x0c, 0x45, 0x82, 0xd0, 0xed, 0x12, 0x0f, 0xae, - 0x0a, 0x59, 0x5a, 0x2f, 0x9e, 0x84, 0xa6, 0x28, 0x11, 0x73, 0x08, 0x5b, 0xf4, 0xe0, 0x45, 0x66, - 0xdb, 0x61, 0x19, 0x3a, 0x3b, 0x53, 0x66, 0x27, 0x0b, 0xfb, 0x13, 0xbc, 0xf9, 0x87, 0xbc, 0xf7, - 0xd8, 0xa3, 0x27, 0x91, 0xe4, 0x8f, 0xc8, 0x7c, 0x6c, 0x5b, 0x7b, 0x48, 0xc9, 0xed, 0x9d, 0x67, - 0x9e, 0x8f, 0x77, 0x3e, 0x5e, 0x78, 0x2d, 0xaf, 0xa8, 0xd0, 0x94, 0xd3, 0x92, 0x6a, 0xd5, 0xa4, - 0x57, 0x4a, 0x6a, 0x99, 0x9e, 0xcb, 0xb2, 0x94, 0x22, 0xad, 0x8f, 0x7c, 0x35, 0xb1, 0x30, 0x3e, - 0xf8, 0x8f, 0xeb, 0xc0, 0x89, 0x67, 0xd4, 0x47, 0xcf, 0xf7, 0x0b, 0x59, 0x48, 0x67, 0x60, 0x2a, - 0xb7, 0x3f, 0xfe, 0xb5, 0x03, 0xfd, 0x13, 0xd1, 0x7c, 0x25, 0x7c, 0x49, 0xf1, 0x0b, 0x18, 0x56, - 0x5a, 0x31, 0x51, 0x7c, 0xaf, 0xcd, 0x7a, 0x84, 0x62, 0x94, 0x0c, 0x66, 0x41, 0x16, 0x3a, 0xd4, - 0x91, 0x0e, 0x01, 0x72, 0x29, 0xb9, 0xa7, 0xec, 0xc4, 0x28, 0xe9, 0xcf, 0x82, 0x6c, 0x60, 0x30, - 0x47, 0x38, 0x80, 0x01, 0x13, 0xda, 0xef, 0x77, 0x62, 0x94, 0x74, 0x66, 0x41, 0xd6, 0x67, 0x42, - 0xdf, 0x86, 0x5c, 0xc8, 0x65, 0xce, 
0xa9, 0x67, 0x74, 0x63, 0x94, 0x20, 0x13, 0xe2, 0x50, 0x47, - 0x9a, 0x43, 0x48, 0x94, 0x22, 0x8d, 0xe7, 0xec, 0xc6, 0x28, 0x09, 0x8f, 0x5f, 0x4d, 0x36, 0x9e, - 0x70, 0x72, 0x62, 0x14, 0x56, 0x3f, 0x0b, 0x32, 0x20, 0xb7, 0x2b, 0xbc, 0x80, 0xe1, 0x65, 0xcd, - 0x59, 0xd5, 0x36, 0xd5, 0xb3, 0x76, 0x6f, 0x1e, 0xb1, 0xfb, 0x4c, 0x9d, 0x7c, 0xce, 0x2a, 0x6d, - 0xfa, 0x73, 0x16, 0x16, 0x9a, 0xee, 0xc1, 0xae, 0xb5, 0x1a, 0x9f, 0x01, 0xdc, 0xc5, 0xe2, 0x0f, - 0xd0, 0xb3, 0x70, 0x35, 0x42, 0x71, 0x27, 0x09, 0x8f, 0x5f, 0x3e, 0xd6, 0xb1, 0xbf, 0xf9, 0x69, - 0xf7, 0xfa, 0xcf, 0x61, 0x90, 0x79, 0xf1, 0xf8, 0x0b, 0x0c, 0xef, 0x87, 0x6f, 0x6d, 0xdb, 0x8a, - 0x1f, 0xd8, 0x12, 0xe8, 0xb7, 0x3b, 0xf8, 0x29, 0x74, 0x2e, 0x69, 0xe3, 0x5e, 0x38, 0x33, 0x25, - 0x3e, 0xf5, 0x47, 0xb2, 0x4f, 0xba, 0x75, 0xeb, 0xfe, 0x3a, 0xde, 0xc1, 0x93, 0x33, 0xfb, 0x57, - 0x36, 0x04, 0xed, 0xdf, 0x0f, 0x1a, 0xb4, 0xca, 0x8f, 0xf0, 0xec, 0x93, 0xa8, 0xb4, 0x5a, 0x96, - 0x54, 0x68, 0xa2, 0x99, 0x14, 0x73, 0x96, 0x2b, 0xa2, 0x1a, 0x8c, 0xa1, 0x2b, 0x48, 0xe9, 0x7f, - 0x63, 0x66, 0x6b, 0x3c, 0x82, 0xbd, 0x9a, 0xaa, 0x8a, 0x49, 0xe1, 0x5d, 0xda, 0xe5, 0xf4, 0x07, - 0xba, 0x5e, 0x45, 0xe8, 0x66, 0x15, 0xa1, 0xbf, 0xab, 0x08, 0xfd, 0x5c, 0x47, 0xc1, 0xcd, 0x3a, - 0x0a, 0x7e, 0xaf, 0xa3, 0x00, 0x62, 0x26, 0x37, 0x1f, 0x6a, 0x1a, 0x9e, 0xda, 0x72, 0x61, 0xe0, - 0x05, 0xfa, 0xf6, 0xbe, 0x78, 0x28, 0x60, 0x66, 0xfa, 0x38, 0xa7, 0xe7, 0x5a, 0xaa, 0x94, 0x09, - 0x4d, 0x95, 0x20, 0x3c, 0xbd, 0x20, 0x9a, 0xb8, 0xd9, 0x2c, 0xa8, 0xb8, 0x1b, 0xcf, 0xbc, 0x67, - 0xb1, 0xb7, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x2a, 0xbe, 0xe0, 0x4f, 0xc6, 0x03, 0x00, 0x00, -} - -func (m *AnyValue) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AnyValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AnyValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Value != nil { - { - size := m.Value.Size() - i -= size - if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - return len(dAtA) - i, nil -} - -func (m *AnyValue_StringValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AnyValue_StringValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i -= len(m.StringValue) - copy(dAtA[i:], m.StringValue) - i = encodeVarintCommon(dAtA, i, uint64(len(m.StringValue))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} -func (m *AnyValue_BoolValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AnyValue_BoolValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i-- - if m.BoolValue { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - return len(dAtA) - i, nil -} -func (m *AnyValue_IntValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AnyValue_IntValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i = encodeVarintCommon(dAtA, i, uint64(m.IntValue)) - i-- - dAtA[i] = 0x18 - return len(dAtA) - i, nil -} -func (m *AnyValue_DoubleValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AnyValue_DoubleValue) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.DoubleValue)))) - i-- - dAtA[i] = 0x21 - return len(dAtA) - i, nil -} -func (m *AnyValue_ArrayValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AnyValue_ArrayValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.ArrayValue != nil { - { - size, err := m.ArrayValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCommon(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - return len(dAtA) - i, nil -} -func (m *AnyValue_KvlistValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AnyValue_KvlistValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.KvlistValue != nil { - { - size, err := m.KvlistValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCommon(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - return len(dAtA) - i, nil -} -func (m *ArrayValue) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ArrayValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ArrayValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Values) > 0 { - for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Values[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCommon(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *KeyValueList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *KeyValueList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *KeyValueList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Values) > 0 { - for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Values[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCommon(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *KeyValue) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *KeyValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *KeyValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Value.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCommon(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintCommon(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - 
} - return len(dAtA) - i, nil -} - -func (m *StringKeyValue) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StringKeyValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StringKeyValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintCommon(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x12 - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintCommon(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *InstrumentationLibrary) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *InstrumentationLibrary) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *InstrumentationLibrary) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Version) > 0 { - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintCommon(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintCommon(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintCommon(dAtA []byte, offset int, v uint64) int { - offset -= sovCommon(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *AnyValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Value != nil { - n += m.Value.Size() - } - return n -} - -func (m *AnyValue_StringValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.StringValue) - n += 1 + l + sovCommon(uint64(l)) - return n -} -func (m *AnyValue_BoolValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 2 - return n -} -func (m *AnyValue_IntValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovCommon(uint64(m.IntValue)) - return n -} -func (m *AnyValue_DoubleValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 9 - return n -} -func (m *AnyValue_ArrayValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ArrayValue != nil { - l = m.ArrayValue.Size() - n += 1 + l + sovCommon(uint64(l)) - } - return n -} -func (m *AnyValue_KvlistValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.KvlistValue != nil { - l = m.KvlistValue.Size() - n += 1 + l + sovCommon(uint64(l)) - } - return n -} -func (m *ArrayValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Values) > 0 { - for _, e := range m.Values { - l = e.Size() - n += 1 + l + sovCommon(uint64(l)) - } - } - return n -} - -func (m *KeyValueList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Values) > 0 { - for _, e := range m.Values { - l = e.Size() - n += 1 + l + sovCommon(uint64(l)) - } - } - return n -} - -func (m *KeyValue) Size() (n int) { - if m == nil 
{ - return 0 - } - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovCommon(uint64(l)) - } - l = m.Value.Size() - n += 1 + l + sovCommon(uint64(l)) - return n -} - -func (m *StringKeyValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovCommon(uint64(l)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovCommon(uint64(l)) - } - return n -} - -func (m *InstrumentationLibrary) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovCommon(uint64(l)) - } - l = len(m.Version) - if l > 0 { - n += 1 + l + sovCommon(uint64(l)) - } - return n -} - -func sovCommon(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozCommon(x uint64) (n int) { - return sovCommon(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *AnyValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AnyValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AnyValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = &AnyValue_StringValue{string(dAtA[iNdEx:postIndex])} - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Value = &AnyValue_BoolValue{b} - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Value = &AnyValue_IntValue{v} - case 4: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field DoubleValue", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.Value = &AnyValue_DoubleValue{float64(math.Float64frombits(v))} - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ArrayValue", wireType) - } - var msglen int - for shift := uint(0); ; 
shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &ArrayValue{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &AnyValue_ArrayValue{v} - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KvlistValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &KeyValueList{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &AnyValue_KvlistValue{v} - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCommon(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCommon - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ArrayValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ArrayValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ArrayValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Values = append(m.Values, AnyValue{}) - if err := m.Values[len(m.Values)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCommon(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCommon - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *KeyValueList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 
64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: KeyValueList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: KeyValueList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Values = append(m.Values, KeyValue{}) - if err := m.Values[len(m.Values)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCommon(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCommon - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *KeyValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: KeyValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: KeyValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - 
default: - iNdEx = preIndex - skippy, err := skipCommon(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCommon - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StringKeyValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StringKeyValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StringKeyValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCommon(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCommon - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *InstrumentationLibrary) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: InstrumentationLibrary: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: InstrumentationLibrary: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 
- for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommon - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCommon - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCommon - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCommon(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCommon - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipCommon(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCommon - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCommon - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCommon - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthCommon - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupCommon - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthCommon - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthCommon = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowCommon = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupCommon = fmt.Errorf("proto: unexpected end of group") -) diff --git a/internal/otel_collector/internal/data/protogen/logs/v1/logs.pb.go b/internal/otel_collector/internal/data/protogen/logs/v1/logs.pb.go deleted file mode 100644 index 264b5c4f908..00000000000 --- a/internal/otel_collector/internal/data/protogen/logs/v1/logs.pb.go +++ /dev/null @@ -1,1377 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: opentelemetry/proto/logs/v1/logs.proto - -package v1 - -import ( - encoding_binary "encoding/binary" - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - - go_opentelemetry_io_collector_internal_data "go.opentelemetry.io/collector/internal/data" - v11 "go.opentelemetry.io/collector/internal/data/protogen/common/v1" - v1 "go.opentelemetry.io/collector/internal/data/protogen/resource/v1" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// Possible values for LogRecord.SeverityNumber. -type SeverityNumber int32 - -const ( - // UNSPECIFIED is the default SeverityNumber, it MUST not be used. - SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED SeverityNumber = 0 - SeverityNumber_SEVERITY_NUMBER_TRACE SeverityNumber = 1 - SeverityNumber_SEVERITY_NUMBER_TRACE2 SeverityNumber = 2 - SeverityNumber_SEVERITY_NUMBER_TRACE3 SeverityNumber = 3 - SeverityNumber_SEVERITY_NUMBER_TRACE4 SeverityNumber = 4 - SeverityNumber_SEVERITY_NUMBER_DEBUG SeverityNumber = 5 - SeverityNumber_SEVERITY_NUMBER_DEBUG2 SeverityNumber = 6 - SeverityNumber_SEVERITY_NUMBER_DEBUG3 SeverityNumber = 7 - SeverityNumber_SEVERITY_NUMBER_DEBUG4 SeverityNumber = 8 - SeverityNumber_SEVERITY_NUMBER_INFO SeverityNumber = 9 - SeverityNumber_SEVERITY_NUMBER_INFO2 SeverityNumber = 10 - SeverityNumber_SEVERITY_NUMBER_INFO3 SeverityNumber = 11 - SeverityNumber_SEVERITY_NUMBER_INFO4 SeverityNumber = 12 - SeverityNumber_SEVERITY_NUMBER_WARN SeverityNumber = 13 - SeverityNumber_SEVERITY_NUMBER_WARN2 SeverityNumber = 14 - SeverityNumber_SEVERITY_NUMBER_WARN3 SeverityNumber = 15 - SeverityNumber_SEVERITY_NUMBER_WARN4 SeverityNumber = 16 - SeverityNumber_SEVERITY_NUMBER_ERROR SeverityNumber = 17 - SeverityNumber_SEVERITY_NUMBER_ERROR2 SeverityNumber = 18 - SeverityNumber_SEVERITY_NUMBER_ERROR3 SeverityNumber = 19 - SeverityNumber_SEVERITY_NUMBER_ERROR4 SeverityNumber = 20 - SeverityNumber_SEVERITY_NUMBER_FATAL SeverityNumber = 21 - SeverityNumber_SEVERITY_NUMBER_FATAL2 SeverityNumber = 22 - SeverityNumber_SEVERITY_NUMBER_FATAL3 SeverityNumber = 23 - SeverityNumber_SEVERITY_NUMBER_FATAL4 SeverityNumber = 24 -) - -var SeverityNumber_name = map[int32]string{ - 0: "SEVERITY_NUMBER_UNSPECIFIED", - 1: "SEVERITY_NUMBER_TRACE", - 2: "SEVERITY_NUMBER_TRACE2", - 3: "SEVERITY_NUMBER_TRACE3", - 4: "SEVERITY_NUMBER_TRACE4", - 5: "SEVERITY_NUMBER_DEBUG", - 6: "SEVERITY_NUMBER_DEBUG2", - 7: "SEVERITY_NUMBER_DEBUG3", - 8: "SEVERITY_NUMBER_DEBUG4", - 9: "SEVERITY_NUMBER_INFO", - 10: "SEVERITY_NUMBER_INFO2", - 11: "SEVERITY_NUMBER_INFO3", - 12: "SEVERITY_NUMBER_INFO4", - 13: "SEVERITY_NUMBER_WARN", - 14: "SEVERITY_NUMBER_WARN2", - 15: "SEVERITY_NUMBER_WARN3", - 16: "SEVERITY_NUMBER_WARN4", - 17: "SEVERITY_NUMBER_ERROR", - 18: "SEVERITY_NUMBER_ERROR2", - 19: "SEVERITY_NUMBER_ERROR3", - 20: "SEVERITY_NUMBER_ERROR4", - 21: "SEVERITY_NUMBER_FATAL", - 22: "SEVERITY_NUMBER_FATAL2", - 23: "SEVERITY_NUMBER_FATAL3", - 24: "SEVERITY_NUMBER_FATAL4", -} - -var SeverityNumber_value = map[string]int32{ - "SEVERITY_NUMBER_UNSPECIFIED": 0, - "SEVERITY_NUMBER_TRACE": 1, - 
"SEVERITY_NUMBER_TRACE2": 2, - "SEVERITY_NUMBER_TRACE3": 3, - "SEVERITY_NUMBER_TRACE4": 4, - "SEVERITY_NUMBER_DEBUG": 5, - "SEVERITY_NUMBER_DEBUG2": 6, - "SEVERITY_NUMBER_DEBUG3": 7, - "SEVERITY_NUMBER_DEBUG4": 8, - "SEVERITY_NUMBER_INFO": 9, - "SEVERITY_NUMBER_INFO2": 10, - "SEVERITY_NUMBER_INFO3": 11, - "SEVERITY_NUMBER_INFO4": 12, - "SEVERITY_NUMBER_WARN": 13, - "SEVERITY_NUMBER_WARN2": 14, - "SEVERITY_NUMBER_WARN3": 15, - "SEVERITY_NUMBER_WARN4": 16, - "SEVERITY_NUMBER_ERROR": 17, - "SEVERITY_NUMBER_ERROR2": 18, - "SEVERITY_NUMBER_ERROR3": 19, - "SEVERITY_NUMBER_ERROR4": 20, - "SEVERITY_NUMBER_FATAL": 21, - "SEVERITY_NUMBER_FATAL2": 22, - "SEVERITY_NUMBER_FATAL3": 23, - "SEVERITY_NUMBER_FATAL4": 24, -} - -func (x SeverityNumber) String() string { - return proto.EnumName(SeverityNumber_name, int32(x)) -} - -func (SeverityNumber) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_d1c030a3ec7e961e, []int{0} -} - -// Masks for LogRecord.flags field. -type LogRecordFlags int32 - -const ( - LogRecordFlags_LOG_RECORD_FLAG_UNSPECIFIED LogRecordFlags = 0 - LogRecordFlags_LOG_RECORD_FLAG_TRACE_FLAGS_MASK LogRecordFlags = 255 -) - -var LogRecordFlags_name = map[int32]string{ - 0: "LOG_RECORD_FLAG_UNSPECIFIED", - 255: "LOG_RECORD_FLAG_TRACE_FLAGS_MASK", -} - -var LogRecordFlags_value = map[string]int32{ - "LOG_RECORD_FLAG_UNSPECIFIED": 0, - "LOG_RECORD_FLAG_TRACE_FLAGS_MASK": 255, -} - -func (x LogRecordFlags) String() string { - return proto.EnumName(LogRecordFlags_name, int32(x)) -} - -func (LogRecordFlags) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_d1c030a3ec7e961e, []int{1} -} - -// A collection of InstrumentationLibraryLogs from a Resource. -type ResourceLogs struct { - // The resource for the logs in this message. - // If this field is not set then no resource info is known. - Resource v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource"` - // A list of InstrumentationLibraryLogs that originate from a resource. - InstrumentationLibraryLogs []*InstrumentationLibraryLogs `protobuf:"bytes,2,rep,name=instrumentation_library_logs,json=instrumentationLibraryLogs,proto3" json:"instrumentation_library_logs,omitempty"` -} - -func (m *ResourceLogs) Reset() { *m = ResourceLogs{} } -func (m *ResourceLogs) String() string { return proto.CompactTextString(m) } -func (*ResourceLogs) ProtoMessage() {} -func (*ResourceLogs) Descriptor() ([]byte, []int) { - return fileDescriptor_d1c030a3ec7e961e, []int{0} -} -func (m *ResourceLogs) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceLogs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResourceLogs.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResourceLogs) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceLogs.Merge(m, src) -} -func (m *ResourceLogs) XXX_Size() int { - return m.Size() -} -func (m *ResourceLogs) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceLogs.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceLogs proto.InternalMessageInfo - -func (m *ResourceLogs) GetResource() v1.Resource { - if m != nil { - return m.Resource - } - return v1.Resource{} -} - -func (m *ResourceLogs) GetInstrumentationLibraryLogs() []*InstrumentationLibraryLogs { - if m != nil { - return m.InstrumentationLibraryLogs - } - return nil -} - -// A collection of Logs produced by an InstrumentationLibrary. 
-type InstrumentationLibraryLogs struct { - // The instrumentation library information for the logs in this message. - // If this field is not set then no library info is known. - InstrumentationLibrary v11.InstrumentationLibrary `protobuf:"bytes,1,opt,name=instrumentation_library,json=instrumentationLibrary,proto3" json:"instrumentation_library"` - // A list of log records. - Logs []*LogRecord `protobuf:"bytes,2,rep,name=logs,proto3" json:"logs,omitempty"` -} - -func (m *InstrumentationLibraryLogs) Reset() { *m = InstrumentationLibraryLogs{} } -func (m *InstrumentationLibraryLogs) String() string { return proto.CompactTextString(m) } -func (*InstrumentationLibraryLogs) ProtoMessage() {} -func (*InstrumentationLibraryLogs) Descriptor() ([]byte, []int) { - return fileDescriptor_d1c030a3ec7e961e, []int{1} -} -func (m *InstrumentationLibraryLogs) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *InstrumentationLibraryLogs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_InstrumentationLibraryLogs.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *InstrumentationLibraryLogs) XXX_Merge(src proto.Message) { - xxx_messageInfo_InstrumentationLibraryLogs.Merge(m, src) -} -func (m *InstrumentationLibraryLogs) XXX_Size() int { - return m.Size() -} -func (m *InstrumentationLibraryLogs) XXX_DiscardUnknown() { - xxx_messageInfo_InstrumentationLibraryLogs.DiscardUnknown(m) -} - -var xxx_messageInfo_InstrumentationLibraryLogs proto.InternalMessageInfo - -func (m *InstrumentationLibraryLogs) GetInstrumentationLibrary() v11.InstrumentationLibrary { - if m != nil { - return m.InstrumentationLibrary - } - return v11.InstrumentationLibrary{} -} - -func (m *InstrumentationLibraryLogs) GetLogs() []*LogRecord { - if m != nil { - return m.Logs - } - return nil -} - -// A log record according to OpenTelemetry Log Data Model: -// https://github.com/open-telemetry/oteps/blob/master/text/logs/0097-log-data-model.md -type LogRecord struct { - // time_unix_nano is the time when the event occurred. - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // Value of 0 indicates unknown or missing timestamp. - TimeUnixNano uint64 `protobuf:"fixed64,1,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` - // Numerical value of the severity, normalized to values described in Log Data Model. - // [Optional]. - SeverityNumber SeverityNumber `protobuf:"varint,2,opt,name=severity_number,json=severityNumber,proto3,enum=opentelemetry.proto.logs.v1.SeverityNumber" json:"severity_number,omitempty"` - // The severity text (also known as log level). The original string representation as - // it is known at the source. [Optional]. - SeverityText string `protobuf:"bytes,3,opt,name=severity_text,json=severityText,proto3" json:"severity_text,omitempty"` - // Short event identifier that does not contain varying parts. Name describes - // what happened (e.g. "ProcessStarted"). Recommended to be no longer than 50 - // characters. Not guaranteed to be unique in any way. [Optional]. - Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` - // A value containing the body of the log record. 
Can be for example a human-readable - // string message (including multi-line) describing the event in a free form or it can - // be a structured data composed of arrays and maps of other values. [Optional]. - Body v11.AnyValue `protobuf:"bytes,5,opt,name=body,proto3" json:"body"` - // Additional attributes that describe the specific event occurrence. [Optional]. - Attributes []v11.KeyValue `protobuf:"bytes,6,rep,name=attributes,proto3" json:"attributes"` - DroppedAttributesCount uint32 `protobuf:"varint,7,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` - // Flags, a bit field. 8 least significant bits are the trace flags as - // defined in W3C Trace Context specification. 24 most significant bits are reserved - // and must be set to 0. Readers must not assume that 24 most significant bits - // will be zero and must correctly mask the bits when reading 8-bit trace flag (use - // flags & TRACE_FLAGS_MASK). [Optional]. - Flags uint32 `protobuf:"fixed32,8,opt,name=flags,proto3" json:"flags,omitempty"` - // A unique identifier for a trace. All logs from the same trace share - // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes - // is considered invalid. Can be set for logs that are part of request processing - // and have an assigned trace id. [Optional]. - TraceId go_opentelemetry_io_collector_internal_data.TraceID `protobuf:"bytes,9,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/internal/data.TraceID" json:"trace_id"` - // A unique identifier for a span within a trace, assigned when the span - // is created. The ID is an 8-byte array. An ID with all zeroes is considered - // invalid. Can be set for logs that are part of a particular processing span. - // If span_id is present trace_id SHOULD be also present. [Optional]. 
- SpanId go_opentelemetry_io_collector_internal_data.SpanID `protobuf:"bytes,10,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/internal/data.SpanID" json:"span_id"` -} - -func (m *LogRecord) Reset() { *m = LogRecord{} } -func (m *LogRecord) String() string { return proto.CompactTextString(m) } -func (*LogRecord) ProtoMessage() {} -func (*LogRecord) Descriptor() ([]byte, []int) { - return fileDescriptor_d1c030a3ec7e961e, []int{2} -} -func (m *LogRecord) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LogRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LogRecord.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LogRecord) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogRecord.Merge(m, src) -} -func (m *LogRecord) XXX_Size() int { - return m.Size() -} -func (m *LogRecord) XXX_DiscardUnknown() { - xxx_messageInfo_LogRecord.DiscardUnknown(m) -} - -var xxx_messageInfo_LogRecord proto.InternalMessageInfo - -func (m *LogRecord) GetTimeUnixNano() uint64 { - if m != nil { - return m.TimeUnixNano - } - return 0 -} - -func (m *LogRecord) GetSeverityNumber() SeverityNumber { - if m != nil { - return m.SeverityNumber - } - return SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED -} - -func (m *LogRecord) GetSeverityText() string { - if m != nil { - return m.SeverityText - } - return "" -} - -func (m *LogRecord) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *LogRecord) GetBody() v11.AnyValue { - if m != nil { - return m.Body - } - return v11.AnyValue{} -} - -func (m *LogRecord) GetAttributes() []v11.KeyValue { - if m != nil { - return m.Attributes - } - return nil -} - -func (m *LogRecord) GetDroppedAttributesCount() uint32 { - if m != nil { - return m.DroppedAttributesCount - } - return 0 -} - -func (m *LogRecord) GetFlags() uint32 { - if m != nil { - return m.Flags - } - return 0 -} - -func init() { - proto.RegisterEnum("opentelemetry.proto.logs.v1.SeverityNumber", SeverityNumber_name, SeverityNumber_value) - proto.RegisterEnum("opentelemetry.proto.logs.v1.LogRecordFlags", LogRecordFlags_name, LogRecordFlags_value) - proto.RegisterType((*ResourceLogs)(nil), "opentelemetry.proto.logs.v1.ResourceLogs") - proto.RegisterType((*InstrumentationLibraryLogs)(nil), "opentelemetry.proto.logs.v1.InstrumentationLibraryLogs") - proto.RegisterType((*LogRecord)(nil), "opentelemetry.proto.logs.v1.LogRecord") -} - -func init() { - proto.RegisterFile("opentelemetry/proto/logs/v1/logs.proto", fileDescriptor_d1c030a3ec7e961e) -} - -var fileDescriptor_d1c030a3ec7e961e = []byte{ - // 846 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x96, 0xcf, 0x6f, 0x22, 0x37, - 0x14, 0xc7, 0x71, 0x42, 0x20, 0x71, 0x08, 0xeb, 0xba, 0xd9, 0xec, 0x94, 0x54, 0x04, 0xa5, 0xed, - 0x96, 0xa6, 0x12, 0x28, 0x03, 0x55, 0xab, 0x6d, 0x2f, 0x43, 0x18, 0xa2, 0x51, 0x08, 0x89, 0x0c, - 0x49, 0x7f, 0x5c, 0x46, 0x03, 0xb8, 0x68, 0x24, 0xb0, 0xd1, 0x8c, 0x89, 0xc2, 0xb9, 0xff, 0x40, - 0xff, 0xa7, 0x5e, 0xf6, 0xd4, 0xee, 0xa9, 0xaa, 0x7a, 0x58, 0x55, 0xc9, 0x1f, 0xd2, 0xca, 0x66, - 0xa0, 0x0b, 0x1a, 0xb3, 0xca, 0x09, 0xcf, 0xfb, 0xbc, 0xef, 0xf7, 0x3d, 0x3f, 0xc6, 0x06, 0xf8, - 0x92, 0x8f, 0x29, 0x13, 0x74, 0x48, 0x47, 0x54, 0x04, 0xd3, 0xf2, 0x38, 0xe0, 0x82, 0x97, 0x87, - 0x7c, 0x10, 0x96, 0xef, 0x4e, 0xd5, 0x67, 0x49, 0x85, 0xf0, 
0xe1, 0x52, 0xde, 0x2c, 0x58, 0x52, - 0xfc, 0xee, 0x34, 0xb7, 0x3f, 0xe0, 0x03, 0x3e, 0x93, 0xca, 0xd5, 0x8c, 0xe6, 0x4e, 0xe2, 0xac, - 0x7b, 0x7c, 0x34, 0xe2, 0x4c, 0x9a, 0xcf, 0x56, 0x51, 0x6e, 0x29, 0x2e, 0x37, 0xa0, 0x21, 0x9f, - 0x04, 0x3d, 0x2a, 0xb3, 0xe7, 0xeb, 0x59, 0xfe, 0xf1, 0x9f, 0x00, 0x66, 0x48, 0x14, 0x6a, 0xf2, - 0x41, 0x88, 0x2f, 0xe0, 0xf6, 0x3c, 0xc5, 0x00, 0x05, 0x50, 0xdc, 0x35, 0xbf, 0x28, 0xc5, 0xb5, - 0xbc, 0xf0, 0xb9, 0x3b, 0x2d, 0xcd, 0x0d, 0x6a, 0xc9, 0xd7, 0x6f, 0x8f, 0x12, 0x64, 0x61, 0x80, - 0xa7, 0xf0, 0x63, 0x9f, 0x85, 0x22, 0x98, 0x8c, 0x28, 0x13, 0x9e, 0xf0, 0x39, 0x73, 0x87, 0x7e, - 0x37, 0xf0, 0x82, 0xa9, 0x2b, 0xb7, 0x6c, 0x6c, 0x14, 0x36, 0x8b, 0xbb, 0xe6, 0xd7, 0xa5, 0x35, - 0x33, 0x29, 0x39, 0xcb, 0x06, 0xcd, 0x99, 0x5e, 0xf6, 0x4a, 0x72, 0xbe, 0x96, 0x1d, 0xff, 0x0e, - 0x60, 0x4e, 0x2f, 0xc5, 0x02, 0xbe, 0xd0, 0x74, 0x16, 0xed, 0xfa, 0xab, 0xd8, 0xa6, 0xa2, 0x59, - 0x6b, 0xdb, 0x8a, 0x26, 0x70, 0x10, 0xdf, 0x18, 0x7e, 0x05, 0x93, 0xef, 0xec, 0xfb, 0xe5, 0xda, - 0x7d, 0x37, 0xf9, 0x80, 0xd0, 0x1e, 0x0f, 0xfa, 0x44, 0x69, 0x8e, 0xff, 0x48, 0xc2, 0x9d, 0x45, - 0x0c, 0x7f, 0x0a, 0xb3, 0xc2, 0x1f, 0x51, 0x77, 0xc2, 0xfc, 0x7b, 0x97, 0x79, 0x8c, 0xab, 0xb6, - 0x53, 0x24, 0x23, 0xa3, 0x37, 0xcc, 0xbf, 0x6f, 0x79, 0x8c, 0xe3, 0x0e, 0x7c, 0x16, 0xd2, 0x3b, - 0x1a, 0xf8, 0x62, 0xea, 0xb2, 0xc9, 0xa8, 0x4b, 0x03, 0x63, 0xa3, 0x00, 0x8a, 0x59, 0xf3, 0xcb, - 0xb5, 0xa5, 0xdb, 0x91, 0xa6, 0xa5, 0x24, 0x24, 0x1b, 0x2e, 0x3d, 0xe3, 0x4f, 0xe0, 0xde, 0xc2, - 0x55, 0xd0, 0x7b, 0x61, 0x6c, 0x16, 0x40, 0x71, 0x87, 0x64, 0xe6, 0xc1, 0x0e, 0xbd, 0x17, 0x18, - 0xc3, 0x24, 0xf3, 0x46, 0xd4, 0x48, 0x2a, 0xa6, 0xd6, 0xd8, 0x82, 0xc9, 0x2e, 0xef, 0x4f, 0x8d, - 0x2d, 0x35, 0xe1, 0xcf, 0xdf, 0x33, 0x61, 0x8b, 0x4d, 0x6f, 0xbd, 0xe1, 0x64, 0xfe, 0x56, 0x29, - 0x29, 0xbe, 0x84, 0xd0, 0x13, 0x22, 0xf0, 0xbb, 0x13, 0x41, 0x43, 0x23, 0xa5, 0xe6, 0xf8, 0x3e, - 0xa3, 0x0b, 0xba, 0x64, 0xf4, 0x8e, 0x01, 0xfe, 0x06, 0x1a, 0xfd, 0x80, 0x8f, 0xc7, 0xb4, 0xef, - 0xfe, 0x1f, 0x75, 0x7b, 0x7c, 0xc2, 0x84, 0x91, 0x2e, 0x80, 0xe2, 0x1e, 0x39, 0x88, 0xb8, 0xb5, - 0xc0, 0x67, 0x92, 0xe2, 0x7d, 0xb8, 0xf5, 0xf3, 0xd0, 0x1b, 0x84, 0xc6, 0x76, 0x01, 0x14, 0xd3, - 0x64, 0xf6, 0x80, 0x6f, 0xe1, 0xb6, 0x08, 0xbc, 0x1e, 0x75, 0xfd, 0xbe, 0xb1, 0x53, 0x00, 0xc5, - 0x4c, 0xed, 0x5b, 0x59, 0xf3, 0xef, 0xb7, 0x47, 0x95, 0x01, 0x5f, 0x69, 0xd3, 0x97, 0x87, 0x78, - 0x38, 0xa4, 0x3d, 0xc1, 0x83, 0xb2, 0xcf, 0x04, 0x0d, 0x98, 0x37, 0x2c, 0xf7, 0x3d, 0xe1, 0x95, - 0x3a, 0xd2, 0xc3, 0xa9, 0x93, 0xb4, 0x32, 0x73, 0xfa, 0xb8, 0x0d, 0xd3, 0xe1, 0xd8, 0x63, 0xd2, - 0x16, 0x2a, 0xdb, 0x57, 0x91, 0xad, 0xf9, 0x14, 0xdb, 0xf6, 0xd8, 0x63, 0x4e, 0x9d, 0xa4, 0xa4, - 0x95, 0xd3, 0x3f, 0xf9, 0x6d, 0x0b, 0x66, 0x97, 0xbf, 0x6a, 0x7c, 0x04, 0x0f, 0xdb, 0xf6, 0xad, - 0x4d, 0x9c, 0xce, 0x8f, 0x6e, 0xeb, 0xe6, 0xb2, 0x66, 0x13, 0xf7, 0xa6, 0xd5, 0xbe, 0xb6, 0xcf, - 0x9c, 0x86, 0x63, 0xd7, 0x51, 0x02, 0x7f, 0x04, 0x9f, 0xaf, 0x26, 0x74, 0x88, 0x75, 0x66, 0x23, - 0x80, 0x73, 0xf0, 0x20, 0x16, 0x99, 0x68, 0x43, 0xcb, 0x2a, 0x68, 0x53, 0xcb, 0xaa, 0x28, 0x19, - 0x57, 0xae, 0x6e, 0xd7, 0x6e, 0xce, 0xd1, 0x56, 0x9c, 0x4c, 0x21, 0x13, 0xa5, 0xb4, 0xac, 0x82, - 0xd2, 0x5a, 0x56, 0x45, 0xdb, 0xd8, 0x80, 0xfb, 0xab, 0xcc, 0x69, 0x35, 0xae, 0xd0, 0x4e, 0x5c, - 0x23, 0x92, 0x98, 0x08, 0xea, 0x50, 0x05, 0xed, 0xea, 0x50, 0x15, 0x65, 0xe2, 0x4a, 0x7d, 0x6f, - 0x91, 0x16, 0xda, 0x8b, 0x13, 0x49, 0x62, 0xa2, 0xac, 0x0e, 0x55, 0xd0, 0x33, 0x1d, 0xaa, 0x22, - 0x14, 0x87, 0x6c, 0x42, 0xae, 0x08, 0xfa, 0x20, 0x6e, 0x18, 0x0a, 0x99, 0x08, 0x6b, 
0x59, 0x05, - 0x7d, 0xa8, 0x65, 0x55, 0xb4, 0x1f, 0x57, 0xae, 0x61, 0x75, 0xac, 0x26, 0x7a, 0x1e, 0x27, 0x53, - 0xc8, 0x44, 0x07, 0x5a, 0x56, 0x41, 0x2f, 0xb4, 0xac, 0x8a, 0x8c, 0x93, 0x1f, 0x60, 0x76, 0x71, - 0x2d, 0x36, 0xd4, 0x21, 0x3c, 0x82, 0x87, 0xcd, 0xab, 0x73, 0x97, 0xd8, 0x67, 0x57, 0xa4, 0xee, - 0x36, 0x9a, 0xd6, 0xf9, 0xca, 0x4b, 0xfc, 0x19, 0x2c, 0xac, 0x26, 0xa8, 0x37, 0x4e, 0x2d, 0xdb, - 0xee, 0xa5, 0xd5, 0xbe, 0x40, 0xff, 0x82, 0xda, 0x2f, 0xe0, 0xf5, 0x43, 0x1e, 0xbc, 0x79, 0xc8, - 0x83, 0x7f, 0x1e, 0xf2, 0xe0, 0xd7, 0xc7, 0x7c, 0xe2, 0xcd, 0x63, 0x3e, 0xf1, 0xd7, 0x63, 0x3e, - 0x01, 0xf3, 0x3e, 0x5f, 0x77, 0x83, 0xd6, 0xe4, 0x4d, 0x1d, 0x5e, 0xcb, 0xd0, 0x35, 0xf8, 0xe9, - 0xbb, 0x27, 0x9c, 0xd1, 0xd9, 0x2f, 0xf6, 0x80, 0xb2, 0xf9, 0x7f, 0x87, 0x6e, 0x4a, 0x45, 0x2a, - 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xe1, 0xd4, 0x39, 0x3e, 0x61, 0x08, 0x00, 0x00, -} - -func (m *ResourceLogs) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceLogs) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceLogs) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.InstrumentationLibraryLogs) > 0 { - for iNdEx := len(m.InstrumentationLibraryLogs) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.InstrumentationLibraryLogs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLogs(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLogs(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *InstrumentationLibraryLogs) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *InstrumentationLibraryLogs) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *InstrumentationLibraryLogs) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Logs) > 0 { - for iNdEx := len(m.Logs) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Logs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLogs(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.InstrumentationLibrary.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLogs(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *LogRecord) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LogRecord) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LogRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size := m.SpanId.Size() - i -= size - if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintLogs(dAtA, i, 
uint64(size)) - } - i-- - dAtA[i] = 0x52 - { - size := m.TraceId.Size() - i -= size - if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintLogs(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - if m.Flags != 0 { - i -= 4 - encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(m.Flags)) - i-- - dAtA[i] = 0x45 - } - if m.DroppedAttributesCount != 0 { - i = encodeVarintLogs(dAtA, i, uint64(m.DroppedAttributesCount)) - i-- - dAtA[i] = 0x38 - } - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLogs(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - } - { - size, err := m.Body.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLogs(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintLogs(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x22 - } - if len(m.SeverityText) > 0 { - i -= len(m.SeverityText) - copy(dAtA[i:], m.SeverityText) - i = encodeVarintLogs(dAtA, i, uint64(len(m.SeverityText))) - i-- - dAtA[i] = 0x1a - } - if m.SeverityNumber != 0 { - i = encodeVarintLogs(dAtA, i, uint64(m.SeverityNumber)) - i-- - dAtA[i] = 0x10 - } - if m.TimeUnixNano != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) - i-- - dAtA[i] = 0x9 - } - return len(dAtA) - i, nil -} - -func encodeVarintLogs(dAtA []byte, offset int, v uint64) int { - offset -= sovLogs(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ResourceLogs) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Resource.Size() - n += 1 + l + sovLogs(uint64(l)) - if len(m.InstrumentationLibraryLogs) > 0 { - for _, e := range m.InstrumentationLibraryLogs { - l = e.Size() - n += 1 + l + sovLogs(uint64(l)) - } - } - return n -} - -func (m *InstrumentationLibraryLogs) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.InstrumentationLibrary.Size() - n += 1 + l + sovLogs(uint64(l)) - if len(m.Logs) > 0 { - for _, e := range m.Logs { - l = e.Size() - n += 1 + l + sovLogs(uint64(l)) - } - } - return n -} - -func (m *LogRecord) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.TimeUnixNano != 0 { - n += 9 - } - if m.SeverityNumber != 0 { - n += 1 + sovLogs(uint64(m.SeverityNumber)) - } - l = len(m.SeverityText) - if l > 0 { - n += 1 + l + sovLogs(uint64(l)) - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovLogs(uint64(l)) - } - l = m.Body.Size() - n += 1 + l + sovLogs(uint64(l)) - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovLogs(uint64(l)) - } - } - if m.DroppedAttributesCount != 0 { - n += 1 + sovLogs(uint64(m.DroppedAttributesCount)) - } - if m.Flags != 0 { - n += 5 - } - l = m.TraceId.Size() - n += 1 + l + sovLogs(uint64(l)) - l = m.SpanId.Size() - n += 1 + l + sovLogs(uint64(l)) - return n -} - -func sovLogs(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozLogs(x uint64) (n int) { - return sovLogs(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *ResourceLogs) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift 
+= 7 { - if shift >= 64 { - return ErrIntOverflowLogs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceLogs: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceLogs: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLogs - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLogs - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InstrumentationLibraryLogs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLogs - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLogs - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.InstrumentationLibraryLogs = append(m.InstrumentationLibraryLogs, &InstrumentationLibraryLogs{}) - if err := m.InstrumentationLibraryLogs[len(m.InstrumentationLibraryLogs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLogs(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLogs - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *InstrumentationLibraryLogs) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: InstrumentationLibraryLogs: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: InstrumentationLibraryLogs: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InstrumentationLibrary", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLogs - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return 
ErrInvalidLengthLogs - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.InstrumentationLibrary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Logs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLogs - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLogs - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Logs = append(m.Logs, &LogRecord{}) - if err := m.Logs[len(m.Logs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLogs(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLogs - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LogRecord) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LogRecord: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LogRecord: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) - } - m.TimeUnixNano = 0 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SeverityNumber", wireType) - } - m.SeverityNumber = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SeverityNumber |= SeverityNumber(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SeverityText", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLogs - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLogs - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SeverityText = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF 
- } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLogs - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLogs - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Body", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLogs - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLogs - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Body.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLogs - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLogs - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, v11.KeyValue{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType) - } - m.DroppedAttributesCount = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DroppedAttributesCount |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 5 { - return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType) - } - m.Flags = 0 - if (iNdEx + 4) > l { - return io.ErrUnexpectedEOF - } - m.Flags = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) - iNdEx += 4 - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthLogs - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthLogs - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogs - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return 
ErrInvalidLengthLogs - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthLogs - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLogs(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLogs - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipLogs(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLogs - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLogs - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLogs - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthLogs - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupLogs - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthLogs - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthLogs = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowLogs = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupLogs = fmt.Errorf("proto: unexpected end of group") -) diff --git a/internal/otel_collector/internal/data/protogen/metrics/v1/metrics.pb.go b/internal/otel_collector/internal/data/protogen/metrics/v1/metrics.pb.go deleted file mode 100644 index e3d2e7e7303..00000000000 --- a/internal/otel_collector/internal/data/protogen/metrics/v1/metrics.pb.go +++ /dev/null @@ -1,6320 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: opentelemetry/proto/metrics/v1/metrics.proto - -package v1 - -import ( - encoding_binary "encoding/binary" - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - - go_opentelemetry_io_collector_internal_data "go.opentelemetry.io/collector/internal/data" - v11 "go.opentelemetry.io/collector/internal/data/protogen/common/v1" - v1 "go.opentelemetry.io/collector/internal/data/protogen/resource/v1" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// AggregationTemporality defines how a metric aggregator reports aggregated -// values. It describes how those values relate to the time interval over -// which they are aggregated. -type AggregationTemporality int32 - -const ( - // UNSPECIFIED is the default AggregationTemporality, it MUST not be used. - AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED AggregationTemporality = 0 - // DELTA is an AggregationTemporality for a metric aggregator which reports - // changes since last report time. Successive metrics contain aggregation of - // values from continuous and non-overlapping intervals. - // - // The values for a DELTA metric are based only on the time interval - // associated with one measurement cycle. There is no dependency on - // previous measurements like is the case for CUMULATIVE metrics. - // - // For example, consider a system measuring the number of requests that - // it receives and reports the sum of these requests every second as a - // DELTA metric: - // - // 1. The system starts receiving at time=t_0. - // 2. A request is received, the system measures 1 request. - // 3. A request is received, the system measures 1 request. - // 4. A request is received, the system measures 1 request. - // 5. The 1 second collection cycle ends. A metric is exported for the - // number of requests received over the interval of time t_0 to - // t_0+1 with a value of 3. - // 6. A request is received, the system measures 1 request. - // 7. A request is received, the system measures 1 request. - // 8. The 1 second collection cycle ends. A metric is exported for the - // number of requests received over the interval of time t_0+1 to - // t_0+2 with a value of 2. - AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA AggregationTemporality = 1 - // CUMULATIVE is an AggregationTemporality for a metric aggregator which - // reports changes since a fixed start time. This means that current values - // of a CUMULATIVE metric depend on all previous measurements since the - // start time. Because of this, the sender is required to retain this state - // in some form. If this state is lost or invalidated, the CUMULATIVE metric - // values MUST be reset and a new fixed start time following the last - // reported measurement time sent MUST be used. - // - // For example, consider a system measuring the number of requests that - // it receives and reports the sum of these requests every second as a - // CUMULATIVE metric: - // - // 1. The system starts receiving at time=t_0. - // 2. A request is received, the system measures 1 request. - // 3. A request is received, the system measures 1 request. - // 4. A request is received, the system measures 1 request. - // 5. The 1 second collection cycle ends. A metric is exported for the - // number of requests received over the interval of time t_0 to - // t_0+1 with a value of 3. - // 6. A request is received, the system measures 1 request. - // 7. A request is received, the system measures 1 request. - // 8. The 1 second collection cycle ends. A metric is exported for the - // number of requests received over the interval of time t_0 to - // t_0+2 with a value of 5. - // 9. The system experiences a fault and loses state. - // 10. The system recovers and resumes receiving at time=t_1. - // 11. A request is received, the system measures 1 request. - // 12. The 1 second collection cycle ends. 
A metric is exported for the - // number of requests received over the interval of time t_1 to - // t_0+1 with a value of 1. - // - // Note: Even though, when reporting changes since last report time, using - // CUMULATIVE is valid, it is not recommended. This may cause problems for - // systems that do not use start_time to determine when the aggregation - // value was reset (e.g. Prometheus). - AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE AggregationTemporality = 2 -) - -var AggregationTemporality_name = map[int32]string{ - 0: "AGGREGATION_TEMPORALITY_UNSPECIFIED", - 1: "AGGREGATION_TEMPORALITY_DELTA", - 2: "AGGREGATION_TEMPORALITY_CUMULATIVE", -} - -var AggregationTemporality_value = map[string]int32{ - "AGGREGATION_TEMPORALITY_UNSPECIFIED": 0, - "AGGREGATION_TEMPORALITY_DELTA": 1, - "AGGREGATION_TEMPORALITY_CUMULATIVE": 2, -} - -func (x AggregationTemporality) String() string { - return proto.EnumName(AggregationTemporality_name, int32(x)) -} - -func (AggregationTemporality) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{0} -} - -// A collection of InstrumentationLibraryMetrics from a Resource. -type ResourceMetrics struct { - // The resource for the metrics in this message. - // If this field is not set then no resource info is known. - Resource v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource"` - // A list of metrics that originate from a resource. - InstrumentationLibraryMetrics []*InstrumentationLibraryMetrics `protobuf:"bytes,2,rep,name=instrumentation_library_metrics,json=instrumentationLibraryMetrics,proto3" json:"instrumentation_library_metrics,omitempty"` -} - -func (m *ResourceMetrics) Reset() { *m = ResourceMetrics{} } -func (m *ResourceMetrics) String() string { return proto.CompactTextString(m) } -func (*ResourceMetrics) ProtoMessage() {} -func (*ResourceMetrics) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{0} -} -func (m *ResourceMetrics) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResourceMetrics.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResourceMetrics) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceMetrics.Merge(m, src) -} -func (m *ResourceMetrics) XXX_Size() int { - return m.Size() -} -func (m *ResourceMetrics) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceMetrics.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceMetrics proto.InternalMessageInfo - -func (m *ResourceMetrics) GetResource() v1.Resource { - if m != nil { - return m.Resource - } - return v1.Resource{} -} - -func (m *ResourceMetrics) GetInstrumentationLibraryMetrics() []*InstrumentationLibraryMetrics { - if m != nil { - return m.InstrumentationLibraryMetrics - } - return nil -} - -// A collection of Metrics produced by an InstrumentationLibrary. -type InstrumentationLibraryMetrics struct { - // The instrumentation library information for the metrics in this message. - // If this field is not set then no library info is known. - InstrumentationLibrary v11.InstrumentationLibrary `protobuf:"bytes,1,opt,name=instrumentation_library,json=instrumentationLibrary,proto3" json:"instrumentation_library"` - // A list of metrics that originate from an instrumentation library. 
- Metrics []*Metric `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"` -} - -func (m *InstrumentationLibraryMetrics) Reset() { *m = InstrumentationLibraryMetrics{} } -func (m *InstrumentationLibraryMetrics) String() string { return proto.CompactTextString(m) } -func (*InstrumentationLibraryMetrics) ProtoMessage() {} -func (*InstrumentationLibraryMetrics) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{1} -} -func (m *InstrumentationLibraryMetrics) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *InstrumentationLibraryMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_InstrumentationLibraryMetrics.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *InstrumentationLibraryMetrics) XXX_Merge(src proto.Message) { - xxx_messageInfo_InstrumentationLibraryMetrics.Merge(m, src) -} -func (m *InstrumentationLibraryMetrics) XXX_Size() int { - return m.Size() -} -func (m *InstrumentationLibraryMetrics) XXX_DiscardUnknown() { - xxx_messageInfo_InstrumentationLibraryMetrics.DiscardUnknown(m) -} - -var xxx_messageInfo_InstrumentationLibraryMetrics proto.InternalMessageInfo - -func (m *InstrumentationLibraryMetrics) GetInstrumentationLibrary() v11.InstrumentationLibrary { - if m != nil { - return m.InstrumentationLibrary - } - return v11.InstrumentationLibrary{} -} - -func (m *InstrumentationLibraryMetrics) GetMetrics() []*Metric { - if m != nil { - return m.Metrics - } - return nil -} - -// Defines a Metric which has one or more timeseries. -// -// The data model and relation between entities is shown in the -// diagram below. Here, "DataPoint" is the term used to refer to any -// one of the specific data point value types, and "points" is the term used -// to refer to any one of the lists of points contained in the Metric. -// -// - Metric is composed of a metadata and data. -// - Metadata part contains a name, description, unit. -// - Data is one of the possible types (Gauge, Sum, Histogram, etc.). -// - DataPoint contains timestamps, labels, and one of the possible value type -// fields. -// -// Metric -// +------------+ -// |name | -// |description | -// |unit | +------------------------------------+ -// |data |---> |Gauge, Sum, Histogram, Summary, ... | -// +------------+ +------------------------------------+ -// -// Data [One of Gauge, Sum, Histogram, Summary, ...] -// +-----------+ -// |... | // Metadata about the Data. -// |points |--+ -// +-----------+ | -// | +---------------------------+ -// | |DataPoint 1 | -// v |+------+------+ +------+ | -// +-----+ ||label |label |...|label | | -// | 1 |-->||value1|value2|...|valueN| | -// +-----+ |+------+------+ +------+ | -// | . | |+-----+ | -// | . | ||value| | -// | . | |+-----+ | -// | . | +---------------------------+ -// | . | . -// | . | . -// | . | . -// | . | +---------------------------+ -// | . | |DataPoint M | -// +-----+ |+------+------+ +------+ | -// | M |-->||label |label |...|label | | -// +-----+ ||value1|value2|...|valueN| | -// |+------+------+ +------+ | -// |+-----+ | -// ||value| | -// |+-----+ | -// +---------------------------+ -// -// All DataPoint types have three common fields: -// - Labels zero or more key-value pairs associated with the data point. -// - StartTimeUnixNano MUST be set to the start of the interval when the data's -// type includes an AggregationTemporality. 
This field is not set otherwise. -// - TimeUnixNano MUST be set to: -// - the moment when an aggregation is reported (independent of the -// aggregation temporality). -// - the instantaneous time of the event. -type Metric struct { - // name of the metric, including its DNS name prefix. It must be unique. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // description of the metric, which can be used in documentation. - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - // unit in which the metric value is reported. Follows the format - // described by http://unitsofmeasure.org/ucum.html. - Unit string `protobuf:"bytes,3,opt,name=unit,proto3" json:"unit,omitempty"` - // Data determines the aggregation type (if any) of the metric, what is the - // reported value type for the data points, as well as the relatationship to - // the time interval over which they are reported. - // - // TODO: Update table after the decision on: - // https://github.com/open-telemetry/opentelemetry-specification/issues/731. - // By default, metrics recording using the OpenTelemetry API are exported as - // (the table does not include MeasurementValueType to avoid extra rows): - // - // Instrument Type - // ---------------------------------------------- - // Counter Sum(aggregation_temporality=delta;is_monotonic=true) - // UpDownCounter Sum(aggregation_temporality=delta;is_monotonic=false) - // ValueRecorder TBD - // SumObserver Sum(aggregation_temporality=cumulative;is_monotonic=true) - // UpDownSumObserver Sum(aggregation_temporality=cumulative;is_monotonic=false) - // ValueObserver Gauge() - // - // Types that are valid to be assigned to Data: - // *Metric_IntGauge - // *Metric_DoubleGauge - // *Metric_IntSum - // *Metric_DoubleSum - // *Metric_IntHistogram - // *Metric_DoubleHistogram - // *Metric_DoubleSummary - Data isMetric_Data `protobuf_oneof:"data"` -} - -func (m *Metric) Reset() { *m = Metric{} } -func (m *Metric) String() string { return proto.CompactTextString(m) } -func (*Metric) ProtoMessage() {} -func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{2} -} -func (m *Metric) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Metric.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Metric) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metric.Merge(m, src) -} -func (m *Metric) XXX_Size() int { - return m.Size() -} -func (m *Metric) XXX_DiscardUnknown() { - xxx_messageInfo_Metric.DiscardUnknown(m) -} - -var xxx_messageInfo_Metric proto.InternalMessageInfo - -type isMetric_Data interface { - isMetric_Data() - MarshalTo([]byte) (int, error) - Size() int -} - -type Metric_IntGauge struct { - IntGauge *IntGauge `protobuf:"bytes,4,opt,name=int_gauge,json=intGauge,proto3,oneof" json:"int_gauge,omitempty"` -} -type Metric_DoubleGauge struct { - DoubleGauge *DoubleGauge `protobuf:"bytes,5,opt,name=double_gauge,json=doubleGauge,proto3,oneof" json:"double_gauge,omitempty"` -} -type Metric_IntSum struct { - IntSum *IntSum `protobuf:"bytes,6,opt,name=int_sum,json=intSum,proto3,oneof" json:"int_sum,omitempty"` -} -type Metric_DoubleSum struct { - DoubleSum *DoubleSum `protobuf:"bytes,7,opt,name=double_sum,json=doubleSum,proto3,oneof" 
json:"double_sum,omitempty"` -} -type Metric_IntHistogram struct { - IntHistogram *IntHistogram `protobuf:"bytes,8,opt,name=int_histogram,json=intHistogram,proto3,oneof" json:"int_histogram,omitempty"` -} -type Metric_DoubleHistogram struct { - DoubleHistogram *DoubleHistogram `protobuf:"bytes,9,opt,name=double_histogram,json=doubleHistogram,proto3,oneof" json:"double_histogram,omitempty"` -} -type Metric_DoubleSummary struct { - DoubleSummary *DoubleSummary `protobuf:"bytes,11,opt,name=double_summary,json=doubleSummary,proto3,oneof" json:"double_summary,omitempty"` -} - -func (*Metric_IntGauge) isMetric_Data() {} -func (*Metric_DoubleGauge) isMetric_Data() {} -func (*Metric_IntSum) isMetric_Data() {} -func (*Metric_DoubleSum) isMetric_Data() {} -func (*Metric_IntHistogram) isMetric_Data() {} -func (*Metric_DoubleHistogram) isMetric_Data() {} -func (*Metric_DoubleSummary) isMetric_Data() {} - -func (m *Metric) GetData() isMetric_Data { - if m != nil { - return m.Data - } - return nil -} - -func (m *Metric) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Metric) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Metric) GetUnit() string { - if m != nil { - return m.Unit - } - return "" -} - -func (m *Metric) GetIntGauge() *IntGauge { - if x, ok := m.GetData().(*Metric_IntGauge); ok { - return x.IntGauge - } - return nil -} - -func (m *Metric) GetDoubleGauge() *DoubleGauge { - if x, ok := m.GetData().(*Metric_DoubleGauge); ok { - return x.DoubleGauge - } - return nil -} - -func (m *Metric) GetIntSum() *IntSum { - if x, ok := m.GetData().(*Metric_IntSum); ok { - return x.IntSum - } - return nil -} - -func (m *Metric) GetDoubleSum() *DoubleSum { - if x, ok := m.GetData().(*Metric_DoubleSum); ok { - return x.DoubleSum - } - return nil -} - -func (m *Metric) GetIntHistogram() *IntHistogram { - if x, ok := m.GetData().(*Metric_IntHistogram); ok { - return x.IntHistogram - } - return nil -} - -func (m *Metric) GetDoubleHistogram() *DoubleHistogram { - if x, ok := m.GetData().(*Metric_DoubleHistogram); ok { - return x.DoubleHistogram - } - return nil -} - -func (m *Metric) GetDoubleSummary() *DoubleSummary { - if x, ok := m.GetData().(*Metric_DoubleSummary); ok { - return x.DoubleSummary - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*Metric) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*Metric_IntGauge)(nil), - (*Metric_DoubleGauge)(nil), - (*Metric_IntSum)(nil), - (*Metric_DoubleSum)(nil), - (*Metric_IntHistogram)(nil), - (*Metric_DoubleHistogram)(nil), - (*Metric_DoubleSummary)(nil), - } -} - -// Gauge represents the type of a int scalar metric that always exports the -// "current value" for every data point. It should be used for an "unknown" -// aggregation. -// -// A Gauge does not support different aggregation temporalities. Given the -// aggregation is unknown, points cannot be combined using the same -// aggregation, regardless of aggregation temporalities. Therefore, -// AggregationTemporality is not included. Consequently, this also means -// "StartTimeUnixNano" is ignored for all data points. 
-type IntGauge struct { - DataPoints []*IntDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` -} - -func (m *IntGauge) Reset() { *m = IntGauge{} } -func (m *IntGauge) String() string { return proto.CompactTextString(m) } -func (*IntGauge) ProtoMessage() {} -func (*IntGauge) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{3} -} -func (m *IntGauge) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IntGauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_IntGauge.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *IntGauge) XXX_Merge(src proto.Message) { - xxx_messageInfo_IntGauge.Merge(m, src) -} -func (m *IntGauge) XXX_Size() int { - return m.Size() -} -func (m *IntGauge) XXX_DiscardUnknown() { - xxx_messageInfo_IntGauge.DiscardUnknown(m) -} - -var xxx_messageInfo_IntGauge proto.InternalMessageInfo - -func (m *IntGauge) GetDataPoints() []*IntDataPoint { - if m != nil { - return m.DataPoints - } - return nil -} - -// Gauge represents the type of a double scalar metric that always exports the -// "current value" for every data point. It should be used for an "unknown" -// aggregation. -// -// A Gauge does not support different aggregation temporalities. Given the -// aggregation is unknown, points cannot be combined using the same -// aggregation, regardless of aggregation temporalities. Therefore, -// AggregationTemporality is not included. Consequently, this also means -// "StartTimeUnixNano" is ignored for all data points. -type DoubleGauge struct { - DataPoints []*DoubleDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` -} - -func (m *DoubleGauge) Reset() { *m = DoubleGauge{} } -func (m *DoubleGauge) String() string { return proto.CompactTextString(m) } -func (*DoubleGauge) ProtoMessage() {} -func (*DoubleGauge) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{4} -} -func (m *DoubleGauge) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DoubleGauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DoubleGauge.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DoubleGauge) XXX_Merge(src proto.Message) { - xxx_messageInfo_DoubleGauge.Merge(m, src) -} -func (m *DoubleGauge) XXX_Size() int { - return m.Size() -} -func (m *DoubleGauge) XXX_DiscardUnknown() { - xxx_messageInfo_DoubleGauge.DiscardUnknown(m) -} - -var xxx_messageInfo_DoubleGauge proto.InternalMessageInfo - -func (m *DoubleGauge) GetDataPoints() []*DoubleDataPoint { - if m != nil { - return m.DataPoints - } - return nil -} - -// Sum represents the type of a numeric int scalar metric that is calculated as -// a sum of all reported measurements over a time interval. -type IntSum struct { - DataPoints []*IntDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` - // aggregation_temporality describes if the aggregator reports delta changes - // since last report time, or cumulative changes since a fixed start time. 
- AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"` - // If "true" means that the sum is monotonic. - IsMonotonic bool `protobuf:"varint,3,opt,name=is_monotonic,json=isMonotonic,proto3" json:"is_monotonic,omitempty"` -} - -func (m *IntSum) Reset() { *m = IntSum{} } -func (m *IntSum) String() string { return proto.CompactTextString(m) } -func (*IntSum) ProtoMessage() {} -func (*IntSum) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{5} -} -func (m *IntSum) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IntSum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_IntSum.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *IntSum) XXX_Merge(src proto.Message) { - xxx_messageInfo_IntSum.Merge(m, src) -} -func (m *IntSum) XXX_Size() int { - return m.Size() -} -func (m *IntSum) XXX_DiscardUnknown() { - xxx_messageInfo_IntSum.DiscardUnknown(m) -} - -var xxx_messageInfo_IntSum proto.InternalMessageInfo - -func (m *IntSum) GetDataPoints() []*IntDataPoint { - if m != nil { - return m.DataPoints - } - return nil -} - -func (m *IntSum) GetAggregationTemporality() AggregationTemporality { - if m != nil { - return m.AggregationTemporality - } - return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED -} - -func (m *IntSum) GetIsMonotonic() bool { - if m != nil { - return m.IsMonotonic - } - return false -} - -// Sum represents the type of a numeric double scalar metric that is calculated -// as a sum of all reported measurements over a time interval. -type DoubleSum struct { - DataPoints []*DoubleDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` - // aggregation_temporality describes if the aggregator reports delta changes - // since last report time, or cumulative changes since a fixed start time. - AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"` - // If "true" means that the sum is monotonic. 
- IsMonotonic bool `protobuf:"varint,3,opt,name=is_monotonic,json=isMonotonic,proto3" json:"is_monotonic,omitempty"` -} - -func (m *DoubleSum) Reset() { *m = DoubleSum{} } -func (m *DoubleSum) String() string { return proto.CompactTextString(m) } -func (*DoubleSum) ProtoMessage() {} -func (*DoubleSum) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{6} -} -func (m *DoubleSum) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DoubleSum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DoubleSum.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DoubleSum) XXX_Merge(src proto.Message) { - xxx_messageInfo_DoubleSum.Merge(m, src) -} -func (m *DoubleSum) XXX_Size() int { - return m.Size() -} -func (m *DoubleSum) XXX_DiscardUnknown() { - xxx_messageInfo_DoubleSum.DiscardUnknown(m) -} - -var xxx_messageInfo_DoubleSum proto.InternalMessageInfo - -func (m *DoubleSum) GetDataPoints() []*DoubleDataPoint { - if m != nil { - return m.DataPoints - } - return nil -} - -func (m *DoubleSum) GetAggregationTemporality() AggregationTemporality { - if m != nil { - return m.AggregationTemporality - } - return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED -} - -func (m *DoubleSum) GetIsMonotonic() bool { - if m != nil { - return m.IsMonotonic - } - return false -} - -// Represents the type of a metric that is calculated by aggregating as a -// Histogram of all reported int measurements over a time interval. -type IntHistogram struct { - DataPoints []*IntHistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` - // aggregation_temporality describes if the aggregator reports delta changes - // since last report time, or cumulative changes since a fixed start time. 
- AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"` -} - -func (m *IntHistogram) Reset() { *m = IntHistogram{} } -func (m *IntHistogram) String() string { return proto.CompactTextString(m) } -func (*IntHistogram) ProtoMessage() {} -func (*IntHistogram) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{7} -} -func (m *IntHistogram) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IntHistogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_IntHistogram.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *IntHistogram) XXX_Merge(src proto.Message) { - xxx_messageInfo_IntHistogram.Merge(m, src) -} -func (m *IntHistogram) XXX_Size() int { - return m.Size() -} -func (m *IntHistogram) XXX_DiscardUnknown() { - xxx_messageInfo_IntHistogram.DiscardUnknown(m) -} - -var xxx_messageInfo_IntHistogram proto.InternalMessageInfo - -func (m *IntHistogram) GetDataPoints() []*IntHistogramDataPoint { - if m != nil { - return m.DataPoints - } - return nil -} - -func (m *IntHistogram) GetAggregationTemporality() AggregationTemporality { - if m != nil { - return m.AggregationTemporality - } - return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED -} - -// Represents the type of a metric that is calculated by aggregating as a -// Histogram of all reported double measurements over a time interval. -type DoubleHistogram struct { - DataPoints []*DoubleHistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` - // aggregation_temporality describes if the aggregator reports delta changes - // since last report time, or cumulative changes since a fixed start time. 
- AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"` -} - -func (m *DoubleHistogram) Reset() { *m = DoubleHistogram{} } -func (m *DoubleHistogram) String() string { return proto.CompactTextString(m) } -func (*DoubleHistogram) ProtoMessage() {} -func (*DoubleHistogram) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{8} -} -func (m *DoubleHistogram) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DoubleHistogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DoubleHistogram.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DoubleHistogram) XXX_Merge(src proto.Message) { - xxx_messageInfo_DoubleHistogram.Merge(m, src) -} -func (m *DoubleHistogram) XXX_Size() int { - return m.Size() -} -func (m *DoubleHistogram) XXX_DiscardUnknown() { - xxx_messageInfo_DoubleHistogram.DiscardUnknown(m) -} - -var xxx_messageInfo_DoubleHistogram proto.InternalMessageInfo - -func (m *DoubleHistogram) GetDataPoints() []*DoubleHistogramDataPoint { - if m != nil { - return m.DataPoints - } - return nil -} - -func (m *DoubleHistogram) GetAggregationTemporality() AggregationTemporality { - if m != nil { - return m.AggregationTemporality - } - return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED -} - -// DoubleSummary metric data are used to convey quantile summaries, -// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary) -// and OpenMetrics (see: https://github.com/OpenObservability/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45) -// data type. These data points cannot always be merged in a meaningful way. -// While they can be useful in some applications, histogram data points are -// recommended for new applications. -type DoubleSummary struct { - DataPoints []*DoubleSummaryDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` -} - -func (m *DoubleSummary) Reset() { *m = DoubleSummary{} } -func (m *DoubleSummary) String() string { return proto.CompactTextString(m) } -func (*DoubleSummary) ProtoMessage() {} -func (*DoubleSummary) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{9} -} -func (m *DoubleSummary) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DoubleSummary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DoubleSummary.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DoubleSummary) XXX_Merge(src proto.Message) { - xxx_messageInfo_DoubleSummary.Merge(m, src) -} -func (m *DoubleSummary) XXX_Size() int { - return m.Size() -} -func (m *DoubleSummary) XXX_DiscardUnknown() { - xxx_messageInfo_DoubleSummary.DiscardUnknown(m) -} - -var xxx_messageInfo_DoubleSummary proto.InternalMessageInfo - -func (m *DoubleSummary) GetDataPoints() []*DoubleSummaryDataPoint { - if m != nil { - return m.DataPoints - } - return nil -} - -// IntDataPoint is a single data point in a timeseries that describes the -// time-varying values of a int64 metric. 
-type IntDataPoint struct { - // The set of labels that uniquely identify this timeseries. - Labels []v11.StringKeyValue `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` - // start_time_unix_nano is the last time when the aggregation value was reset - // to "zero". For some metric types this is ignored, see data types for more - // details. - // - // The aggregation value is over the time interval (start_time_unix_nano, - // time_unix_nano]. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - // - // Value of 0 indicates that the timestamp is unspecified. In that case the - // timestamp may be decided by the backend. - StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` - // time_unix_nano is the moment when this aggregation value was reported. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` - // value itself. - Value int64 `protobuf:"fixed64,4,opt,name=value,proto3" json:"value,omitempty"` - // (Optional) List of exemplars collected from - // measurements that were used to form the data point - Exemplars []IntExemplar `protobuf:"bytes,5,rep,name=exemplars,proto3" json:"exemplars"` -} - -func (m *IntDataPoint) Reset() { *m = IntDataPoint{} } -func (m *IntDataPoint) String() string { return proto.CompactTextString(m) } -func (*IntDataPoint) ProtoMessage() {} -func (*IntDataPoint) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{10} -} -func (m *IntDataPoint) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IntDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_IntDataPoint.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *IntDataPoint) XXX_Merge(src proto.Message) { - xxx_messageInfo_IntDataPoint.Merge(m, src) -} -func (m *IntDataPoint) XXX_Size() int { - return m.Size() -} -func (m *IntDataPoint) XXX_DiscardUnknown() { - xxx_messageInfo_IntDataPoint.DiscardUnknown(m) -} - -var xxx_messageInfo_IntDataPoint proto.InternalMessageInfo - -func (m *IntDataPoint) GetLabels() []v11.StringKeyValue { - if m != nil { - return m.Labels - } - return nil -} - -func (m *IntDataPoint) GetStartTimeUnixNano() uint64 { - if m != nil { - return m.StartTimeUnixNano - } - return 0 -} - -func (m *IntDataPoint) GetTimeUnixNano() uint64 { - if m != nil { - return m.TimeUnixNano - } - return 0 -} - -func (m *IntDataPoint) GetValue() int64 { - if m != nil { - return m.Value - } - return 0 -} - -func (m *IntDataPoint) GetExemplars() []IntExemplar { - if m != nil { - return m.Exemplars - } - return nil -} - -// DoubleDataPoint is a single data point in a timeseries that describes the -// time-varying value of a double metric. -type DoubleDataPoint struct { - // The set of labels that uniquely identify this timeseries. - Labels []v11.StringKeyValue `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` - // start_time_unix_nano is the last time when the aggregation value was reset - // to "zero". For some metric types this is ignored, see data types for more - // details. - // - // The aggregation value is over the time interval (start_time_unix_nano, - // time_unix_nano]. 
- // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - // - // Value of 0 indicates that the timestamp is unspecified. In that case the - // timestamp may be decided by the backend. - StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` - // time_unix_nano is the moment when this aggregation value was reported. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` - // value itself. - Value float64 `protobuf:"fixed64,4,opt,name=value,proto3" json:"value,omitempty"` - // (Optional) List of exemplars collected from - // measurements that were used to form the data point - Exemplars []DoubleExemplar `protobuf:"bytes,5,rep,name=exemplars,proto3" json:"exemplars"` -} - -func (m *DoubleDataPoint) Reset() { *m = DoubleDataPoint{} } -func (m *DoubleDataPoint) String() string { return proto.CompactTextString(m) } -func (*DoubleDataPoint) ProtoMessage() {} -func (*DoubleDataPoint) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{11} -} -func (m *DoubleDataPoint) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DoubleDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DoubleDataPoint.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DoubleDataPoint) XXX_Merge(src proto.Message) { - xxx_messageInfo_DoubleDataPoint.Merge(m, src) -} -func (m *DoubleDataPoint) XXX_Size() int { - return m.Size() -} -func (m *DoubleDataPoint) XXX_DiscardUnknown() { - xxx_messageInfo_DoubleDataPoint.DiscardUnknown(m) -} - -var xxx_messageInfo_DoubleDataPoint proto.InternalMessageInfo - -func (m *DoubleDataPoint) GetLabels() []v11.StringKeyValue { - if m != nil { - return m.Labels - } - return nil -} - -func (m *DoubleDataPoint) GetStartTimeUnixNano() uint64 { - if m != nil { - return m.StartTimeUnixNano - } - return 0 -} - -func (m *DoubleDataPoint) GetTimeUnixNano() uint64 { - if m != nil { - return m.TimeUnixNano - } - return 0 -} - -func (m *DoubleDataPoint) GetValue() float64 { - if m != nil { - return m.Value - } - return 0 -} - -func (m *DoubleDataPoint) GetExemplars() []DoubleExemplar { - if m != nil { - return m.Exemplars - } - return nil -} - -// IntHistogramDataPoint is a single data point in a timeseries that describes -// the time-varying values of a Histogram of int values. A Histogram contains -// summary statistics for a population of values, it may optionally contain -// the distribution of those values across a set of buckets. -type IntHistogramDataPoint struct { - // The set of labels that uniquely identify this timeseries. - Labels []v11.StringKeyValue `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` - // start_time_unix_nano is the last time when the aggregation value was reset - // to "zero". For some metric types this is ignored, see data types for more - // details. - // - // The aggregation value is over the time interval (start_time_unix_nano, - // time_unix_nano]. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - // - // Value of 0 indicates that the timestamp is unspecified. 
In that case the - // timestamp may be decided by the backend. - StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` - // time_unix_nano is the moment when this aggregation value was reported. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` - // count is the number of values in the population. Must be non-negative. This - // value must be equal to the sum of the "count" fields in buckets if a - // histogram is provided. - Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"` - // sum of the values in the population. If count is zero then this field - // must be zero. This value must be equal to the sum of the "sum" fields in - // buckets if a histogram is provided. - Sum int64 `protobuf:"fixed64,5,opt,name=sum,proto3" json:"sum,omitempty"` - // bucket_counts is an optional field contains the count values of histogram - // for each bucket. - // - // The sum of the bucket_counts must equal the value in the count field. - // - // The number of elements in bucket_counts array must be by one greater than - // the number of elements in explicit_bounds array. - BucketCounts []uint64 `protobuf:"fixed64,6,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"` - // explicit_bounds specifies buckets with explicitly defined bounds for values. - // The bucket boundaries are described by "bounds" field. - // - // This defines size(bounds) + 1 (= N) buckets. The boundaries for bucket - // at index i are: - // - // (-infinity, bounds[i]) for i == 0 - // [bounds[i-1], bounds[i]) for 0 < i < N-1 - // [bounds[i], +infinity) for i == N-1 - // The values in bounds array must be strictly increasing. - // - // Note: only [a, b) intervals are currently supported for each bucket except the first one. - // If we decide to also support (a, b] intervals we should add support for these by defining - // a boolean value which decides what type of intervals to use. 
- ExplicitBounds []float64 `protobuf:"fixed64,7,rep,packed,name=explicit_bounds,json=explicitBounds,proto3" json:"explicit_bounds,omitempty"` - // (Optional) List of exemplars collected from - // measurements that were used to form the data point - Exemplars []IntExemplar `protobuf:"bytes,8,rep,name=exemplars,proto3" json:"exemplars"` -} - -func (m *IntHistogramDataPoint) Reset() { *m = IntHistogramDataPoint{} } -func (m *IntHistogramDataPoint) String() string { return proto.CompactTextString(m) } -func (*IntHistogramDataPoint) ProtoMessage() {} -func (*IntHistogramDataPoint) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{12} -} -func (m *IntHistogramDataPoint) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IntHistogramDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_IntHistogramDataPoint.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *IntHistogramDataPoint) XXX_Merge(src proto.Message) { - xxx_messageInfo_IntHistogramDataPoint.Merge(m, src) -} -func (m *IntHistogramDataPoint) XXX_Size() int { - return m.Size() -} -func (m *IntHistogramDataPoint) XXX_DiscardUnknown() { - xxx_messageInfo_IntHistogramDataPoint.DiscardUnknown(m) -} - -var xxx_messageInfo_IntHistogramDataPoint proto.InternalMessageInfo - -func (m *IntHistogramDataPoint) GetLabels() []v11.StringKeyValue { - if m != nil { - return m.Labels - } - return nil -} - -func (m *IntHistogramDataPoint) GetStartTimeUnixNano() uint64 { - if m != nil { - return m.StartTimeUnixNano - } - return 0 -} - -func (m *IntHistogramDataPoint) GetTimeUnixNano() uint64 { - if m != nil { - return m.TimeUnixNano - } - return 0 -} - -func (m *IntHistogramDataPoint) GetCount() uint64 { - if m != nil { - return m.Count - } - return 0 -} - -func (m *IntHistogramDataPoint) GetSum() int64 { - if m != nil { - return m.Sum - } - return 0 -} - -func (m *IntHistogramDataPoint) GetBucketCounts() []uint64 { - if m != nil { - return m.BucketCounts - } - return nil -} - -func (m *IntHistogramDataPoint) GetExplicitBounds() []float64 { - if m != nil { - return m.ExplicitBounds - } - return nil -} - -func (m *IntHistogramDataPoint) GetExemplars() []IntExemplar { - if m != nil { - return m.Exemplars - } - return nil -} - -// HistogramDataPoint is a single data point in a timeseries that describes the -// time-varying values of a Histogram of double values. A Histogram contains -// summary statistics for a population of values, it may optionally contain the -// distribution of those values across a set of buckets. -type DoubleHistogramDataPoint struct { - // The set of labels that uniquely identify this timeseries. - Labels []v11.StringKeyValue `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` - // start_time_unix_nano is the last time when the aggregation value was reset - // to "zero". For some metric types this is ignored, see data types for more - // details. - // - // The aggregation value is over the time interval (start_time_unix_nano, - // time_unix_nano]. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - // - // Value of 0 indicates that the timestamp is unspecified. In that case the - // timestamp may be decided by the backend. 
- StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` - // time_unix_nano is the moment when this aggregation value was reported. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` - // count is the number of values in the population. Must be non-negative. This - // value must be equal to the sum of the "count" fields in buckets if a - // histogram is provided. - Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"` - // sum of the values in the population. If count is zero then this field - // must be zero. This value must be equal to the sum of the "sum" fields in - // buckets if a histogram is provided. - Sum float64 `protobuf:"fixed64,5,opt,name=sum,proto3" json:"sum,omitempty"` - // bucket_counts is an optional field contains the count values of histogram - // for each bucket. - // - // The sum of the bucket_counts must equal the value in the count field. - // - // The number of elements in bucket_counts array must be by one greater than - // the number of elements in explicit_bounds array. - BucketCounts []uint64 `protobuf:"fixed64,6,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"` - // explicit_bounds specifies buckets with explicitly defined bounds for values. - // The bucket boundaries are described by "bounds" field. - // - // This defines size(bounds) + 1 (= N) buckets. The boundaries for bucket - // at index i are: - // - // (-infinity, bounds[i]) for i == 0 - // [bounds[i-1], bounds[i]) for 0 < i < N-1 - // [bounds[i], +infinity) for i == N-1 - // The values in bounds array must be strictly increasing. - // - // Note: only [a, b) intervals are currently supported for each bucket except the first one. - // If we decide to also support (a, b] intervals we should add support for these by defining - // a boolean value which decides what type of intervals to use. 
- ExplicitBounds []float64 `protobuf:"fixed64,7,rep,packed,name=explicit_bounds,json=explicitBounds,proto3" json:"explicit_bounds,omitempty"` - // (Optional) List of exemplars collected from - // measurements that were used to form the data point - Exemplars []DoubleExemplar `protobuf:"bytes,8,rep,name=exemplars,proto3" json:"exemplars"` -} - -func (m *DoubleHistogramDataPoint) Reset() { *m = DoubleHistogramDataPoint{} } -func (m *DoubleHistogramDataPoint) String() string { return proto.CompactTextString(m) } -func (*DoubleHistogramDataPoint) ProtoMessage() {} -func (*DoubleHistogramDataPoint) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{13} -} -func (m *DoubleHistogramDataPoint) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DoubleHistogramDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DoubleHistogramDataPoint.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DoubleHistogramDataPoint) XXX_Merge(src proto.Message) { - xxx_messageInfo_DoubleHistogramDataPoint.Merge(m, src) -} -func (m *DoubleHistogramDataPoint) XXX_Size() int { - return m.Size() -} -func (m *DoubleHistogramDataPoint) XXX_DiscardUnknown() { - xxx_messageInfo_DoubleHistogramDataPoint.DiscardUnknown(m) -} - -var xxx_messageInfo_DoubleHistogramDataPoint proto.InternalMessageInfo - -func (m *DoubleHistogramDataPoint) GetLabels() []v11.StringKeyValue { - if m != nil { - return m.Labels - } - return nil -} - -func (m *DoubleHistogramDataPoint) GetStartTimeUnixNano() uint64 { - if m != nil { - return m.StartTimeUnixNano - } - return 0 -} - -func (m *DoubleHistogramDataPoint) GetTimeUnixNano() uint64 { - if m != nil { - return m.TimeUnixNano - } - return 0 -} - -func (m *DoubleHistogramDataPoint) GetCount() uint64 { - if m != nil { - return m.Count - } - return 0 -} - -func (m *DoubleHistogramDataPoint) GetSum() float64 { - if m != nil { - return m.Sum - } - return 0 -} - -func (m *DoubleHistogramDataPoint) GetBucketCounts() []uint64 { - if m != nil { - return m.BucketCounts - } - return nil -} - -func (m *DoubleHistogramDataPoint) GetExplicitBounds() []float64 { - if m != nil { - return m.ExplicitBounds - } - return nil -} - -func (m *DoubleHistogramDataPoint) GetExemplars() []DoubleExemplar { - if m != nil { - return m.Exemplars - } - return nil -} - -// DoubleSummaryDataPoint is a single data point in a timeseries that describes the -// time-varying values of a Summary metric. -type DoubleSummaryDataPoint struct { - // The set of labels that uniquely identify this timeseries. - Labels []v11.StringKeyValue `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` - // start_time_unix_nano is the last time when the aggregation value was reset - // to "zero". For some metric types this is ignored, see data types for more - // details. - // - // The aggregation value is over the time interval (start_time_unix_nano, - // time_unix_nano]. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - // - // Value of 0 indicates that the timestamp is unspecified. In that case the - // timestamp may be decided by the backend. - StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` - // time_unix_nano is the moment when this aggregation value was reported. 
- // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` - // count is the number of values in the population. Must be non-negative. - Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"` - // sum of the values in the population. If count is zero then this field - // must be zero. - Sum float64 `protobuf:"fixed64,5,opt,name=sum,proto3" json:"sum,omitempty"` - // (Optional) list of values at different quantiles of the distribution calculated - // from the current snapshot. The quantiles must be strictly increasing. - QuantileValues []*DoubleSummaryDataPoint_ValueAtQuantile `protobuf:"bytes,6,rep,name=quantile_values,json=quantileValues,proto3" json:"quantile_values,omitempty"` -} - -func (m *DoubleSummaryDataPoint) Reset() { *m = DoubleSummaryDataPoint{} } -func (m *DoubleSummaryDataPoint) String() string { return proto.CompactTextString(m) } -func (*DoubleSummaryDataPoint) ProtoMessage() {} -func (*DoubleSummaryDataPoint) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{14} -} -func (m *DoubleSummaryDataPoint) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DoubleSummaryDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DoubleSummaryDataPoint.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DoubleSummaryDataPoint) XXX_Merge(src proto.Message) { - xxx_messageInfo_DoubleSummaryDataPoint.Merge(m, src) -} -func (m *DoubleSummaryDataPoint) XXX_Size() int { - return m.Size() -} -func (m *DoubleSummaryDataPoint) XXX_DiscardUnknown() { - xxx_messageInfo_DoubleSummaryDataPoint.DiscardUnknown(m) -} - -var xxx_messageInfo_DoubleSummaryDataPoint proto.InternalMessageInfo - -func (m *DoubleSummaryDataPoint) GetLabels() []v11.StringKeyValue { - if m != nil { - return m.Labels - } - return nil -} - -func (m *DoubleSummaryDataPoint) GetStartTimeUnixNano() uint64 { - if m != nil { - return m.StartTimeUnixNano - } - return 0 -} - -func (m *DoubleSummaryDataPoint) GetTimeUnixNano() uint64 { - if m != nil { - return m.TimeUnixNano - } - return 0 -} - -func (m *DoubleSummaryDataPoint) GetCount() uint64 { - if m != nil { - return m.Count - } - return 0 -} - -func (m *DoubleSummaryDataPoint) GetSum() float64 { - if m != nil { - return m.Sum - } - return 0 -} - -func (m *DoubleSummaryDataPoint) GetQuantileValues() []*DoubleSummaryDataPoint_ValueAtQuantile { - if m != nil { - return m.QuantileValues - } - return nil -} - -// Represents the value at a given quantile of a distribution. -// -// To record Min and Max values following conventions are used: -// - The 1.0 quantile is equivalent to the maximum value observed. -// - The 0.0 quantile is equivalent to the minimum value observed. -// -// See the following issue for more context: -// https://github.com/open-telemetry/opentelemetry-proto/issues/125 -type DoubleSummaryDataPoint_ValueAtQuantile struct { - // The quantile of a distribution. Must be in the interval - // [0.0, 1.0]. - Quantile float64 `protobuf:"fixed64,1,opt,name=quantile,proto3" json:"quantile,omitempty"` - // The value at the given quantile of a distribution. 
- Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *DoubleSummaryDataPoint_ValueAtQuantile) Reset() { - *m = DoubleSummaryDataPoint_ValueAtQuantile{} -} -func (m *DoubleSummaryDataPoint_ValueAtQuantile) String() string { return proto.CompactTextString(m) } -func (*DoubleSummaryDataPoint_ValueAtQuantile) ProtoMessage() {} -func (*DoubleSummaryDataPoint_ValueAtQuantile) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{14, 0} -} -func (m *DoubleSummaryDataPoint_ValueAtQuantile) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DoubleSummaryDataPoint_ValueAtQuantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DoubleSummaryDataPoint_ValueAtQuantile.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DoubleSummaryDataPoint_ValueAtQuantile) XXX_Merge(src proto.Message) { - xxx_messageInfo_DoubleSummaryDataPoint_ValueAtQuantile.Merge(m, src) -} -func (m *DoubleSummaryDataPoint_ValueAtQuantile) XXX_Size() int { - return m.Size() -} -func (m *DoubleSummaryDataPoint_ValueAtQuantile) XXX_DiscardUnknown() { - xxx_messageInfo_DoubleSummaryDataPoint_ValueAtQuantile.DiscardUnknown(m) -} - -var xxx_messageInfo_DoubleSummaryDataPoint_ValueAtQuantile proto.InternalMessageInfo - -func (m *DoubleSummaryDataPoint_ValueAtQuantile) GetQuantile() float64 { - if m != nil { - return m.Quantile - } - return 0 -} - -func (m *DoubleSummaryDataPoint_ValueAtQuantile) GetValue() float64 { - if m != nil { - return m.Value - } - return 0 -} - -// A representation of an exemplar, which is a sample input int measurement. -// Exemplars also hold information about the environment when the measurement -// was recorded, for example the span and trace ID of the active span when the -// exemplar was recorded. -type IntExemplar struct { - // The set of labels that were filtered out by the aggregator, but recorded - // alongside the original measurement. Only labels that were filtered out - // by the aggregator should be included - FilteredLabels []v11.StringKeyValue `protobuf:"bytes,1,rep,name=filtered_labels,json=filteredLabels,proto3" json:"filtered_labels"` - // time_unix_nano is the exact time when this exemplar was recorded - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - TimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` - // Numerical int value of the measurement that was recorded. - Value int64 `protobuf:"fixed64,3,opt,name=value,proto3" json:"value,omitempty"` - // (Optional) Span ID of the exemplar trace. - // span_id may be missing if the measurement is not recorded inside a trace - // or if the trace is not sampled. - SpanId go_opentelemetry_io_collector_internal_data.SpanID `protobuf:"bytes,4,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/internal/data.SpanID" json:"span_id"` - // (Optional) Trace ID of the exemplar trace. - // trace_id may be missing if the measurement is not recorded inside a trace - // or if the trace is not sampled. 
- TraceId go_opentelemetry_io_collector_internal_data.TraceID `protobuf:"bytes,5,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/internal/data.TraceID" json:"trace_id"` -} - -func (m *IntExemplar) Reset() { *m = IntExemplar{} } -func (m *IntExemplar) String() string { return proto.CompactTextString(m) } -func (*IntExemplar) ProtoMessage() {} -func (*IntExemplar) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{15} -} -func (m *IntExemplar) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IntExemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_IntExemplar.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *IntExemplar) XXX_Merge(src proto.Message) { - xxx_messageInfo_IntExemplar.Merge(m, src) -} -func (m *IntExemplar) XXX_Size() int { - return m.Size() -} -func (m *IntExemplar) XXX_DiscardUnknown() { - xxx_messageInfo_IntExemplar.DiscardUnknown(m) -} - -var xxx_messageInfo_IntExemplar proto.InternalMessageInfo - -func (m *IntExemplar) GetFilteredLabels() []v11.StringKeyValue { - if m != nil { - return m.FilteredLabels - } - return nil -} - -func (m *IntExemplar) GetTimeUnixNano() uint64 { - if m != nil { - return m.TimeUnixNano - } - return 0 -} - -func (m *IntExemplar) GetValue() int64 { - if m != nil { - return m.Value - } - return 0 -} - -// A representation of an exemplar, which is a sample input double measurement. -// Exemplars also hold information about the environment when the measurement -// was recorded, for example the span and trace ID of the active span when the -// exemplar was recorded. -type DoubleExemplar struct { - // The set of labels that were filtered out by the aggregator, but recorded - // alongside the original measurement. Only labels that were filtered out - // by the aggregator should be included - FilteredLabels []v11.StringKeyValue `protobuf:"bytes,1,rep,name=filtered_labels,json=filteredLabels,proto3" json:"filtered_labels"` - // time_unix_nano is the exact time when this exemplar was recorded - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - TimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` - // Numerical double value of the measurement that was recorded. - Value float64 `protobuf:"fixed64,3,opt,name=value,proto3" json:"value,omitempty"` - // (Optional) Span ID of the exemplar trace. - // span_id may be missing if the measurement is not recorded inside a trace - // or if the trace is not sampled. - SpanId go_opentelemetry_io_collector_internal_data.SpanID `protobuf:"bytes,4,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/internal/data.SpanID" json:"span_id"` - // (Optional) Trace ID of the exemplar trace. - // trace_id may be missing if the measurement is not recorded inside a trace - // or if the trace is not sampled. 
- TraceId go_opentelemetry_io_collector_internal_data.TraceID `protobuf:"bytes,5,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/internal/data.TraceID" json:"trace_id"` -} - -func (m *DoubleExemplar) Reset() { *m = DoubleExemplar{} } -func (m *DoubleExemplar) String() string { return proto.CompactTextString(m) } -func (*DoubleExemplar) ProtoMessage() {} -func (*DoubleExemplar) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{16} -} -func (m *DoubleExemplar) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DoubleExemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DoubleExemplar.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DoubleExemplar) XXX_Merge(src proto.Message) { - xxx_messageInfo_DoubleExemplar.Merge(m, src) -} -func (m *DoubleExemplar) XXX_Size() int { - return m.Size() -} -func (m *DoubleExemplar) XXX_DiscardUnknown() { - xxx_messageInfo_DoubleExemplar.DiscardUnknown(m) -} - -var xxx_messageInfo_DoubleExemplar proto.InternalMessageInfo - -func (m *DoubleExemplar) GetFilteredLabels() []v11.StringKeyValue { - if m != nil { - return m.FilteredLabels - } - return nil -} - -func (m *DoubleExemplar) GetTimeUnixNano() uint64 { - if m != nil { - return m.TimeUnixNano - } - return 0 -} - -func (m *DoubleExemplar) GetValue() float64 { - if m != nil { - return m.Value - } - return 0 -} - -func init() { - proto.RegisterEnum("opentelemetry.proto.metrics.v1.AggregationTemporality", AggregationTemporality_name, AggregationTemporality_value) - proto.RegisterType((*ResourceMetrics)(nil), "opentelemetry.proto.metrics.v1.ResourceMetrics") - proto.RegisterType((*InstrumentationLibraryMetrics)(nil), "opentelemetry.proto.metrics.v1.InstrumentationLibraryMetrics") - proto.RegisterType((*Metric)(nil), "opentelemetry.proto.metrics.v1.Metric") - proto.RegisterType((*IntGauge)(nil), "opentelemetry.proto.metrics.v1.IntGauge") - proto.RegisterType((*DoubleGauge)(nil), "opentelemetry.proto.metrics.v1.DoubleGauge") - proto.RegisterType((*IntSum)(nil), "opentelemetry.proto.metrics.v1.IntSum") - proto.RegisterType((*DoubleSum)(nil), "opentelemetry.proto.metrics.v1.DoubleSum") - proto.RegisterType((*IntHistogram)(nil), "opentelemetry.proto.metrics.v1.IntHistogram") - proto.RegisterType((*DoubleHistogram)(nil), "opentelemetry.proto.metrics.v1.Histogram") - proto.RegisterType((*DoubleSummary)(nil), "opentelemetry.proto.metrics.v1.Summary") - proto.RegisterType((*IntDataPoint)(nil), "opentelemetry.proto.metrics.v1.IntDataPoint") - proto.RegisterType((*DoubleDataPoint)(nil), "opentelemetry.proto.metrics.v1.DoubleDataPoint") - proto.RegisterType((*IntHistogramDataPoint)(nil), "opentelemetry.proto.metrics.v1.IntHistogramDataPoint") - proto.RegisterType((*DoubleHistogramDataPoint)(nil), "opentelemetry.proto.metrics.v1.DoubleHistogramDataPoint") - proto.RegisterType((*DoubleSummaryDataPoint)(nil), "opentelemetry.proto.metrics.v1.DoubleSummaryDataPoint") - proto.RegisterType((*DoubleSummaryDataPoint_ValueAtQuantile)(nil), "opentelemetry.proto.metrics.v1.DoubleSummaryDataPoint.ValueAtQuantile") - proto.RegisterType((*IntExemplar)(nil), "opentelemetry.proto.metrics.v1.IntExemplar") - proto.RegisterType((*DoubleExemplar)(nil), "opentelemetry.proto.metrics.v1.DoubleExemplar") -} - -func init() { - 
proto.RegisterFile("opentelemetry/proto/metrics/v1/metrics.proto", fileDescriptor_3c3112f9fa006917) -} - -var fileDescriptor_3c3112f9fa006917 = []byte{ - // 1257 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcf, 0x6f, 0x1b, 0x45, - 0x14, 0xf6, 0xda, 0x89, 0xe3, 0x3c, 0x3b, 0x76, 0x18, 0x95, 0x74, 0x15, 0xa9, 0x6e, 0xea, 0xa2, - 0x36, 0xf4, 0x87, 0xad, 0xa6, 0x6a, 0x85, 0x40, 0x08, 0x9c, 0xc4, 0x4d, 0x4c, 0x93, 0xd6, 0x4c, - 0x9c, 0xa0, 0xa2, 0x4a, 0xab, 0xb5, 0x77, 0x70, 0x47, 0xec, 0xce, 0x98, 0xdd, 0xd9, 0x28, 0xb9, - 0x22, 0x21, 0x71, 0xa8, 0x04, 0x57, 0xb8, 0xf0, 0xef, 0xf4, 0xd8, 0x03, 0x52, 0x11, 0x12, 0x15, - 0x6a, 0x25, 0x2e, 0x9c, 0xf8, 0x0f, 0xd0, 0xcc, 0xee, 0xc6, 0x76, 0xb2, 0x8e, 0x1d, 0x5a, 0xa4, - 0xb4, 0xb7, 0xb7, 0x6f, 0xde, 0x7c, 0xf3, 0xbd, 0xef, 0xbd, 0x79, 0xbb, 0x36, 0x5c, 0xe3, 0x5d, - 0xc2, 0x04, 0xb1, 0x89, 0x43, 0x84, 0xbb, 0x5f, 0xe9, 0xba, 0x5c, 0xf0, 0x8a, 0xb4, 0x69, 0xdb, - 0xab, 0xec, 0xde, 0x88, 0xcc, 0xb2, 0x5a, 0x40, 0xc5, 0x81, 0xe8, 0xc0, 0x59, 0x8e, 0x42, 0x76, - 0x6f, 0xcc, 0x9f, 0xe9, 0xf0, 0x0e, 0x0f, 0x30, 0xa4, 0x15, 0x04, 0xcc, 0x5f, 0x89, 0x3b, 0xa3, - 0xcd, 0x1d, 0x87, 0x33, 0x79, 0x44, 0x60, 0x85, 0xb1, 0xe5, 0xb8, 0x58, 0x97, 0x78, 0xdc, 0x77, - 0xdb, 0x44, 0x46, 0x47, 0x76, 0x10, 0x5f, 0xfa, 0x4b, 0x83, 0x02, 0x0e, 0x5d, 0x9b, 0x01, 0x11, - 0x74, 0x17, 0x32, 0x51, 0x94, 0xae, 0x2d, 0x68, 0x8b, 0xd9, 0xa5, 0xf7, 0xcb, 0x71, 0xc4, 0x0f, - 0xa0, 0x76, 0x6f, 0x94, 0x23, 0x8c, 0xe5, 0x89, 0x27, 0xcf, 0xcf, 0x27, 0xf0, 0x01, 0x00, 0xfa, - 0x4e, 0x83, 0xf3, 0x94, 0x79, 0xc2, 0xf5, 0x1d, 0xc2, 0x84, 0x29, 0x28, 0x67, 0x86, 0x4d, 0x5b, - 0xae, 0xe9, 0xee, 0x1b, 0x61, 0xe6, 0x7a, 0x72, 0x21, 0xb5, 0x98, 0x5d, 0xfa, 0xb8, 0x7c, 0xbc, - 0x3a, 0xe5, 0xfa, 0x20, 0xcc, 0x46, 0x80, 0x12, 0xb2, 0xc6, 0xe7, 0xe8, 0x71, 0xcb, 0xa5, 0x67, - 0x1a, 0x9c, 0x3b, 0x16, 0x00, 0x09, 0x38, 0x3b, 0x84, 0x68, 0xa8, 0xc2, 0xad, 0x58, 0x82, 0xa1, - 0xfc, 0x43, 0xf9, 0x85, 0x8a, 0xcc, 0xc5, 0xd3, 0x43, 0x9f, 0xc2, 0xd4, 0xa0, 0x0c, 0x97, 0x46, - 0xc9, 0x10, 0xf0, 0xc5, 0xd1, 0xb6, 0xd2, 0x0f, 0x93, 0x90, 0x0e, 0x7c, 0x08, 0xc1, 0x04, 0x33, - 0x9d, 0xa0, 0x6a, 0xd3, 0x58, 0xd9, 0x68, 0x01, 0xb2, 0x16, 0xf1, 0xda, 0x2e, 0xed, 0xca, 0x63, - 0xf5, 0xa4, 0x5a, 0xea, 0x77, 0xc9, 0x5d, 0x3e, 0xa3, 0x42, 0x4f, 0x05, 0xbb, 0xa4, 0x8d, 0xd6, - 0x60, 0x9a, 0x32, 0x61, 0x74, 0x4c, 0xbf, 0x43, 0xf4, 0x09, 0x95, 0xfe, 0xe2, 0xe8, 0xfa, 0x88, - 0x35, 0x19, 0xbf, 0x9e, 0xc0, 0x19, 0x1a, 0xda, 0xa8, 0x01, 0x39, 0x8b, 0xfb, 0x2d, 0x9b, 0x84, - 0x58, 0x93, 0x0a, 0xeb, 0xea, 0x28, 0xac, 0x55, 0xb5, 0x27, 0x82, 0xcb, 0x5a, 0xbd, 0x47, 0x54, - 0x85, 0x29, 0x49, 0xcd, 0xf3, 0x1d, 0x3d, 0xad, 0xc0, 0x2e, 0x8d, 0x41, 0x6c, 0xcb, 0x77, 0xd6, - 0x13, 0x38, 0x4d, 0x95, 0x85, 0x3e, 0x03, 0x08, 0x49, 0x49, 0x94, 0xa9, 0x63, 0x7a, 0xfc, 0x08, - 0xa5, 0x00, 0x68, 0xda, 0x8a, 0x1e, 0xd0, 0x16, 0xcc, 0x48, 0x3a, 0x8f, 0xa8, 0x27, 0x78, 0xc7, - 0x35, 0x1d, 0x3d, 0xa3, 0xe0, 0xae, 0x8d, 0x41, 0x6a, 0x3d, 0xda, 0xb3, 0x9e, 0xc0, 0x39, 0xda, - 0xf7, 0x8c, 0x1e, 0xc2, 0x6c, 0x48, 0xb0, 0x87, 0x3b, 0xad, 0x70, 0x2b, 0xe3, 0xd1, 0xec, 0x87, - 0x2e, 0x58, 0x83, 0x2e, 0xb4, 0x03, 0xf9, 0x5e, 0xfa, 0x8e, 0x6c, 0xf0, 0xac, 0xc2, 0xbe, 0x3e, - 0xb6, 0x04, 0x72, 0xd3, 0x7a, 0x02, 0xcf, 0x58, 0xfd, 0x8e, 0xe5, 0x34, 0x4c, 0x58, 0xa6, 0x30, - 0x4b, 0x0f, 0x20, 0x13, 0xf5, 0x02, 0xda, 0x84, 0xac, 0xf4, 0x19, 0x5d, 0x4e, 0x99, 0xf0, 0x74, - 0x4d, 0xf5, 0xf8, 0x38, 0xe2, 0xac, 0x9a, 0xc2, 0x6c, 0xc8, 0x4d, 0x18, 0xac, 0xc8, 0xf4, 0x4a, - 0x06, 0x64, 0xfb, 
0x5a, 0x03, 0x35, 0xe2, 0xd0, 0xc7, 0x94, 0x28, 0xfe, 0x80, 0xbf, 0x35, 0x48, - 0x07, 0xfd, 0xf2, 0x9a, 0xa9, 0x23, 0x0e, 0x67, 0xcd, 0x4e, 0xc7, 0x25, 0x9d, 0x60, 0xb6, 0x08, - 0xe2, 0x74, 0xb9, 0x6b, 0xda, 0x54, 0xec, 0xab, 0x4b, 0x99, 0x5f, 0xba, 0x3d, 0x0a, 0xba, 0xda, - 0xdb, 0xde, 0xec, 0xed, 0xc6, 0x73, 0x66, 0xac, 0x1f, 0x5d, 0x80, 0x1c, 0xf5, 0x0c, 0x87, 0x33, - 0x2e, 0x38, 0xa3, 0x6d, 0x75, 0xbf, 0x33, 0x38, 0x4b, 0xbd, 0xcd, 0xc8, 0x55, 0xfa, 0x47, 0x83, - 0xe9, 0x83, 0xa2, 0xbe, 0x7e, 0x35, 0x4f, 0x65, 0xce, 0xcf, 0x34, 0xc8, 0xf5, 0x5f, 0x3e, 0xb4, - 0x13, 0x97, 0xf6, 0xad, 0x93, 0xdc, 0xdf, 0xd3, 0x91, 0x7c, 0xe9, 0x0f, 0x0d, 0x0a, 0x87, 0xae, - 0x3f, 0x7a, 0x10, 0x97, 0xdc, 0x07, 0x27, 0x1c, 0x22, 0xa7, 0x24, 0xbf, 0x47, 0x30, 0x33, 0x30, - 0x81, 0xd0, 0x17, 0x71, 0xc9, 0xdd, 0x3e, 0xd1, 0x14, 0x8b, 0x9f, 0x02, 0x3f, 0x25, 0x55, 0x8f, - 0x1c, 0x2c, 0xa2, 0xbb, 0x90, 0xb6, 0xcd, 0x16, 0xb1, 0xa3, 0x43, 0xae, 0x8f, 0xf8, 0x16, 0xd8, - 0x12, 0x2e, 0x65, 0x9d, 0xbb, 0x64, 0x7f, 0xc7, 0xb4, 0xfd, 0xe8, 0xab, 0x28, 0x84, 0x40, 0x15, - 0x38, 0xe3, 0x09, 0xd3, 0x15, 0x86, 0xa0, 0x0e, 0x31, 0x7c, 0x46, 0xf7, 0x0c, 0x66, 0x32, 0xae, - 0x54, 0x4b, 0xe3, 0x77, 0xd4, 0x5a, 0x93, 0x3a, 0x64, 0x9b, 0xd1, 0xbd, 0x7b, 0x26, 0xe3, 0xe8, - 0x3d, 0xc8, 0x1f, 0x0a, 0x4d, 0xa9, 0xd0, 0x9c, 0xe8, 0x8f, 0x3a, 0x03, 0x93, 0xbb, 0xf2, 0x34, - 0xf5, 0xbe, 0x9e, 0xc5, 0xc1, 0x03, 0xba, 0x0f, 0xd3, 0x64, 0x8f, 0x38, 0x5d, 0xdb, 0x74, 0x3d, - 0x7d, 0x52, 0x91, 0xbf, 0x3a, 0x46, 0x6f, 0xd7, 0xc2, 0x3d, 0x21, 0xf5, 0x1e, 0x46, 0xe9, 0x97, - 0x64, 0xd4, 0x65, 0x6f, 0xb0, 0x3c, 0x5a, 0x24, 0x0f, 0x3e, 0x2a, 0x4f, 0x79, 0xbc, 0x06, 0x1a, - 0xae, 0xd0, 0xb7, 0x29, 0x78, 0x37, 0x76, 0x3c, 0xbc, 0x29, 0x3a, 0xb5, 0xb9, 0xcf, 0x84, 0xd2, - 0x29, 0x8d, 0x83, 0x07, 0x34, 0x0b, 0x29, 0xf9, 0xad, 0x34, 0xa9, 0x5a, 0x4b, 0x9a, 0xe8, 0x22, - 0xcc, 0xb4, 0xfc, 0xf6, 0xd7, 0x44, 0x18, 0x2a, 0xc2, 0xd3, 0xd3, 0x0b, 0x29, 0x09, 0x16, 0x38, - 0x57, 0x94, 0x0f, 0x5d, 0x86, 0x02, 0xd9, 0xeb, 0xda, 0xb4, 0x4d, 0x85, 0xd1, 0xe2, 0x3e, 0xb3, - 0x3c, 0x7d, 0x6a, 0x21, 0xb5, 0xa8, 0xe1, 0x7c, 0xe4, 0x5e, 0x56, 0xde, 0xc1, 0x36, 0xcd, 0xbc, - 0x86, 0x36, 0xfd, 0x3e, 0x05, 0xfa, 0xb0, 0x31, 0xf6, 0x76, 0xd4, 0x41, 0xfb, 0x3f, 0xea, 0x80, - 0x8f, 0xd6, 0xe1, 0x95, 0xef, 0xc3, 0xcf, 0x29, 0x98, 0x8b, 0x1f, 0xba, 0x6f, 0x55, 0x21, 0x38, - 0x14, 0xbe, 0xf1, 0x4d, 0x26, 0xa8, 0x4d, 0x0c, 0x35, 0x5c, 0x82, 0x52, 0x64, 0x97, 0xee, 0xfc, - 0xb7, 0x37, 0x52, 0x59, 0xe5, 0x58, 0x15, 0x9f, 0x87, 0xa0, 0x38, 0x1f, 0xc1, 0xab, 0x05, 0x6f, - 0x7e, 0x05, 0x0a, 0x87, 0x42, 0xd0, 0x3c, 0x64, 0xa2, 0x20, 0xf5, 0x2b, 0x50, 0xc3, 0x07, 0xcf, - 0xbd, 0x01, 0x98, 0xec, 0x1b, 0x80, 0xa5, 0x5f, 0x93, 0x90, 0xed, 0xbb, 0x48, 0xe8, 0x21, 0x14, - 0xbe, 0xa2, 0xb6, 0x20, 0x2e, 0xb1, 0x8c, 0x57, 0x2f, 0x4d, 0x3e, 0xc2, 0xda, 0x08, 0x4a, 0x74, - 0x54, 0xf1, 0xe4, 0x71, 0xa3, 0x3a, 0xd5, 0xff, 0x26, 0xdb, 0x82, 0x29, 0xaf, 0x6b, 0x32, 0x83, - 0x5a, 0xaa, 0x12, 0xb9, 0xe5, 0x0f, 0xe5, 0x11, 0xbf, 0x3f, 0x3f, 0xbf, 0xd4, 0xe1, 0x87, 0xb8, - 0x51, 0x5e, 0x69, 0x73, 0xdb, 0x26, 0x6d, 0xc1, 0xdd, 0x0a, 0x65, 0x82, 0xb8, 0xcc, 0xb4, 0x2b, - 0xf2, 0x05, 0x5f, 0xde, 0xea, 0x9a, 0xac, 0xbe, 0x8a, 0xd3, 0x12, 0xaa, 0x6e, 0xa1, 0x1d, 0xc8, - 0x08, 0xd7, 0x6c, 0x13, 0x89, 0x3a, 0xa9, 0x50, 0x3f, 0x0a, 0x51, 0x6f, 0x9e, 0x04, 0xb5, 0x29, - 0x31, 0xea, 0xab, 0x78, 0x4a, 0x81, 0xd5, 0xad, 0xd2, 0xb3, 0x24, 0xe4, 0x07, 0xef, 0xc5, 0xe9, - 0x53, 0x56, 0x7b, 0x13, 0x95, 0xbd, 0xf2, 0x58, 0x83, 0xb9, 0xf8, 0x0f, 0x47, 0x74, 0x19, 0x2e, - 0x56, 0xd7, 0xd6, 0x70, 0x6d, 0xad, 0xda, 
0xac, 0xdf, 0xbf, 0x67, 0x34, 0x6b, 0x9b, 0x8d, 0xfb, - 0xb8, 0xba, 0x51, 0x6f, 0x3e, 0x30, 0xb6, 0xef, 0x6d, 0x35, 0x6a, 0x2b, 0xf5, 0x3b, 0xf5, 0xda, - 0xea, 0x6c, 0x02, 0x5d, 0x80, 0x73, 0xc3, 0x02, 0x57, 0x6b, 0x1b, 0xcd, 0xea, 0xac, 0x86, 0x2e, - 0x41, 0x69, 0x58, 0xc8, 0xca, 0xf6, 0xe6, 0xf6, 0x46, 0xb5, 0x59, 0xdf, 0xa9, 0xcd, 0x26, 0x97, - 0x1f, 0x6b, 0x4f, 0x5e, 0x14, 0xb5, 0xa7, 0x2f, 0x8a, 0xda, 0x9f, 0x2f, 0x8a, 0xda, 0x8f, 0x2f, - 0x8b, 0x89, 0xa7, 0x2f, 0x8b, 0x89, 0xdf, 0x5e, 0x16, 0x13, 0x70, 0x81, 0xf2, 0x11, 0x37, 0x7f, - 0x39, 0x17, 0xfe, 0xfb, 0xd4, 0x90, 0x0b, 0x0d, 0xed, 0xcb, 0x4f, 0x4e, 0x20, 0x4d, 0xf0, 0xef, - 0x5e, 0x87, 0xb0, 0xbe, 0x3f, 0x1c, 0x5b, 0x69, 0xe5, 0xbc, 0xf9, 0x6f, 0x00, 0x00, 0x00, 0xff, - 0xff, 0x25, 0x35, 0xe9, 0x7a, 0x99, 0x14, 0x00, 0x00, -} - -func (m *ResourceMetrics) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceMetrics) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceMetrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.InstrumentationLibraryMetrics) > 0 { - for iNdEx := len(m.InstrumentationLibraryMetrics) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.InstrumentationLibraryMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *InstrumentationLibraryMetrics) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *InstrumentationLibraryMetrics) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *InstrumentationLibraryMetrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Metrics) > 0 { - for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Metrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.InstrumentationLibrary.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Metric) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Metric) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Metric) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Data != nil { - { - size := m.Data.Size() - i -= size - if _, err := m.Data.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - if len(m.Unit) > 0 { - i -= len(m.Unit) - copy(dAtA[i:], 
m.Unit) - i = encodeVarintMetrics(dAtA, i, uint64(len(m.Unit))) - i-- - dAtA[i] = 0x1a - } - if len(m.Description) > 0 { - i -= len(m.Description) - copy(dAtA[i:], m.Description) - i = encodeVarintMetrics(dAtA, i, uint64(len(m.Description))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintMetrics(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Metric_IntGauge) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Metric_IntGauge) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.IntGauge != nil { - { - size, err := m.IntGauge.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - return len(dAtA) - i, nil -} -func (m *Metric_DoubleGauge) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Metric_DoubleGauge) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.DoubleGauge != nil { - { - size, err := m.DoubleGauge.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - return len(dAtA) - i, nil -} -func (m *Metric_IntSum) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Metric_IntSum) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.IntSum != nil { - { - size, err := m.IntSum.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - return len(dAtA) - i, nil -} -func (m *Metric_DoubleSum) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Metric_DoubleSum) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.DoubleSum != nil { - { - size, err := m.DoubleSum.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - return len(dAtA) - i, nil -} -func (m *Metric_IntHistogram) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Metric_IntHistogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.IntHistogram != nil { - { - size, err := m.IntHistogram.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - return len(dAtA) - i, nil -} -func (m *Metric_DoubleHistogram) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Metric_DoubleHistogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.DoubleHistogram != nil { - { - size, err := m.DoubleHistogram.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - return len(dAtA) - i, nil -} -func (m *Metric_DoubleSummary) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Metric_DoubleSummary) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { - i := len(dAtA) - if m.DoubleSummary != nil { - { - size, err := m.DoubleSummary.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x5a - } - return len(dAtA) - i, nil -} -func (m *IntGauge) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IntGauge) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IntGauge) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.DataPoints) > 0 { - for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *DoubleGauge) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DoubleGauge) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DoubleGauge) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.DataPoints) > 0 { - for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *IntSum) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IntSum) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IntSum) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.IsMonotonic { - i-- - if m.IsMonotonic { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if m.AggregationTemporality != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.AggregationTemporality)) - i-- - dAtA[i] = 0x10 - } - if len(m.DataPoints) > 0 { - for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *DoubleSum) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DoubleSum) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DoubleSum) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.IsMonotonic { - i-- - if m.IsMonotonic { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if m.AggregationTemporality != 0 { - i = encodeVarintMetrics(dAtA, i, 
uint64(m.AggregationTemporality)) - i-- - dAtA[i] = 0x10 - } - if len(m.DataPoints) > 0 { - for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *IntHistogram) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IntHistogram) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IntHistogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.AggregationTemporality != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.AggregationTemporality)) - i-- - dAtA[i] = 0x10 - } - if len(m.DataPoints) > 0 { - for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *DoubleHistogram) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DoubleHistogram) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DoubleHistogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.AggregationTemporality != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.AggregationTemporality)) - i-- - dAtA[i] = 0x10 - } - if len(m.DataPoints) > 0 { - for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *DoubleSummary) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DoubleSummary) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DoubleSummary) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.DataPoints) > 0 { - for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *IntDataPoint) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IntDataPoint) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IntDataPoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Exemplars) > 
0 { - for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - } - if m.Value != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Value)) - i-- - dAtA[i] = 0x21 - } - if m.TimeUnixNano != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) - i-- - dAtA[i] = 0x19 - } - if m.StartTimeUnixNano != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano)) - i-- - dAtA[i] = 0x11 - } - if len(m.Labels) > 0 { - for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *DoubleDataPoint) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DoubleDataPoint) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DoubleDataPoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Exemplars) > 0 { - for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - } - if m.Value != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) - i-- - dAtA[i] = 0x21 - } - if m.TimeUnixNano != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) - i-- - dAtA[i] = 0x19 - } - if m.StartTimeUnixNano != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano)) - i-- - dAtA[i] = 0x11 - } - if len(m.Labels) > 0 { - for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *IntHistogramDataPoint) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IntHistogramDataPoint) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IntHistogramDataPoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Exemplars) > 0 { - for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - } - if len(m.ExplicitBounds) > 0 { - for iNdEx := len(m.ExplicitBounds) - 1; iNdEx >= 0; iNdEx-- { - f10 := math.Float64bits(float64(m.ExplicitBounds[iNdEx])) - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f10)) - } - i = 
encodeVarintMetrics(dAtA, i, uint64(len(m.ExplicitBounds)*8)) - i-- - dAtA[i] = 0x3a - } - if len(m.BucketCounts) > 0 { - for iNdEx := len(m.BucketCounts) - 1; iNdEx >= 0; iNdEx-- { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.BucketCounts[iNdEx])) - } - i = encodeVarintMetrics(dAtA, i, uint64(len(m.BucketCounts)*8)) - i-- - dAtA[i] = 0x32 - } - if m.Sum != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Sum)) - i-- - dAtA[i] = 0x29 - } - if m.Count != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Count)) - i-- - dAtA[i] = 0x21 - } - if m.TimeUnixNano != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) - i-- - dAtA[i] = 0x19 - } - if m.StartTimeUnixNano != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano)) - i-- - dAtA[i] = 0x11 - } - if len(m.Labels) > 0 { - for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *DoubleHistogramDataPoint) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DoubleHistogramDataPoint) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DoubleHistogramDataPoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Exemplars) > 0 { - for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - } - if len(m.ExplicitBounds) > 0 { - for iNdEx := len(m.ExplicitBounds) - 1; iNdEx >= 0; iNdEx-- { - f11 := math.Float64bits(float64(m.ExplicitBounds[iNdEx])) - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f11)) - } - i = encodeVarintMetrics(dAtA, i, uint64(len(m.ExplicitBounds)*8)) - i-- - dAtA[i] = 0x3a - } - if len(m.BucketCounts) > 0 { - for iNdEx := len(m.BucketCounts) - 1; iNdEx >= 0; iNdEx-- { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.BucketCounts[iNdEx])) - } - i = encodeVarintMetrics(dAtA, i, uint64(len(m.BucketCounts)*8)) - i-- - dAtA[i] = 0x32 - } - if m.Sum != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum)))) - i-- - dAtA[i] = 0x29 - } - if m.Count != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Count)) - i-- - dAtA[i] = 0x21 - } - if m.TimeUnixNano != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) - i-- - dAtA[i] = 0x19 - } - if m.StartTimeUnixNano != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano)) - i-- - dAtA[i] = 0x11 - } - if len(m.Labels) > 0 { - for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *DoubleSummaryDataPoint) Marshal() (dAtA []byte, err 
error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DoubleSummaryDataPoint) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DoubleSummaryDataPoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.QuantileValues) > 0 { - for iNdEx := len(m.QuantileValues) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.QuantileValues[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - } - if m.Sum != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum)))) - i-- - dAtA[i] = 0x29 - } - if m.Count != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Count)) - i-- - dAtA[i] = 0x21 - } - if m.TimeUnixNano != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) - i-- - dAtA[i] = 0x19 - } - if m.StartTimeUnixNano != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano)) - i-- - dAtA[i] = 0x11 - } - if len(m.Labels) > 0 { - for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *DoubleSummaryDataPoint_ValueAtQuantile) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DoubleSummaryDataPoint_ValueAtQuantile) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DoubleSummaryDataPoint_ValueAtQuantile) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Value != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) - i-- - dAtA[i] = 0x11 - } - if m.Quantile != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Quantile)))) - i-- - dAtA[i] = 0x9 - } - return len(dAtA) - i, nil -} - -func (m *IntExemplar) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IntExemplar) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IntExemplar) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size := m.TraceId.Size() - i -= size - if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - { - size := m.SpanId.Size() - i -= size - if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - if m.Value != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Value)) - i-- - dAtA[i] = 0x19 - } - if m.TimeUnixNano != 0 { - i -= 8 - 
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) - i-- - dAtA[i] = 0x11 - } - if len(m.FilteredLabels) > 0 { - for iNdEx := len(m.FilteredLabels) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.FilteredLabels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *DoubleExemplar) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DoubleExemplar) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DoubleExemplar) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size := m.TraceId.Size() - i -= size - if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - { - size := m.SpanId.Size() - i -= size - if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - if m.Value != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) - i-- - dAtA[i] = 0x19 - } - if m.TimeUnixNano != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) - i-- - dAtA[i] = 0x11 - } - if len(m.FilteredLabels) > 0 { - for iNdEx := len(m.FilteredLabels) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.FilteredLabels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int { - offset -= sovMetrics(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ResourceMetrics) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Resource.Size() - n += 1 + l + sovMetrics(uint64(l)) - if len(m.InstrumentationLibraryMetrics) > 0 { - for _, e := range m.InstrumentationLibraryMetrics { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - return n -} - -func (m *InstrumentationLibraryMetrics) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.InstrumentationLibrary.Size() - n += 1 + l + sovMetrics(uint64(l)) - if len(m.Metrics) > 0 { - for _, e := range m.Metrics { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - return n -} - -func (m *Metric) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovMetrics(uint64(l)) - } - l = len(m.Description) - if l > 0 { - n += 1 + l + sovMetrics(uint64(l)) - } - l = len(m.Unit) - if l > 0 { - n += 1 + l + sovMetrics(uint64(l)) - } - if m.Data != nil { - n += m.Data.Size() - } - return n -} - -func (m *Metric_IntGauge) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.IntGauge != nil { - l = m.IntGauge.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - return n -} -func (m *Metric_DoubleGauge) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.DoubleGauge != nil { - l = m.DoubleGauge.Size() - n += 1 
+ l + sovMetrics(uint64(l)) - } - return n -} -func (m *Metric_IntSum) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.IntSum != nil { - l = m.IntSum.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - return n -} -func (m *Metric_DoubleSum) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.DoubleSum != nil { - l = m.DoubleSum.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - return n -} -func (m *Metric_IntHistogram) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.IntHistogram != nil { - l = m.IntHistogram.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - return n -} -func (m *Metric_DoubleHistogram) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.DoubleHistogram != nil { - l = m.DoubleHistogram.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - return n -} -func (m *Metric_DoubleSummary) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.DoubleSummary != nil { - l = m.DoubleSummary.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - return n -} -func (m *IntGauge) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.DataPoints) > 0 { - for _, e := range m.DataPoints { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - return n -} - -func (m *DoubleGauge) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.DataPoints) > 0 { - for _, e := range m.DataPoints { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - return n -} - -func (m *IntSum) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.DataPoints) > 0 { - for _, e := range m.DataPoints { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if m.AggregationTemporality != 0 { - n += 1 + sovMetrics(uint64(m.AggregationTemporality)) - } - if m.IsMonotonic { - n += 2 - } - return n -} - -func (m *DoubleSum) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.DataPoints) > 0 { - for _, e := range m.DataPoints { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if m.AggregationTemporality != 0 { - n += 1 + sovMetrics(uint64(m.AggregationTemporality)) - } - if m.IsMonotonic { - n += 2 - } - return n -} - -func (m *IntHistogram) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.DataPoints) > 0 { - for _, e := range m.DataPoints { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if m.AggregationTemporality != 0 { - n += 1 + sovMetrics(uint64(m.AggregationTemporality)) - } - return n -} - -func (m *DoubleHistogram) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.DataPoints) > 0 { - for _, e := range m.DataPoints { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if m.AggregationTemporality != 0 { - n += 1 + sovMetrics(uint64(m.AggregationTemporality)) - } - return n -} - -func (m *DoubleSummary) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.DataPoints) > 0 { - for _, e := range m.DataPoints { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - return n -} - -func (m *IntDataPoint) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Labels) > 0 { - for _, e := range m.Labels { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if m.StartTimeUnixNano != 0 { - n += 9 - } - if m.TimeUnixNano != 0 { - n += 9 - } - if m.Value != 0 { - n += 9 - } - if len(m.Exemplars) > 0 { - for _, e := range m.Exemplars { - l = 
e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - return n -} - -func (m *DoubleDataPoint) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Labels) > 0 { - for _, e := range m.Labels { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if m.StartTimeUnixNano != 0 { - n += 9 - } - if m.TimeUnixNano != 0 { - n += 9 - } - if m.Value != 0 { - n += 9 - } - if len(m.Exemplars) > 0 { - for _, e := range m.Exemplars { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - return n -} - -func (m *IntHistogramDataPoint) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Labels) > 0 { - for _, e := range m.Labels { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if m.StartTimeUnixNano != 0 { - n += 9 - } - if m.TimeUnixNano != 0 { - n += 9 - } - if m.Count != 0 { - n += 9 - } - if m.Sum != 0 { - n += 9 - } - if len(m.BucketCounts) > 0 { - n += 1 + sovMetrics(uint64(len(m.BucketCounts)*8)) + len(m.BucketCounts)*8 - } - if len(m.ExplicitBounds) > 0 { - n += 1 + sovMetrics(uint64(len(m.ExplicitBounds)*8)) + len(m.ExplicitBounds)*8 - } - if len(m.Exemplars) > 0 { - for _, e := range m.Exemplars { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - return n -} - -func (m *DoubleHistogramDataPoint) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Labels) > 0 { - for _, e := range m.Labels { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if m.StartTimeUnixNano != 0 { - n += 9 - } - if m.TimeUnixNano != 0 { - n += 9 - } - if m.Count != 0 { - n += 9 - } - if m.Sum != 0 { - n += 9 - } - if len(m.BucketCounts) > 0 { - n += 1 + sovMetrics(uint64(len(m.BucketCounts)*8)) + len(m.BucketCounts)*8 - } - if len(m.ExplicitBounds) > 0 { - n += 1 + sovMetrics(uint64(len(m.ExplicitBounds)*8)) + len(m.ExplicitBounds)*8 - } - if len(m.Exemplars) > 0 { - for _, e := range m.Exemplars { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - return n -} - -func (m *DoubleSummaryDataPoint) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Labels) > 0 { - for _, e := range m.Labels { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if m.StartTimeUnixNano != 0 { - n += 9 - } - if m.TimeUnixNano != 0 { - n += 9 - } - if m.Count != 0 { - n += 9 - } - if m.Sum != 0 { - n += 9 - } - if len(m.QuantileValues) > 0 { - for _, e := range m.QuantileValues { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - return n -} - -func (m *DoubleSummaryDataPoint_ValueAtQuantile) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Quantile != 0 { - n += 9 - } - if m.Value != 0 { - n += 9 - } - return n -} - -func (m *IntExemplar) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.FilteredLabels) > 0 { - for _, e := range m.FilteredLabels { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if m.TimeUnixNano != 0 { - n += 9 - } - if m.Value != 0 { - n += 9 - } - l = m.SpanId.Size() - n += 1 + l + sovMetrics(uint64(l)) - l = m.TraceId.Size() - n += 1 + l + sovMetrics(uint64(l)) - return n -} - -func (m *DoubleExemplar) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.FilteredLabels) > 0 { - for _, e := range m.FilteredLabels { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if m.TimeUnixNano != 0 { - n += 9 - } - if m.Value != 0 { - n += 9 - } - l = m.SpanId.Size() - n += 1 + l + sovMetrics(uint64(l)) - l = m.TraceId.Size() - n += 1 + l + 
sovMetrics(uint64(l)) - return n -} - -func sovMetrics(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozMetrics(x uint64) (n int) { - return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *ResourceMetrics) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceMetrics: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceMetrics: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InstrumentationLibraryMetrics", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.InstrumentationLibraryMetrics = append(m.InstrumentationLibraryMetrics, &InstrumentationLibraryMetrics{}) - if err := m.InstrumentationLibraryMetrics[len(m.InstrumentationLibraryMetrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *InstrumentationLibraryMetrics) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: InstrumentationLibraryMetrics: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: InstrumentationLibraryMetrics: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field InstrumentationLibrary", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.InstrumentationLibrary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Metrics = append(m.Metrics, &Metric{}) - if err := m.Metrics[len(m.Metrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Metric) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Metric: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Metric: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return 
ErrInvalidLengthMetrics - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Unit = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IntGauge", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &IntGauge{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Data = &Metric_IntGauge{v} - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DoubleGauge", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &DoubleGauge{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Data = &Metric_DoubleGauge{v} - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IntSum", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &IntSum{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Data = &Metric_IntSum{v} - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DoubleSum", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return 
ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &DoubleSum{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Data = &Metric_DoubleSum{v} - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IntHistogram", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &IntHistogram{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Data = &Metric_IntHistogram{v} - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Histogram", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &DoubleHistogram{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Data = &Metric_DoubleHistogram{v} - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Summary", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &DoubleSummary{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Data = &Metric_DoubleSummary{v} - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IntGauge) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IntGauge: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IntGauge: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType) - } - var msglen int - 
for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DataPoints = append(m.DataPoints, &IntDataPoint{}) - if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DoubleGauge) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DoubleGauge: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DoubleGauge: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DataPoints = append(m.DataPoints, &DoubleDataPoint{}) - if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IntSum) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IntSum: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IntSum: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType) - } - var msglen int - for shift := uint(0); ; 
shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DataPoints = append(m.DataPoints, &IntDataPoint{}) - if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType) - } - m.AggregationTemporality = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.AggregationTemporality |= AggregationTemporality(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IsMonotonic", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.IsMonotonic = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DoubleSum) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DoubleSum: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DoubleSum: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DataPoints = append(m.DataPoints, &DoubleDataPoint{}) - if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType) - } - m.AggregationTemporality = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.AggregationTemporality |= 
AggregationTemporality(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IsMonotonic", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.IsMonotonic = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IntHistogram) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IntHistogram: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IntHistogram: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DataPoints = append(m.DataPoints, &IntHistogramDataPoint{}) - if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType) - } - m.AggregationTemporality = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.AggregationTemporality |= AggregationTemporality(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DoubleHistogram) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Histogram: wiretype end group for 
non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Histogram: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DataPoints = append(m.DataPoints, &DoubleHistogramDataPoint{}) - if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType) - } - m.AggregationTemporality = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.AggregationTemporality |= AggregationTemporality(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DoubleSummary) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Summary: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Summary: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DataPoints = append(m.DataPoints, &DoubleSummaryDataPoint{}) - if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IntDataPoint) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for 
iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IntDataPoint: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IntDataPoint: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Labels = append(m.Labels, v11.StringKeyValue{}) - if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType) - } - m.StartTimeUnixNano = 0 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - case 3: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) - } - m.TimeUnixNano = 0 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - case 4: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - m.Value = 0 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - m.Value = int64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Exemplars = append(m.Exemplars, IntExemplar{}) - if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DoubleDataPoint) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b 
:= dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DoubleDataPoint: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DoubleDataPoint: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Labels = append(m.Labels, v11.StringKeyValue{}) - if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType) - } - m.StartTimeUnixNano = 0 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - case 3: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) - } - m.TimeUnixNano = 0 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - case 4: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.Value = float64(math.Float64frombits(v)) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Exemplars = append(m.Exemplars, DoubleExemplar{}) - if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IntHistogramDataPoint) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := 
int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IntHistogramDataPoint: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IntHistogramDataPoint: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Labels = append(m.Labels, v11.StringKeyValue{}) - if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType) - } - m.StartTimeUnixNano = 0 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - case 3: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) - } - m.TimeUnixNano = 0 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - case 4: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) - } - m.Count = 0 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - m.Count = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - case 5: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) - } - m.Sum = 0 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - m.Sum = int64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - case 6: - if wireType == 1 { - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.BucketCounts = append(m.BucketCounts, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - elementCount = packedLen / 8 - if elementCount != 0 && len(m.BucketCounts) == 0 { - m.BucketCounts = make([]uint64, 0, elementCount) - } - for iNdEx < postIndex { - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.BucketCounts = append(m.BucketCounts, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field BucketCounts", wireType) - } - case 7: - if wireType == 1 { - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - v2 := float64(math.Float64frombits(v)) - m.ExplicitBounds = 
append(m.ExplicitBounds, v2) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - elementCount = packedLen / 8 - if elementCount != 0 && len(m.ExplicitBounds) == 0 { - m.ExplicitBounds = make([]float64, 0, elementCount) - } - for iNdEx < postIndex { - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - v2 := float64(math.Float64frombits(v)) - m.ExplicitBounds = append(m.ExplicitBounds, v2) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field ExplicitBounds", wireType) - } - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Exemplars = append(m.Exemplars, IntExemplar{}) - if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DoubleHistogramDataPoint) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DoubleHistogramDataPoint: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DoubleHistogramDataPoint: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Labels = append(m.Labels, v11.StringKeyValue{}) - if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != 
nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType) - } - m.StartTimeUnixNano = 0 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - case 3: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) - } - m.TimeUnixNano = 0 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - case 4: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) - } - m.Count = 0 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - m.Count = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - case 5: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.Sum = float64(math.Float64frombits(v)) - case 6: - if wireType == 1 { - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.BucketCounts = append(m.BucketCounts, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - elementCount = packedLen / 8 - if elementCount != 0 && len(m.BucketCounts) == 0 { - m.BucketCounts = make([]uint64, 0, elementCount) - } - for iNdEx < postIndex { - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.BucketCounts = append(m.BucketCounts, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field BucketCounts", wireType) - } - case 7: - if wireType == 1 { - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - v2 := float64(math.Float64frombits(v)) - m.ExplicitBounds = append(m.ExplicitBounds, v2) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - elementCount = packedLen / 8 - if elementCount != 0 && len(m.ExplicitBounds) == 0 { - m.ExplicitBounds = make([]float64, 0, elementCount) - } - for iNdEx < postIndex { - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - v2 := float64(math.Float64frombits(v)) - m.ExplicitBounds 
= append(m.ExplicitBounds, v2) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field ExplicitBounds", wireType) - } - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Exemplars = append(m.Exemplars, DoubleExemplar{}) - if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DoubleSummaryDataPoint) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DoubleSummaryDataPoint: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DoubleSummaryDataPoint: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Labels = append(m.Labels, v11.StringKeyValue{}) - if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType) - } - m.StartTimeUnixNano = 0 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - case 3: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) - } - m.TimeUnixNano = 0 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - case 4: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) - } - m.Count = 0 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - m.Count = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - case 5: - if wireType != 1 { - 
return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.Sum = float64(math.Float64frombits(v)) - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field QuantileValues", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.QuantileValues = append(m.QuantileValues, &DoubleSummaryDataPoint_ValueAtQuantile{}) - if err := m.QuantileValues[len(m.QuantileValues)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DoubleSummaryDataPoint_ValueAtQuantile) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ValueAtQuantile: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ValueAtQuantile: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Quantile", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.Quantile = float64(math.Float64frombits(v)) - case 2: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.Value = float64(math.Float64frombits(v)) - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IntExemplar) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return 
fmt.Errorf("proto: IntExemplar: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IntExemplar: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FilteredLabels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FilteredLabels = append(m.FilteredLabels, v11.StringKeyValue{}) - if err := m.FilteredLabels[len(m.FilteredLabels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) - } - m.TimeUnixNano = 0 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - case 3: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - m.Value = 0 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - m.Value = int64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DoubleExemplar) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= 
uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DoubleExemplar: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DoubleExemplar: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FilteredLabels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FilteredLabels = append(m.FilteredLabels, v11.StringKeyValue{}) - if err := m.FilteredLabels[len(m.FilteredLabels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) - } - m.TimeUnixNano = 0 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - case 3: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.Value = float64(math.Float64frombits(v)) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipMetrics(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for 
shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetrics - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetrics - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetrics - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthMetrics - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupMetrics - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthMetrics - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowMetrics = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupMetrics = fmt.Errorf("proto: unexpected end of group") -) diff --git a/internal/otel_collector/internal/data/protogen/resource/v1/resource.pb.go b/internal/otel_collector/internal/data/protogen/resource/v1/resource.pb.go deleted file mode 100644 index 95a287ab1cf..00000000000 --- a/internal/otel_collector/internal/data/protogen/resource/v1/resource.pb.go +++ /dev/null @@ -1,378 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: opentelemetry/proto/resource/v1/resource.proto - -package v1 - -import ( - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - - v1 "go.opentelemetry.io/collector/internal/data/protogen/common/v1" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// Resource information. -type Resource struct { - // Set of labels that describe the resource. - Attributes []v1.KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes"` - // dropped_attributes_count is the number of dropped attributes. If the value is 0, then - // no attributes were dropped. 
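Aside, not part of the patch: every Unmarshal and skip function in the generated code deleted above repeats the same base-128 varint loop to read tags and lengths, then splits the tag into a field number and wire type. A minimal standalone sketch of that loop, assuming only the Go standard library:

package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the loop the generated Unmarshal functions repeat for
// every tag and length: accumulate 7 bits per byte, least significant group
// first, until a byte with the high bit clear ends the value.
func decodeVarint(buf []byte, i int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, i, errors.New("varint overflows 64 bits")
		}
		if i >= len(buf) {
			return 0, i, errors.New("unexpected end of buffer")
		}
		b := buf[i]
		i++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: last byte of this varint
			break
		}
	}
	return v, i, nil
}

func main() {
	// 0x0A is the tag for field 1 with wire type 2; 0xAC 0x02 is the varint 300.
	buf := []byte{0x0A, 0xAC, 0x02}
	tag, next, _ := decodeVarint(buf, 0)
	length, _, _ := decodeVarint(buf, next)
	fmt.Println(tag>>3, tag&0x7, length) // 1 2 300
}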
- DroppedAttributesCount uint32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` -} - -func (m *Resource) Reset() { *m = Resource{} } -func (m *Resource) String() string { return proto.CompactTextString(m) } -func (*Resource) ProtoMessage() {} -func (*Resource) Descriptor() ([]byte, []int) { - return fileDescriptor_446f73eacf88f3f5, []int{0} -} -func (m *Resource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Resource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Resource.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Resource) XXX_Merge(src proto.Message) { - xxx_messageInfo_Resource.Merge(m, src) -} -func (m *Resource) XXX_Size() int { - return m.Size() -} -func (m *Resource) XXX_DiscardUnknown() { - xxx_messageInfo_Resource.DiscardUnknown(m) -} - -var xxx_messageInfo_Resource proto.InternalMessageInfo - -func (m *Resource) GetAttributes() []v1.KeyValue { - if m != nil { - return m.Attributes - } - return nil -} - -func (m *Resource) GetDroppedAttributesCount() uint32 { - if m != nil { - return m.DroppedAttributesCount - } - return 0 -} - -func init() { - proto.RegisterType((*Resource)(nil), "opentelemetry.proto.resource.v1.Resource") -} - -func init() { - proto.RegisterFile("opentelemetry/proto/resource/v1/resource.proto", fileDescriptor_446f73eacf88f3f5) -} - -var fileDescriptor_446f73eacf88f3f5 = []byte{ - // 280 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xcb, 0x2f, 0x48, 0xcd, - 0x2b, 0x49, 0xcd, 0x49, 0xcd, 0x4d, 0x2d, 0x29, 0xaa, 0xd4, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, - 0x2f, 0x4a, 0x2d, 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0xd5, 0x2f, 0x33, 0x84, 0xb3, 0xf5, 0xc0, 0x52, - 0x42, 0xf2, 0x28, 0xea, 0x21, 0x82, 0x7a, 0x70, 0x35, 0x65, 0x86, 0x52, 0x22, 0xe9, 0xf9, 0xe9, - 0xf9, 0x10, 0x63, 0x40, 0x2c, 0x88, 0x0a, 0x29, 0x2d, 0x6c, 0xd6, 0x24, 0xe7, 0xe7, 0xe6, 0xe6, - 0xe7, 0x81, 0x2c, 0x81, 0xb0, 0x20, 0x6a, 0x95, 0x26, 0x33, 0x72, 0x71, 0x04, 0x41, 0x4d, 0x14, - 0xf2, 0xe5, 0xe2, 0x4a, 0x2c, 0x29, 0x29, 0xca, 0x4c, 0x2a, 0x2d, 0x49, 0x2d, 0x96, 0x60, 0x54, - 0x60, 0xd6, 0xe0, 0x36, 0x52, 0xd7, 0xc3, 0xe6, 0x08, 0xa8, 0x19, 0x65, 0x86, 0x7a, 0xde, 0xa9, - 0x95, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x4e, 0x2c, 0x27, 0xee, 0xc9, 0x33, 0x04, 0x21, 0x19, 0x20, - 0x64, 0xc1, 0x25, 0x91, 0x52, 0x94, 0x5f, 0x50, 0x90, 0x9a, 0x12, 0x8f, 0x10, 0x8d, 0x4f, 0xce, - 0x2f, 0xcd, 0x2b, 0x91, 0x60, 0x52, 0x60, 0xd4, 0xe0, 0x0d, 0x12, 0x83, 0xca, 0x3b, 0xc2, 0xa5, - 0x9d, 0x41, 0xb2, 0x4e, 0xfd, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, - 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x17, 0x1e, 0xcb, 0x31, 0xdc, 0x78, 0x2c, 0xc7, 0xc0, - 0xa5, 0x94, 0x99, 0xaf, 0x47, 0x20, 0x58, 0x9c, 0x78, 0x61, 0x3e, 0x0a, 0x00, 0x49, 0x05, 0x30, - 0x46, 0x39, 0xa4, 0xa3, 0x6b, 0xca, 0x04, 0x85, 0x48, 0x4e, 0x4e, 0x6a, 0x72, 0x49, 0x7e, 0x91, - 0x7e, 0x66, 0x5e, 0x49, 0x6a, 0x51, 0x5e, 0x62, 0x8e, 0x7e, 0x4a, 0x62, 0x49, 0x22, 0x24, 0xbc, - 0xd2, 0x53, 0xf3, 0x90, 0x63, 0x26, 0x89, 0x0d, 0x2c, 0x6a, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, - 0x4e, 0x00, 0x89, 0x9d, 0xc3, 0x01, 0x00, 0x00, -} - -func (m *Resource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err 
!= nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Resource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Resource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.DroppedAttributesCount != 0 { - i = encodeVarintResource(dAtA, i, uint64(m.DroppedAttributesCount)) - i-- - dAtA[i] = 0x10 - } - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintResource(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintResource(dAtA []byte, offset int, v uint64) int { - offset -= sovResource(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Resource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovResource(uint64(l)) - } - } - if m.DroppedAttributesCount != 0 { - n += 1 + sovResource(uint64(m.DroppedAttributesCount)) - } - return n -} - -func sovResource(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozResource(x uint64) (n int) { - return sovResource(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Resource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResource - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Resource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Resource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResource - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthResource - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthResource - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, v1.KeyValue{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType) - } - m.DroppedAttributesCount = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResource - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DroppedAttributesCount |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipResource(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthResource - } - if (iNdEx + 
skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipResource(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowResource - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowResource - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowResource - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthResource - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupResource - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthResource - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthResource = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowResource = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupResource = fmt.Errorf("proto: unexpected end of group") -) diff --git a/internal/otel_collector/internal/data/protogen/trace/v1/trace.pb.go b/internal/otel_collector/internal/data/protogen/trace/v1/trace.pb.go deleted file mode 100644 index 38bf594d79e..00000000000 --- a/internal/otel_collector/internal/data/protogen/trace/v1/trace.pb.go +++ /dev/null @@ -1,2649 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: opentelemetry/proto/trace/v1/trace.proto - -package v1 - -import ( - encoding_binary "encoding/binary" - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - - go_opentelemetry_io_collector_internal_data "go.opentelemetry.io/collector/internal/data" - v11 "go.opentelemetry.io/collector/internal/data/protogen/common/v1" - v1 "go.opentelemetry.io/collector/internal/data/protogen/resource/v1" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// SpanKind is the type of span. Can be used to specify additional relationships between spans -// in addition to a parent/child relationship. -type Span_SpanKind int32 - -const ( - // Unspecified. Do NOT use as default. - // Implementations MAY assume SpanKind to be INTERNAL when receiving UNSPECIFIED. 
- Span_SPAN_KIND_UNSPECIFIED Span_SpanKind = 0 - // Indicates that the span represents an internal operation within an application, - // as opposed to an operations happening at the boundaries. Default value. - Span_SPAN_KIND_INTERNAL Span_SpanKind = 1 - // Indicates that the span covers server-side handling of an RPC or other - // remote network request. - Span_SPAN_KIND_SERVER Span_SpanKind = 2 - // Indicates that the span describes a request to some remote service. - Span_SPAN_KIND_CLIENT Span_SpanKind = 3 - // Indicates that the span describes a producer sending a message to a broker. - // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship - // between producer and consumer spans. A PRODUCER span ends when the message was accepted - // by the broker while the logical processing of the message might span a much longer time. - Span_SPAN_KIND_PRODUCER Span_SpanKind = 4 - // Indicates that the span describes consumer receiving a message from a broker. - // Like the PRODUCER kind, there is often no direct critical path latency relationship - // between producer and consumer spans. - Span_SPAN_KIND_CONSUMER Span_SpanKind = 5 -) - -var Span_SpanKind_name = map[int32]string{ - 0: "SPAN_KIND_UNSPECIFIED", - 1: "SPAN_KIND_INTERNAL", - 2: "SPAN_KIND_SERVER", - 3: "SPAN_KIND_CLIENT", - 4: "SPAN_KIND_PRODUCER", - 5: "SPAN_KIND_CONSUMER", -} - -var Span_SpanKind_value = map[string]int32{ - "SPAN_KIND_UNSPECIFIED": 0, - "SPAN_KIND_INTERNAL": 1, - "SPAN_KIND_SERVER": 2, - "SPAN_KIND_CLIENT": 3, - "SPAN_KIND_PRODUCER": 4, - "SPAN_KIND_CONSUMER": 5, -} - -func (x Span_SpanKind) String() string { - return proto.EnumName(Span_SpanKind_name, int32(x)) -} - -func (Span_SpanKind) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_5c407ac9c675a601, []int{2, 0} -} - -type Status_DeprecatedStatusCode int32 - -const ( - Status_DEPRECATED_STATUS_CODE_OK Status_DeprecatedStatusCode = 0 - Status_DEPRECATED_STATUS_CODE_CANCELLED Status_DeprecatedStatusCode = 1 - Status_DEPRECATED_STATUS_CODE_UNKNOWN_ERROR Status_DeprecatedStatusCode = 2 - Status_DEPRECATED_STATUS_CODE_INVALID_ARGUMENT Status_DeprecatedStatusCode = 3 - Status_DEPRECATED_STATUS_CODE_DEADLINE_EXCEEDED Status_DeprecatedStatusCode = 4 - Status_DEPRECATED_STATUS_CODE_NOT_FOUND Status_DeprecatedStatusCode = 5 - Status_DEPRECATED_STATUS_CODE_ALREADY_EXISTS Status_DeprecatedStatusCode = 6 - Status_DEPRECATED_STATUS_CODE_PERMISSION_DENIED Status_DeprecatedStatusCode = 7 - Status_DEPRECATED_STATUS_CODE_RESOURCE_EXHAUSTED Status_DeprecatedStatusCode = 8 - Status_DEPRECATED_STATUS_CODE_FAILED_PRECONDITION Status_DeprecatedStatusCode = 9 - Status_DEPRECATED_STATUS_CODE_ABORTED Status_DeprecatedStatusCode = 10 - Status_DEPRECATED_STATUS_CODE_OUT_OF_RANGE Status_DeprecatedStatusCode = 11 - Status_DEPRECATED_STATUS_CODE_UNIMPLEMENTED Status_DeprecatedStatusCode = 12 - Status_DEPRECATED_STATUS_CODE_INTERNAL_ERROR Status_DeprecatedStatusCode = 13 - Status_DEPRECATED_STATUS_CODE_UNAVAILABLE Status_DeprecatedStatusCode = 14 - Status_DEPRECATED_STATUS_CODE_DATA_LOSS Status_DeprecatedStatusCode = 15 - Status_DEPRECATED_STATUS_CODE_UNAUTHENTICATED Status_DeprecatedStatusCode = 16 -) - -var Status_DeprecatedStatusCode_name = map[int32]string{ - 0: "DEPRECATED_STATUS_CODE_OK", - 1: "DEPRECATED_STATUS_CODE_CANCELLED", - 2: "DEPRECATED_STATUS_CODE_UNKNOWN_ERROR", - 3: "DEPRECATED_STATUS_CODE_INVALID_ARGUMENT", - 4: "DEPRECATED_STATUS_CODE_DEADLINE_EXCEEDED", - 5: "DEPRECATED_STATUS_CODE_NOT_FOUND", - 6: 
"DEPRECATED_STATUS_CODE_ALREADY_EXISTS", - 7: "DEPRECATED_STATUS_CODE_PERMISSION_DENIED", - 8: "DEPRECATED_STATUS_CODE_RESOURCE_EXHAUSTED", - 9: "DEPRECATED_STATUS_CODE_FAILED_PRECONDITION", - 10: "DEPRECATED_STATUS_CODE_ABORTED", - 11: "DEPRECATED_STATUS_CODE_OUT_OF_RANGE", - 12: "DEPRECATED_STATUS_CODE_UNIMPLEMENTED", - 13: "DEPRECATED_STATUS_CODE_INTERNAL_ERROR", - 14: "DEPRECATED_STATUS_CODE_UNAVAILABLE", - 15: "DEPRECATED_STATUS_CODE_DATA_LOSS", - 16: "DEPRECATED_STATUS_CODE_UNAUTHENTICATED", -} - -var Status_DeprecatedStatusCode_value = map[string]int32{ - "DEPRECATED_STATUS_CODE_OK": 0, - "DEPRECATED_STATUS_CODE_CANCELLED": 1, - "DEPRECATED_STATUS_CODE_UNKNOWN_ERROR": 2, - "DEPRECATED_STATUS_CODE_INVALID_ARGUMENT": 3, - "DEPRECATED_STATUS_CODE_DEADLINE_EXCEEDED": 4, - "DEPRECATED_STATUS_CODE_NOT_FOUND": 5, - "DEPRECATED_STATUS_CODE_ALREADY_EXISTS": 6, - "DEPRECATED_STATUS_CODE_PERMISSION_DENIED": 7, - "DEPRECATED_STATUS_CODE_RESOURCE_EXHAUSTED": 8, - "DEPRECATED_STATUS_CODE_FAILED_PRECONDITION": 9, - "DEPRECATED_STATUS_CODE_ABORTED": 10, - "DEPRECATED_STATUS_CODE_OUT_OF_RANGE": 11, - "DEPRECATED_STATUS_CODE_UNIMPLEMENTED": 12, - "DEPRECATED_STATUS_CODE_INTERNAL_ERROR": 13, - "DEPRECATED_STATUS_CODE_UNAVAILABLE": 14, - "DEPRECATED_STATUS_CODE_DATA_LOSS": 15, - "DEPRECATED_STATUS_CODE_UNAUTHENTICATED": 16, -} - -func (x Status_DeprecatedStatusCode) String() string { - return proto.EnumName(Status_DeprecatedStatusCode_name, int32(x)) -} - -func (Status_DeprecatedStatusCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_5c407ac9c675a601, []int{3, 0} -} - -// For the semantics of status codes see -// https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/api.md#set-status -type Status_StatusCode int32 - -const ( - // The default status. - Status_STATUS_CODE_UNSET Status_StatusCode = 0 - // The Span has been validated by an Application developers or Operator to have - // completed successfully. - Status_STATUS_CODE_OK Status_StatusCode = 1 - // The Span contains an error. - Status_STATUS_CODE_ERROR Status_StatusCode = 2 -) - -var Status_StatusCode_name = map[int32]string{ - 0: "STATUS_CODE_UNSET", - 1: "STATUS_CODE_OK", - 2: "STATUS_CODE_ERROR", -} - -var Status_StatusCode_value = map[string]int32{ - "STATUS_CODE_UNSET": 0, - "STATUS_CODE_OK": 1, - "STATUS_CODE_ERROR": 2, -} - -func (x Status_StatusCode) String() string { - return proto.EnumName(Status_StatusCode_name, int32(x)) -} - -func (Status_StatusCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_5c407ac9c675a601, []int{3, 1} -} - -// A collection of InstrumentationLibrarySpans from a Resource. -type ResourceSpans struct { - // The resource for the spans in this message. - // If this field is not set then no resource info is known. - Resource v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource"` - // A list of InstrumentationLibrarySpans that originate from a resource. 
- InstrumentationLibrarySpans []*InstrumentationLibrarySpans `protobuf:"bytes,2,rep,name=instrumentation_library_spans,json=instrumentationLibrarySpans,proto3" json:"instrumentation_library_spans,omitempty"` -} - -func (m *ResourceSpans) Reset() { *m = ResourceSpans{} } -func (m *ResourceSpans) String() string { return proto.CompactTextString(m) } -func (*ResourceSpans) ProtoMessage() {} -func (*ResourceSpans) Descriptor() ([]byte, []int) { - return fileDescriptor_5c407ac9c675a601, []int{0} -} -func (m *ResourceSpans) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceSpans) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResourceSpans.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResourceSpans) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceSpans.Merge(m, src) -} -func (m *ResourceSpans) XXX_Size() int { - return m.Size() -} -func (m *ResourceSpans) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceSpans.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceSpans proto.InternalMessageInfo - -func (m *ResourceSpans) GetResource() v1.Resource { - if m != nil { - return m.Resource - } - return v1.Resource{} -} - -func (m *ResourceSpans) GetInstrumentationLibrarySpans() []*InstrumentationLibrarySpans { - if m != nil { - return m.InstrumentationLibrarySpans - } - return nil -} - -// A collection of Spans produced by an InstrumentationLibrary. -type InstrumentationLibrarySpans struct { - // The instrumentation library information for the spans in this message. - // If this field is not set then no library info is known. - InstrumentationLibrary v11.InstrumentationLibrary `protobuf:"bytes,1,opt,name=instrumentation_library,json=instrumentationLibrary,proto3" json:"instrumentation_library"` - // A list of Spans that originate from an instrumentation library. 
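The messages above form the outer layers of an OTLP trace payload: ResourceSpans groups spans by the resource that produced them, and each InstrumentationLibrarySpans groups them by instrumentation library. A stripped-down sketch of that nesting (plain structs with illustrative names, not the generated types and not part of this patch):

package main

import "fmt"

// span, instrumentationLibrarySpans and resourceSpans are deliberately
// simplified stand-ins for the generated messages; field names here are
// illustrative, not the protobuf ones.
type span struct{ Name string }

type instrumentationLibrarySpans struct {
	LibraryName string
	Spans       []span
}

type resourceSpans struct {
	ResourceAttrs map[string]string
	Libraries     []instrumentationLibrarySpans
}

func main() {
	rs := resourceSpans{
		ResourceAttrs: map[string]string{"service.name": "checkout"}, // hypothetical service
		Libraries: []instrumentationLibrarySpans{{
			LibraryName: "example-instrumentation", // hypothetical library name
			Spans:       []span{{Name: "HTTP GET /cart"}},
		}},
	}
	fmt.Printf("%d library group(s), %d span(s)\n", len(rs.Libraries), len(rs.Libraries[0].Spans))
}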
- Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"` -} - -func (m *InstrumentationLibrarySpans) Reset() { *m = InstrumentationLibrarySpans{} } -func (m *InstrumentationLibrarySpans) String() string { return proto.CompactTextString(m) } -func (*InstrumentationLibrarySpans) ProtoMessage() {} -func (*InstrumentationLibrarySpans) Descriptor() ([]byte, []int) { - return fileDescriptor_5c407ac9c675a601, []int{1} -} -func (m *InstrumentationLibrarySpans) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *InstrumentationLibrarySpans) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_InstrumentationLibrarySpans.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *InstrumentationLibrarySpans) XXX_Merge(src proto.Message) { - xxx_messageInfo_InstrumentationLibrarySpans.Merge(m, src) -} -func (m *InstrumentationLibrarySpans) XXX_Size() int { - return m.Size() -} -func (m *InstrumentationLibrarySpans) XXX_DiscardUnknown() { - xxx_messageInfo_InstrumentationLibrarySpans.DiscardUnknown(m) -} - -var xxx_messageInfo_InstrumentationLibrarySpans proto.InternalMessageInfo - -func (m *InstrumentationLibrarySpans) GetInstrumentationLibrary() v11.InstrumentationLibrary { - if m != nil { - return m.InstrumentationLibrary - } - return v11.InstrumentationLibrary{} -} - -func (m *InstrumentationLibrarySpans) GetSpans() []*Span { - if m != nil { - return m.Spans - } - return nil -} - -// Span represents a single operation within a trace. Spans can be -// nested to form a trace tree. Spans may also be linked to other spans -// from the same or different trace and form graphs. Often, a trace -// contains a root span that describes the end-to-end latency, and one -// or more subspans for its sub-operations. A trace can also contain -// multiple root spans, or none at all. Spans do not need to be -// contiguous - there may be gaps or overlaps between spans in a trace. -// -// The next available field id is 17. -type Span struct { - // A unique identifier for a trace. All spans from the same trace share - // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes - // is considered invalid. - // - // This field is semantically required. Receiver should generate new - // random trace_id if empty or invalid trace_id was received. - // - // This field is required. - TraceId go_opentelemetry_io_collector_internal_data.TraceID `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/internal/data.TraceID" json:"trace_id"` - // A unique identifier for a span within a trace, assigned when the span - // is created. The ID is an 8-byte array. An ID with all zeroes is considered - // invalid. - // - // This field is semantically required. Receiver should generate new - // random span_id if empty or invalid span_id was received. - // - // This field is required. - SpanId go_opentelemetry_io_collector_internal_data.SpanID `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/internal/data.SpanID" json:"span_id"` - // trace_state conveys information about request position in multiple distributed tracing graphs. - // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header - // See also https://github.com/w3c/distributed-tracing for more details about this field. 
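The comments above fix the ID shapes: a trace_id is a 16-byte array, a span_id is an 8-byte array, and an all-zero value is invalid in both cases. A minimal check along those lines, illustrative only, with arbitrary hex-encoded example IDs:

package main

import (
	"encoding/hex"
	"fmt"
)

// isValidID applies the rule stated in the deleted comments: the ID must have
// the expected length (16 bytes for a trace, 8 for a span) and at least one
// non-zero byte; an all-zero ID is considered invalid.
func isValidID(id []byte, wantLen int) bool {
	if len(id) != wantLen {
		return false
	}
	for _, b := range id {
		if b != 0 {
			return true
		}
	}
	return false
}

func main() {
	traceID, _ := hex.DecodeString("5b8aa5a2d2c872e8321cf37308d69df2") // arbitrary 16-byte example
	spanID, _ := hex.DecodeString("051581bf3cb55c13")                  // arbitrary 8-byte example
	allZero := make([]byte, 16)

	fmt.Println(isValidID(traceID, 16), isValidID(spanID, 8), isValidID(allZero, 16)) // true true false
}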
- TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"` - // The `span_id` of this span's parent span. If this is a root span, then this - // field must be empty. The ID is an 8-byte array. - ParentSpanId go_opentelemetry_io_collector_internal_data.SpanID `protobuf:"bytes,4,opt,name=parent_span_id,json=parentSpanId,proto3,customtype=go.opentelemetry.io/collector/internal/data.SpanID" json:"parent_span_id"` - // A description of the span's operation. - // - // For example, the name can be a qualified method name or a file name - // and a line number where the operation is called. A best practice is to use - // the same display name at the same call point in an application. - // This makes it easier to correlate spans in different traces. - // - // This field is semantically required to be set to non-empty string. - // When null or empty string received - receiver may use string "name" - // as a replacement. There might be smarted algorithms implemented by - // receiver to fix the empty span name. - // - // This field is required. - Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` - // Distinguishes between spans generated in a particular context. For example, - // two spans with the same name may be distinguished using `CLIENT` (caller) - // and `SERVER` (callee) to identify queueing latency associated with the span. - Kind Span_SpanKind `protobuf:"varint,6,opt,name=kind,proto3,enum=opentelemetry.proto.trace.v1.Span_SpanKind" json:"kind,omitempty"` - // start_time_unix_nano is the start time of the span. On the client side, this is the time - // kept by the local machine where the span execution starts. On the server side, this - // is the time when the server's application handler starts running. - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // - // This field is semantically required and it is expected that end_time >= start_time. - StartTimeUnixNano uint64 `protobuf:"fixed64,7,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` - // end_time_unix_nano is the end time of the span. On the client side, this is the time - // kept by the local machine where the span execution ends. On the server side, this - // is the time when the server application handler stops running. - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // - // This field is semantically required and it is expected that end_time >= start_time. - EndTimeUnixNano uint64 `protobuf:"fixed64,8,opt,name=end_time_unix_nano,json=endTimeUnixNano,proto3" json:"end_time_unix_nano,omitempty"` - // attributes is a collection of key/value pairs. The value can be a string, - // an integer, a double or the Boolean values `true` or `false`. Note, global attributes - // like server name can be set using the resource API. Examples of attributes: - // - // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" - // "/http/server_latency": 300 - // "abc.com/myattribute": true - // "abc.com/score": 10.239 - Attributes []v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes"` - // dropped_attributes_count is the number of attributes that were discarded. Attributes - // can be discarded because their keys are too long or because there are too many - // attributes. If this value is 0, then no attributes were dropped. 
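dropped_attributes_count records overflow from limits such as the ones just described (keys too long, too many attributes). A small sketch of that bookkeeping, using a hypothetical capAttributes helper rather than any collector API:

package main

import "fmt"

type attribute struct {
	Key, Value string
}

// capAttributes is a hypothetical helper: it keeps at most limit attributes
// and reports how many were discarded, which is exactly the number a field
// like dropped_attributes_count would carry.
func capAttributes(attrs []attribute, limit int) ([]attribute, uint32) {
	if len(attrs) <= limit {
		return attrs, 0
	}
	return attrs[:limit], uint32(len(attrs) - limit)
}

func main() {
	attrs := []attribute{
		{"http.method", "GET"},
		{"http.status_code", "200"},
		{"abc.com/myattribute", "true"},
	}
	kept, dropped := capAttributes(attrs, 2)
	fmt.Println(len(kept), dropped) // 2 1
}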
- DroppedAttributesCount uint32 `protobuf:"varint,10,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` - // events is a collection of Event items. - Events []*Span_Event `protobuf:"bytes,11,rep,name=events,proto3" json:"events,omitempty"` - // dropped_events_count is the number of dropped events. If the value is 0, then no - // events were dropped. - DroppedEventsCount uint32 `protobuf:"varint,12,opt,name=dropped_events_count,json=droppedEventsCount,proto3" json:"dropped_events_count,omitempty"` - // links is a collection of Links, which are references from this span to a span - // in the same or different trace. - Links []*Span_Link `protobuf:"bytes,13,rep,name=links,proto3" json:"links,omitempty"` - // dropped_links_count is the number of dropped links after the maximum size was - // enforced. If this value is 0, then no links were dropped. - DroppedLinksCount uint32 `protobuf:"varint,14,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"` - // An optional final status for this span. Semantically when Status isn't set, it means - // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). - Status Status `protobuf:"bytes,15,opt,name=status,proto3" json:"status"` -} - -func (m *Span) Reset() { *m = Span{} } -func (m *Span) String() string { return proto.CompactTextString(m) } -func (*Span) ProtoMessage() {} -func (*Span) Descriptor() ([]byte, []int) { - return fileDescriptor_5c407ac9c675a601, []int{2} -} -func (m *Span) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Span) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Span.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Span) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span.Merge(m, src) -} -func (m *Span) XXX_Size() int { - return m.Size() -} -func (m *Span) XXX_DiscardUnknown() { - xxx_messageInfo_Span.DiscardUnknown(m) -} - -var xxx_messageInfo_Span proto.InternalMessageInfo - -func (m *Span) GetTraceState() string { - if m != nil { - return m.TraceState - } - return "" -} - -func (m *Span) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Span) GetKind() Span_SpanKind { - if m != nil { - return m.Kind - } - return Span_SPAN_KIND_UNSPECIFIED -} - -func (m *Span) GetStartTimeUnixNano() uint64 { - if m != nil { - return m.StartTimeUnixNano - } - return 0 -} - -func (m *Span) GetEndTimeUnixNano() uint64 { - if m != nil { - return m.EndTimeUnixNano - } - return 0 -} - -func (m *Span) GetAttributes() []v11.KeyValue { - if m != nil { - return m.Attributes - } - return nil -} - -func (m *Span) GetDroppedAttributesCount() uint32 { - if m != nil { - return m.DroppedAttributesCount - } - return 0 -} - -func (m *Span) GetEvents() []*Span_Event { - if m != nil { - return m.Events - } - return nil -} - -func (m *Span) GetDroppedEventsCount() uint32 { - if m != nil { - return m.DroppedEventsCount - } - return 0 -} - -func (m *Span) GetLinks() []*Span_Link { - if m != nil { - return m.Links - } - return nil -} - -func (m *Span) GetDroppedLinksCount() uint32 { - if m != nil { - return m.DroppedLinksCount - } - return 0 -} - -func (m *Span) GetStatus() Status { - if m != nil { - return m.Status - } - return Status{} -} - -// Event is a time-stamped annotation of the span, consisting of 
user-supplied -// text description and key-value pairs. -type Span_Event struct { - // time_unix_nano is the time the event occurred. - TimeUnixNano uint64 `protobuf:"fixed64,1,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` - // name of the event. - // This field is semantically required to be set to non-empty string. - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - // attributes is a collection of attribute key/value pairs on the event. - Attributes []v11.KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes"` - // dropped_attributes_count is the number of dropped attributes. If the value is 0, - // then no attributes were dropped. - DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` -} - -func (m *Span_Event) Reset() { *m = Span_Event{} } -func (m *Span_Event) String() string { return proto.CompactTextString(m) } -func (*Span_Event) ProtoMessage() {} -func (*Span_Event) Descriptor() ([]byte, []int) { - return fileDescriptor_5c407ac9c675a601, []int{2, 0} -} -func (m *Span_Event) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Span_Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Span_Event.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Span_Event) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span_Event.Merge(m, src) -} -func (m *Span_Event) XXX_Size() int { - return m.Size() -} -func (m *Span_Event) XXX_DiscardUnknown() { - xxx_messageInfo_Span_Event.DiscardUnknown(m) -} - -var xxx_messageInfo_Span_Event proto.InternalMessageInfo - -func (m *Span_Event) GetTimeUnixNano() uint64 { - if m != nil { - return m.TimeUnixNano - } - return 0 -} - -func (m *Span_Event) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Span_Event) GetAttributes() []v11.KeyValue { - if m != nil { - return m.Attributes - } - return nil -} - -func (m *Span_Event) GetDroppedAttributesCount() uint32 { - if m != nil { - return m.DroppedAttributesCount - } - return 0 -} - -// A pointer from the current span to another span in the same trace or in a -// different trace. For example, this can be used in batching operations, -// where a single batch handler processes multiple requests from different -// traces or when the handler receives a request from a different project. -type Span_Link struct { - // A unique identifier of a trace that this linked span is part of. The ID is a - // 16-byte array. - TraceId go_opentelemetry_io_collector_internal_data.TraceID `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/internal/data.TraceID" json:"trace_id"` - // A unique identifier for the linked span. The ID is an 8-byte array. - SpanId go_opentelemetry_io_collector_internal_data.SpanID `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/internal/data.SpanID" json:"span_id"` - // The trace_state associated with the link. - TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"` - // attributes is a collection of attribute key/value pairs on the link. 
- Attributes []v11.KeyValue `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes"` - // dropped_attributes_count is the number of dropped attributes. If the value is 0, - // then no attributes were dropped. - DroppedAttributesCount uint32 `protobuf:"varint,5,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` -} - -func (m *Span_Link) Reset() { *m = Span_Link{} } -func (m *Span_Link) String() string { return proto.CompactTextString(m) } -func (*Span_Link) ProtoMessage() {} -func (*Span_Link) Descriptor() ([]byte, []int) { - return fileDescriptor_5c407ac9c675a601, []int{2, 1} -} -func (m *Span_Link) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Span_Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Span_Link.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Span_Link) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span_Link.Merge(m, src) -} -func (m *Span_Link) XXX_Size() int { - return m.Size() -} -func (m *Span_Link) XXX_DiscardUnknown() { - xxx_messageInfo_Span_Link.DiscardUnknown(m) -} - -var xxx_messageInfo_Span_Link proto.InternalMessageInfo - -func (m *Span_Link) GetTraceState() string { - if m != nil { - return m.TraceState - } - return "" -} - -func (m *Span_Link) GetAttributes() []v11.KeyValue { - if m != nil { - return m.Attributes - } - return nil -} - -func (m *Span_Link) GetDroppedAttributesCount() uint32 { - if m != nil { - return m.DroppedAttributesCount - } - return 0 -} - -// The Status type defines a logical error model that is suitable for different -// programming environments, including REST APIs and RPC APIs. -type Status struct { - // The deprecated status code. This is an optional field. - // - // This field is deprecated and is replaced by the `code` field below. See backward - // compatibility notes below. According to our stability guarantees this field - // will be removed in 12 months, on Oct 22, 2021. All usage of old senders and - // receivers that do not understand the `code` field MUST be phased out by then. - DeprecatedCode Status_DeprecatedStatusCode `protobuf:"varint,1,opt,name=deprecated_code,json=deprecatedCode,proto3,enum=opentelemetry.proto.trace.v1.Status_DeprecatedStatusCode" json:"deprecated_code,omitempty"` // Deprecated: Do not use. - // A developer-facing human readable error message. - Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` - // The status code. 
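Taken together, deprecated_code carries the old fine-grained status enum while code is the three-valued replacement (unset, OK, error) defined earlier in this file. A small sketch of choosing the new code for a finished span, with constant values matching the deleted StatusCode enum but otherwise illustrative:

package main

import (
	"errors"
	"fmt"
)

// The numeric values match the StatusCode enum in the deleted file:
// 0 = unset (default), 1 = OK, 2 = error.
type statusCode int32

const (
	statusCodeUnset statusCode = 0
	statusCodeOK    statusCode = 1
	statusCodeError statusCode = 2
)

// statusFor picks the new-style code for a finished span: error when one
// occurred, OK only when the outcome was explicitly validated, unset otherwise.
func statusFor(err error, validatedOK bool) (statusCode, string) {
	switch {
	case err != nil:
		return statusCodeError, err.Error()
	case validatedOK:
		return statusCodeOK, ""
	default:
		return statusCodeUnset, ""
	}
}

func main() {
	code, msg := statusFor(errors.New("upstream timeout"), false)
	fmt.Println(code, msg) // 2 upstream timeout
}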
- Code Status_StatusCode `protobuf:"varint,3,opt,name=code,proto3,enum=opentelemetry.proto.trace.v1.Status_StatusCode" json:"code,omitempty"` -} - -func (m *Status) Reset() { *m = Status{} } -func (m *Status) String() string { return proto.CompactTextString(m) } -func (*Status) ProtoMessage() {} -func (*Status) Descriptor() ([]byte, []int) { - return fileDescriptor_5c407ac9c675a601, []int{3} -} -func (m *Status) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Status.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Status) XXX_Merge(src proto.Message) { - xxx_messageInfo_Status.Merge(m, src) -} -func (m *Status) XXX_Size() int { - return m.Size() -} -func (m *Status) XXX_DiscardUnknown() { - xxx_messageInfo_Status.DiscardUnknown(m) -} - -var xxx_messageInfo_Status proto.InternalMessageInfo - -// Deprecated: Do not use. -func (m *Status) GetDeprecatedCode() Status_DeprecatedStatusCode { - if m != nil { - return m.DeprecatedCode - } - return Status_DEPRECATED_STATUS_CODE_OK -} - -func (m *Status) GetMessage() string { - if m != nil { - return m.Message - } - return "" -} - -func (m *Status) GetCode() Status_StatusCode { - if m != nil { - return m.Code - } - return Status_STATUS_CODE_UNSET -} - -func init() { - proto.RegisterEnum("opentelemetry.proto.trace.v1.Span_SpanKind", Span_SpanKind_name, Span_SpanKind_value) - proto.RegisterEnum("opentelemetry.proto.trace.v1.Status_DeprecatedStatusCode", Status_DeprecatedStatusCode_name, Status_DeprecatedStatusCode_value) - proto.RegisterEnum("opentelemetry.proto.trace.v1.Status_StatusCode", Status_StatusCode_name, Status_StatusCode_value) - proto.RegisterType((*ResourceSpans)(nil), "opentelemetry.proto.trace.v1.ResourceSpans") - proto.RegisterType((*InstrumentationLibrarySpans)(nil), "opentelemetry.proto.trace.v1.InstrumentationLibrarySpans") - proto.RegisterType((*Span)(nil), "opentelemetry.proto.trace.v1.Span") - proto.RegisterType((*Span_Event)(nil), "opentelemetry.proto.trace.v1.Span.Event") - proto.RegisterType((*Span_Link)(nil), "opentelemetry.proto.trace.v1.Span.Link") - proto.RegisterType((*Status)(nil), "opentelemetry.proto.trace.v1.Status") -} - -func init() { - proto.RegisterFile("opentelemetry/proto/trace/v1/trace.proto", fileDescriptor_5c407ac9c675a601) -} - -var fileDescriptor_5c407ac9c675a601 = []byte{ - // 1224 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x57, 0x41, 0x6f, 0xdb, 0x46, - 0x13, 0x15, 0x6d, 0x49, 0x76, 0xc6, 0xb6, 0xcc, 0xec, 0xe7, 0xe4, 0x63, 0x9c, 0x46, 0x16, 0x54, - 0x37, 0x51, 0x92, 0x56, 0x6a, 0x1c, 0x14, 0x48, 0x5b, 0x04, 0x2d, 0x45, 0xae, 0x13, 0xc2, 0x34, - 0x29, 0x2c, 0x29, 0x37, 0xed, 0x85, 0x65, 0xcc, 0xad, 0x41, 0x44, 0x22, 0x05, 0x6a, 0x65, 0x24, - 0x87, 0x5e, 0x7b, 0xee, 0xb5, 0xff, 0x28, 0x28, 0x50, 0x20, 0xc7, 0x22, 0x45, 0x83, 0xc2, 0xfe, - 0x1b, 0x3d, 0x14, 0xbb, 0xa4, 0x6c, 0xcb, 0x10, 0xe5, 0x04, 0x45, 0x2e, 0xbd, 0x18, 0xd4, 0xcc, - 0x9b, 0xf7, 0xde, 0xce, 0xcc, 0xd2, 0x12, 0x34, 0xe2, 0x01, 0x8d, 0x18, 0xed, 0xd1, 0x3e, 0x65, - 0xc9, 0x8b, 0xd6, 0x20, 0x89, 0x59, 0xdc, 0x62, 0x89, 0xbf, 0x4f, 0x5b, 0x87, 0xf7, 0xd2, 0x87, - 0xa6, 0x08, 0xa2, 0x0f, 0x26, 0x90, 0x69, 0xb0, 0x99, 0x02, 0x0e, 0xef, 0xad, 0xaf, 0x1d, 0xc4, - 0x07, 0x71, 0x5a, 0xcd, 0x9f, 0xd2, 0xf4, 0xfa, 0x9d, 0x69, 0xec, 
0xfb, 0x71, 0xbf, 0x1f, 0x47, - 0x9c, 0x3e, 0x7d, 0xca, 0xb0, 0xcd, 0x69, 0xd8, 0x84, 0x0e, 0xe3, 0x51, 0x92, 0x9a, 0x19, 0x3f, - 0xa7, 0xf8, 0xfa, 0x1f, 0x12, 0xac, 0x90, 0x2c, 0xe4, 0x0c, 0xfc, 0x68, 0x88, 0x76, 0x60, 0x71, - 0x8c, 0x51, 0xa4, 0x9a, 0xd4, 0x58, 0xda, 0xba, 0xdd, 0x9c, 0x66, 0xfa, 0x84, 0xe8, 0xf0, 0x5e, - 0x73, 0xcc, 0xd0, 0x2e, 0xbe, 0x7c, 0xb3, 0x51, 0x20, 0x27, 0x04, 0xe8, 0x47, 0xb8, 0x11, 0x46, - 0x43, 0x96, 0x8c, 0xfa, 0x34, 0x62, 0x3e, 0x0b, 0xe3, 0xc8, 0xeb, 0x85, 0x4f, 0x13, 0x3f, 0x79, - 0xe1, 0x0d, 0xb9, 0x9a, 0x32, 0x57, 0x9b, 0x6f, 0x2c, 0x6d, 0x7d, 0xde, 0x9c, 0xd5, 0x96, 0xa6, - 0x31, 0x49, 0x61, 0xa6, 0x0c, 0xc2, 0x2e, 0xb9, 0x1e, 0xe6, 0x27, 0xeb, 0xbf, 0x49, 0x70, 0x7d, - 0x46, 0x31, 0x62, 0xf0, 0xff, 0x1c, 0x7b, 0xd9, 0xd1, 0x3f, 0x9b, 0x6a, 0x2c, 0xeb, 0x78, 0xae, - 0xb3, 0xac, 0x0d, 0x57, 0xa7, 0x5b, 0x43, 0x0f, 0xa0, 0x74, 0xf6, 0xf0, 0xf5, 0xd9, 0x87, 0xe7, - 0x4e, 0x49, 0x5a, 0x50, 0x3f, 0x5a, 0x86, 0x22, 0xff, 0x8c, 0xf6, 0x60, 0x51, 0x00, 0xbc, 0x30, - 0x10, 0x4e, 0x97, 0xdb, 0x5f, 0x72, 0xc9, 0xd7, 0x6f, 0x36, 0xee, 0x1f, 0xc4, 0xe7, 0xf8, 0x42, - 0xbe, 0x2c, 0xbd, 0x1e, 0xdd, 0x67, 0x71, 0xd2, 0x0a, 0x23, 0x46, 0x93, 0xc8, 0xef, 0xb5, 0x02, - 0x9f, 0xf9, 0x4d, 0x97, 0x73, 0x18, 0x3a, 0x59, 0x10, 0x64, 0x46, 0x80, 0x1c, 0x58, 0xe0, 0x4a, - 0x9c, 0x76, 0x4e, 0xd0, 0x7e, 0x91, 0xd1, 0x6e, 0xbd, 0x0b, 0x2d, 0xb7, 0x68, 0xe8, 0xa4, 0xcc, - 0xa9, 0x8c, 0x00, 0x6d, 0xc0, 0x52, 0x6a, 0x76, 0xc8, 0x7c, 0x46, 0x95, 0xf9, 0x9a, 0xd4, 0xb8, - 0x44, 0x40, 0x84, 0x1c, 0x1e, 0x41, 0xdf, 0x43, 0x65, 0xe0, 0x27, 0x34, 0x62, 0xde, 0x58, 0xbc, - 0xf8, 0xaf, 0xc5, 0x97, 0x53, 0x46, 0x27, 0xb5, 0x80, 0xa0, 0x18, 0xf9, 0x7d, 0xaa, 0x94, 0x84, - 0xb6, 0x78, 0x46, 0x5f, 0x41, 0xf1, 0x59, 0x18, 0x05, 0x4a, 0xb9, 0x26, 0x35, 0x2a, 0x5b, 0x77, - 0x2f, 0x9e, 0x82, 0xf8, 0xb3, 0x13, 0x46, 0x01, 0x11, 0x85, 0xa8, 0x05, 0x6b, 0x43, 0xe6, 0x27, - 0xcc, 0x63, 0x61, 0x9f, 0x7a, 0xa3, 0x28, 0x7c, 0xee, 0x45, 0x7e, 0x14, 0x2b, 0x0b, 0x35, 0xa9, - 0x51, 0x26, 0x97, 0x45, 0xce, 0x0d, 0xfb, 0xb4, 0x1b, 0x85, 0xcf, 0x2d, 0x3f, 0x8a, 0xd1, 0x5d, - 0x40, 0x34, 0x0a, 0xce, 0xc3, 0x17, 0x05, 0x7c, 0x95, 0x46, 0xc1, 0x04, 0x78, 0x17, 0xc0, 0x67, - 0x2c, 0x09, 0x9f, 0x8e, 0x18, 0x1d, 0x2a, 0x97, 0xc4, 0xaa, 0xdc, 0xba, 0x60, 0x1d, 0x77, 0xe8, - 0x8b, 0x3d, 0xbf, 0x37, 0x1a, 0xdf, 0xc3, 0x33, 0x04, 0xe8, 0x01, 0x28, 0x41, 0x12, 0x0f, 0x06, - 0x34, 0xf0, 0x4e, 0xa3, 0xde, 0x7e, 0x3c, 0x8a, 0x98, 0x02, 0x35, 0xa9, 0xb1, 0x42, 0xae, 0x66, - 0x79, 0xf5, 0x24, 0xad, 0xf1, 0x2c, 0xfa, 0x1a, 0xca, 0xf4, 0x90, 0x46, 0x6c, 0xa8, 0x2c, 0x09, - 0x13, 0x8d, 0xb7, 0xe8, 0x14, 0xe6, 0x05, 0x24, 0xab, 0x43, 0x9f, 0xc2, 0xda, 0x58, 0x3b, 0x8d, - 0x64, 0xba, 0xcb, 0x42, 0x17, 0x65, 0x39, 0x51, 0x93, 0x69, 0x3e, 0x84, 0x52, 0x2f, 0x8c, 0x9e, - 0x0d, 0x95, 0x95, 0x19, 0xe7, 0x9e, 0x94, 0x34, 0xc3, 0xe8, 0x19, 0x49, 0xab, 0x50, 0x13, 0xfe, - 0x37, 0x16, 0x14, 0x81, 0x4c, 0xaf, 0x22, 0xf4, 0x2e, 0x67, 0x29, 0x5e, 0x90, 0xc9, 0xb5, 0xa1, - 0xcc, 0x77, 0x73, 0x34, 0x54, 0x56, 0xc5, 0xb5, 0xdf, 0xbc, 0x40, 0x4f, 0x60, 0xb3, 0x26, 0x67, - 0x95, 0xeb, 0xbf, 0x4a, 0x50, 0x12, 0x47, 0x40, 0x9b, 0x50, 0x39, 0x37, 0x62, 0x49, 0x8c, 0x78, - 0x99, 0x9d, 0x9d, 0xef, 0x78, 0x25, 0xe7, 0xce, 0xac, 0xe4, 0xe4, 0xcc, 0xe7, 0xdf, 0xe7, 0xcc, - 0x8b, 0xb3, 0x66, 0xbe, 0xfe, 0xe7, 0x1c, 0x14, 0x79, 0x7f, 0xfe, 0x63, 0x2f, 0x9a, 0xc9, 0xfe, - 0x16, 0xdf, 0x67, 0x7f, 0x4b, 0xb3, 0xfa, 0x5b, 0xff, 0x45, 0x82, 0xc5, 0xf1, 0xdb, 0x04, 0x5d, - 0x83, 0x2b, 0x4e, 0x47, 0xb5, 0xbc, 0x1d, 0xc3, 0xd2, 0xbd, 0xae, 0xe5, 0x74, 0xb0, 0x66, 
0x6c, - 0x1b, 0x58, 0x97, 0x0b, 0xe8, 0x2a, 0xa0, 0xd3, 0x94, 0x61, 0xb9, 0x98, 0x58, 0xaa, 0x29, 0x4b, - 0x68, 0x0d, 0xe4, 0xd3, 0xb8, 0x83, 0xc9, 0x1e, 0x26, 0xf2, 0xdc, 0x64, 0x54, 0x33, 0x0d, 0x6c, - 0xb9, 0xf2, 0xfc, 0x24, 0x47, 0x87, 0xd8, 0x7a, 0x57, 0xc3, 0x44, 0x2e, 0x4e, 0xc6, 0x35, 0xdb, - 0x72, 0xba, 0xbb, 0x98, 0xc8, 0xa5, 0xfa, 0xdf, 0x0b, 0x50, 0x4e, 0x37, 0x1c, 0xfd, 0x00, 0xab, - 0x01, 0x1d, 0x24, 0x74, 0xdf, 0x67, 0x34, 0xf0, 0xf6, 0xe3, 0x20, 0xfd, 0x4a, 0x50, 0xb9, 0xe8, - 0x1f, 0x76, 0x5a, 0xde, 0xd4, 0x4f, 0x6a, 0xd3, 0x80, 0x16, 0x07, 0xb4, 0x3d, 0xa7, 0x48, 0xa4, - 0x72, 0xca, 0xca, 0x63, 0x48, 0x81, 0x85, 0x3e, 0x1d, 0x0e, 0xfd, 0x83, 0xf1, 0x75, 0x18, 0x7f, - 0x44, 0x1a, 0x14, 0x85, 0xec, 0xbc, 0x90, 0x6d, 0xbd, 0x95, 0xec, 0xa9, 0x18, 0x11, 0xc5, 0xf5, - 0xd7, 0x25, 0x58, 0x9b, 0xe6, 0x05, 0xdd, 0x80, 0x6b, 0x3a, 0xee, 0x10, 0xac, 0xa9, 0x2e, 0xd6, - 0x3d, 0xc7, 0x55, 0xdd, 0xae, 0xe3, 0x69, 0xb6, 0x8e, 0x3d, 0x7b, 0x47, 0x2e, 0xa0, 0x4d, 0xa8, - 0xe5, 0xa4, 0x35, 0xd5, 0xd2, 0xb0, 0x69, 0x62, 0x5d, 0x96, 0x50, 0x03, 0x36, 0x73, 0x50, 0x5d, - 0x6b, 0xc7, 0xb2, 0xbf, 0xb1, 0x3c, 0x4c, 0x88, 0xcd, 0xe7, 0x73, 0x17, 0x6e, 0xe5, 0x20, 0x0d, - 0x6b, 0x4f, 0x35, 0x0d, 0xdd, 0x53, 0xc9, 0xa3, 0xee, 0x6e, 0x3a, 0xb6, 0x8f, 0xa1, 0x91, 0x03, - 0xd6, 0xb1, 0xaa, 0x9b, 0x86, 0x85, 0x3d, 0xfc, 0x44, 0xc3, 0x58, 0xc7, 0xba, 0x5c, 0x9c, 0x61, - 0xd5, 0xb2, 0x5d, 0x6f, 0xdb, 0xee, 0x5a, 0xba, 0x5c, 0x42, 0xb7, 0xe1, 0xa3, 0x1c, 0x94, 0x6a, - 0x12, 0xac, 0xea, 0xdf, 0x7a, 0xf8, 0x89, 0xe1, 0xb8, 0x8e, 0x5c, 0x9e, 0x21, 0xdf, 0xc1, 0x64, - 0xd7, 0x70, 0x1c, 0xc3, 0xb6, 0x3c, 0x1d, 0x5b, 0x7c, 0x4f, 0x17, 0xd0, 0x27, 0x70, 0x3b, 0x07, - 0x4d, 0xb0, 0x63, 0x77, 0x89, 0xc6, 0xcd, 0x3e, 0x56, 0xbb, 0x8e, 0x8b, 0x75, 0x79, 0x11, 0x35, - 0xe1, 0x4e, 0x0e, 0x7c, 0x5b, 0x35, 0x4c, 0xcc, 0xd7, 0x14, 0x6b, 0xb6, 0xa5, 0x1b, 0xae, 0x61, - 0x5b, 0xf2, 0x25, 0x54, 0x87, 0x6a, 0x9e, 0xef, 0xb6, 0x4d, 0x38, 0x27, 0xa0, 0x5b, 0xf0, 0x61, - 0xde, 0x2c, 0xbb, 0xae, 0x67, 0x6f, 0x7b, 0x44, 0xb5, 0x1e, 0x61, 0x79, 0x69, 0xe6, 0xbc, 0x8c, - 0xdd, 0x8e, 0x89, 0xf9, 0x00, 0xb0, 0x2e, 0x2f, 0xcf, 0x68, 0xd7, 0xf8, 0x2a, 0x66, 0xa3, 0x5d, - 0x41, 0x37, 0xa1, 0x9e, 0x4b, 0xaa, 0xee, 0xa9, 0x86, 0xa9, 0xb6, 0x4d, 0x2c, 0x57, 0x66, 0xcc, - 0x49, 0x57, 0x5d, 0xd5, 0x33, 0x6d, 0xc7, 0x91, 0x57, 0xd1, 0x1d, 0xb8, 0x99, 0xcf, 0xd6, 0x75, - 0x1f, 0x63, 0xcb, 0x35, 0x44, 0x4e, 0x96, 0xeb, 0x16, 0xc0, 0x99, 0x8d, 0xbe, 0x02, 0x97, 0x27, - 0xe1, 0x0e, 0x76, 0xe5, 0x02, 0x42, 0x50, 0x39, 0xb7, 0xdd, 0xd2, 0x79, 0x68, 0xb6, 0xa4, 0xed, - 0x9f, 0xa4, 0x97, 0x47, 0x55, 0xe9, 0xd5, 0x51, 0x55, 0xfa, 0xeb, 0xa8, 0x2a, 0xfd, 0x7c, 0x5c, - 0x2d, 0xbc, 0x3a, 0xae, 0x16, 0x7e, 0x3f, 0xae, 0x16, 0x60, 0x23, 0x8c, 0x67, 0x5e, 0xc0, 0x36, - 0x88, 0xf7, 0x7c, 0x87, 0x07, 0x3b, 0xd2, 0x77, 0x0f, 0xdf, 0xe1, 0x55, 0x9e, 0xfe, 0x52, 0x39, - 0xa0, 0xd1, 0xc9, 0xcf, 0xa6, 0xa7, 0x65, 0x11, 0xba, 0xff, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, - 0x46, 0x9b, 0x55, 0x34, 0x5d, 0x0d, 0x00, 0x00, -} - -func (m *ResourceSpans) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceSpans) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceSpans) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.InstrumentationLibrarySpans) > 0 { - for iNdEx := 
len(m.InstrumentationLibrarySpans) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.InstrumentationLibrarySpans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTrace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTrace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *InstrumentationLibrarySpans) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *InstrumentationLibrarySpans) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *InstrumentationLibrarySpans) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Spans) > 0 { - for iNdEx := len(m.Spans) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Spans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTrace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.InstrumentationLibrary.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTrace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Span) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Span) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Span) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTrace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x7a - if m.DroppedLinksCount != 0 { - i = encodeVarintTrace(dAtA, i, uint64(m.DroppedLinksCount)) - i-- - dAtA[i] = 0x70 - } - if len(m.Links) > 0 { - for iNdEx := len(m.Links) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Links[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTrace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x6a - } - } - if m.DroppedEventsCount != 0 { - i = encodeVarintTrace(dAtA, i, uint64(m.DroppedEventsCount)) - i-- - dAtA[i] = 0x60 - } - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTrace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x5a - } - } - if m.DroppedAttributesCount != 0 { - i = encodeVarintTrace(dAtA, i, uint64(m.DroppedAttributesCount)) - i-- - dAtA[i] = 0x50 - } - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTrace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - } - if m.EndTimeUnixNano != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.EndTimeUnixNano)) - i-- - dAtA[i] = 0x41 - } - if m.StartTimeUnixNano != 0 { - 
i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano)) - i-- - dAtA[i] = 0x39 - } - if m.Kind != 0 { - i = encodeVarintTrace(dAtA, i, uint64(m.Kind)) - i-- - dAtA[i] = 0x30 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintTrace(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x2a - } - { - size := m.ParentSpanId.Size() - i -= size - if _, err := m.ParentSpanId.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintTrace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - if len(m.TraceState) > 0 { - i -= len(m.TraceState) - copy(dAtA[i:], m.TraceState) - i = encodeVarintTrace(dAtA, i, uint64(len(m.TraceState))) - i-- - dAtA[i] = 0x1a - } - { - size := m.SpanId.Size() - i -= size - if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintTrace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size := m.TraceId.Size() - i -= size - if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintTrace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Span_Event) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Span_Event) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Span_Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.DroppedAttributesCount != 0 { - i = encodeVarintTrace(dAtA, i, uint64(m.DroppedAttributesCount)) - i-- - dAtA[i] = 0x20 - } - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTrace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintTrace(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 - } - if m.TimeUnixNano != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) - i-- - dAtA[i] = 0x9 - } - return len(dAtA) - i, nil -} - -func (m *Span_Link) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Span_Link) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Span_Link) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.DroppedAttributesCount != 0 { - i = encodeVarintTrace(dAtA, i, uint64(m.DroppedAttributesCount)) - i-- - dAtA[i] = 0x28 - } - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTrace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if len(m.TraceState) > 0 { - i -= len(m.TraceState) - copy(dAtA[i:], m.TraceState) - i = encodeVarintTrace(dAtA, i, uint64(len(m.TraceState))) - i-- - dAtA[i] = 0x1a - } - { - size := m.SpanId.Size() - i -= size - if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = 
encodeVarintTrace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size := m.TraceId.Size() - i -= size - if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintTrace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Status) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Status) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Status) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Code != 0 { - i = encodeVarintTrace(dAtA, i, uint64(m.Code)) - i-- - dAtA[i] = 0x18 - } - if len(m.Message) > 0 { - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintTrace(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x12 - } - if m.DeprecatedCode != 0 { - i = encodeVarintTrace(dAtA, i, uint64(m.DeprecatedCode)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintTrace(dAtA []byte, offset int, v uint64) int { - offset -= sovTrace(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ResourceSpans) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Resource.Size() - n += 1 + l + sovTrace(uint64(l)) - if len(m.InstrumentationLibrarySpans) > 0 { - for _, e := range m.InstrumentationLibrarySpans { - l = e.Size() - n += 1 + l + sovTrace(uint64(l)) - } - } - return n -} - -func (m *InstrumentationLibrarySpans) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.InstrumentationLibrary.Size() - n += 1 + l + sovTrace(uint64(l)) - if len(m.Spans) > 0 { - for _, e := range m.Spans { - l = e.Size() - n += 1 + l + sovTrace(uint64(l)) - } - } - return n -} - -func (m *Span) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.TraceId.Size() - n += 1 + l + sovTrace(uint64(l)) - l = m.SpanId.Size() - n += 1 + l + sovTrace(uint64(l)) - l = len(m.TraceState) - if l > 0 { - n += 1 + l + sovTrace(uint64(l)) - } - l = m.ParentSpanId.Size() - n += 1 + l + sovTrace(uint64(l)) - l = len(m.Name) - if l > 0 { - n += 1 + l + sovTrace(uint64(l)) - } - if m.Kind != 0 { - n += 1 + sovTrace(uint64(m.Kind)) - } - if m.StartTimeUnixNano != 0 { - n += 9 - } - if m.EndTimeUnixNano != 0 { - n += 9 - } - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovTrace(uint64(l)) - } - } - if m.DroppedAttributesCount != 0 { - n += 1 + sovTrace(uint64(m.DroppedAttributesCount)) - } - if len(m.Events) > 0 { - for _, e := range m.Events { - l = e.Size() - n += 1 + l + sovTrace(uint64(l)) - } - } - if m.DroppedEventsCount != 0 { - n += 1 + sovTrace(uint64(m.DroppedEventsCount)) - } - if len(m.Links) > 0 { - for _, e := range m.Links { - l = e.Size() - n += 1 + l + sovTrace(uint64(l)) - } - } - if m.DroppedLinksCount != 0 { - n += 1 + sovTrace(uint64(m.DroppedLinksCount)) - } - l = m.Status.Size() - n += 1 + l + sovTrace(uint64(l)) - return n -} - -func (m *Span_Event) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.TimeUnixNano != 0 { - n += 9 - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovTrace(uint64(l)) - } - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + 
sovTrace(uint64(l)) - } - } - if m.DroppedAttributesCount != 0 { - n += 1 + sovTrace(uint64(m.DroppedAttributesCount)) - } - return n -} - -func (m *Span_Link) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.TraceId.Size() - n += 1 + l + sovTrace(uint64(l)) - l = m.SpanId.Size() - n += 1 + l + sovTrace(uint64(l)) - l = len(m.TraceState) - if l > 0 { - n += 1 + l + sovTrace(uint64(l)) - } - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovTrace(uint64(l)) - } - } - if m.DroppedAttributesCount != 0 { - n += 1 + sovTrace(uint64(m.DroppedAttributesCount)) - } - return n -} - -func (m *Status) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.DeprecatedCode != 0 { - n += 1 + sovTrace(uint64(m.DeprecatedCode)) - } - l = len(m.Message) - if l > 0 { - n += 1 + l + sovTrace(uint64(l)) - } - if m.Code != 0 { - n += 1 + sovTrace(uint64(m.Code)) - } - return n -} - -func sovTrace(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozTrace(x uint64) (n int) { - return sovTrace(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *ResourceSpans) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceSpans: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceSpans: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InstrumentationLibrarySpans", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.InstrumentationLibrarySpans = append(m.InstrumentationLibrarySpans, &InstrumentationLibrarySpans{}) - if err := m.InstrumentationLibrarySpans[len(m.InstrumentationLibrarySpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTrace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTrace - } - if (iNdEx + skippy) > l { - 
return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *InstrumentationLibrarySpans) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: InstrumentationLibrarySpans: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: InstrumentationLibrarySpans: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InstrumentationLibrary", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.InstrumentationLibrary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spans", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Spans = append(m.Spans, &Span{}) - if err := m.Spans[len(m.Spans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTrace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTrace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Span) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Span: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Span: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - 
byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TraceState", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TraceState = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ParentSpanId", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ParentSpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - m.Kind = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Kind |= Span_SpanKind(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", 
wireType) - } - m.StartTimeUnixNano = 0 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - case 8: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field EndTimeUnixNano", wireType) - } - m.EndTimeUnixNano = 0 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - m.EndTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, v11.KeyValue{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType) - } - m.DroppedAttributesCount = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DroppedAttributesCount |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Events = append(m.Events, &Span_Event{}) - if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DroppedEventsCount", wireType) - } - m.DroppedEventsCount = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DroppedEventsCount |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Links", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Links = append(m.Links, &Span_Link{}) - if err := m.Links[len(m.Links)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 14: - if wireType != 0 { - return 
fmt.Errorf("proto: wrong wireType = %d for field DroppedLinksCount", wireType) - } - m.DroppedLinksCount = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DroppedLinksCount |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 15: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTrace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTrace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Span_Event) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Event: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) - } - m.TimeUnixNano = 0 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - 
m.Attributes = append(m.Attributes, v11.KeyValue{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType) - } - m.DroppedAttributesCount = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DroppedAttributesCount |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipTrace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTrace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Span_Link) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Link: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Link: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TraceState", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - 
m.TraceState = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, v11.KeyValue{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType) - } - m.DroppedAttributesCount = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DroppedAttributesCount |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipTrace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTrace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Status) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Status: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Status: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedCode", wireType) - } - m.DeprecatedCode = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DeprecatedCode |= Status_DeprecatedStatusCode(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTrace - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTrace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) - } - m.Code = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTrace - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Code |= Status_StatusCode(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipTrace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTrace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipTrace(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTrace - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTrace - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTrace - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthTrace - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupTrace - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthTrace - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthTrace = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowTrace = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupTrace = fmt.Errorf("proto: unexpected end of group") -) diff --git a/internal/otel_collector/internal/data/spanid.go b/internal/otel_collector/internal/data/spanid.go deleted file mode 100644 index e5e790fb3f8..00000000000 --- a/internal/otel_collector/internal/data/spanid.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package data - -import ( - "encoding/hex" - "errors" -) - -const spanIDSize = 8 - -var errInvalidSpanIDSize = errors.New("invalid length for SpanID") - -// SpanID is a custom data type that is used for all span_id fields in OTLP -// Protobuf messages. -type SpanID struct { - id [spanIDSize]byte -} - -// NewSpanID creates a SpanID from a byte slice. -func NewSpanID(bytes [8]byte) SpanID { - return SpanID{id: bytes} -} - -// HexString returns hex representation of the ID. 
-func (sid SpanID) HexString() string { - if sid.IsEmpty() { - return "" - } - return hex.EncodeToString(sid.id[:]) -} - -// Size returns the size of the data to serialize. -func (sid *SpanID) Size() int { - if sid.IsEmpty() { - return 0 - } - return spanIDSize -} - -// Equal returns true if ids are equal. -func (sid SpanID) Equal(that SpanID) bool { - return sid.id == that.id -} - -// IsEmpty returns true if id contains no non-zero bytes. -func (sid SpanID) IsEmpty() bool { - return sid.id == [8]byte{} -} - -// Bytes returns the byte array representation of the SpanID. -func (sid SpanID) Bytes() [8]byte { - return sid.id -} - -// MarshalTo converts span ID into a binary representation. Called by Protobuf serialization. -func (sid *SpanID) MarshalTo(data []byte) (n int, err error) { - if sid.IsEmpty() { - return 0, nil - } - return marshalBytes(data, sid.id[:]) -} - -// Unmarshal inflates this span ID from binary representation. Called by Protobuf serialization. -func (sid *SpanID) Unmarshal(data []byte) error { - if len(data) == 0 { - sid.id = [8]byte{} - return nil - } - - if len(data) != spanIDSize { - return errInvalidSpanIDSize - } - - copy(sid.id[:], data) - return nil -} - -// MarshalJSON converts SpanID into a hex string enclosed in quotes. -func (sid SpanID) MarshalJSON() ([]byte, error) { - if sid.IsEmpty() { - return []byte(`""`), nil - } - return marshalJSON(sid.id[:]) -} - -// UnmarshalJSON decodes SpanID from hex string, possibly enclosed in quotes. -// Called by Protobuf JSON deserialization. -func (sid *SpanID) UnmarshalJSON(data []byte) error { - sid.id = [8]byte{} - return unmarshalJSON(sid.id[:], data) -} diff --git a/internal/otel_collector/internal/data/traceid.go b/internal/otel_collector/internal/data/traceid.go deleted file mode 100644 index b608621b72a..00000000000 --- a/internal/otel_collector/internal/data/traceid.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package data - -import ( - "encoding/hex" - "errors" -) - -const traceIDSize = 16 - -var errInvalidTraceIDSize = errors.New("invalid length for TraceID") - -// TraceID is a custom data type that is used for all trace_id fields in OTLP -// Protobuf messages. -type TraceID struct { - id [traceIDSize]byte -} - -// NewTraceID creates a TraceID from a byte slice. -func NewTraceID(bytes [16]byte) TraceID { - return TraceID{ - id: bytes, - } -} - -// HexString returns hex representation of the ID. -func (tid TraceID) HexString() string { - if tid.IsEmpty() { - return "" - } - return hex.EncodeToString(tid.id[:]) -} - -// Size returns the size of the data to serialize. -func (tid *TraceID) Size() int { - if tid.IsEmpty() { - return 0 - } - return traceIDSize -} - -// Equal returns true if ids are equal. -func (tid TraceID) Equal(that TraceID) bool { - return tid.id == that.id -} - -// IsEmpty returns true if id contains no non-zero bytes.
-func (tid TraceID) IsEmpty() bool { - return tid.id == [16]byte{} -} - -// Bytes returns the byte array representation of the TraceID. -func (tid TraceID) Bytes() [16]byte { - return tid.id -} - -// MarshalTo converts trace ID into a binary representation. Called by Protobuf serialization. -func (tid *TraceID) MarshalTo(data []byte) (n int, err error) { - if tid.IsEmpty() { - return 0, nil - } - return marshalBytes(data, tid.id[:]) -} - -// Unmarshal inflates this trace ID from binary representation. Called by Protobuf serialization. -func (tid *TraceID) Unmarshal(data []byte) error { - if len(data) == 0 { - tid.id = [16]byte{} - return nil - } - - if len(data) != traceIDSize { - return errInvalidTraceIDSize - } - - copy(tid.id[:], data) - return nil -} - -// MarshalJSON converts trace id into a hex string enclosed in quotes. -func (tid TraceID) MarshalJSON() ([]byte, error) { - if tid.IsEmpty() { - return []byte(`""`), nil - } - return marshalJSON(tid.id[:]) -} - -// UnmarshalJSON inflates trace id from hex string, possibly enclosed in quotes. -// Called by Protobuf JSON deserialization. -func (tid *TraceID) UnmarshalJSON(data []byte) error { - tid.id = [16]byte{} - return unmarshalJSON(tid.id[:], data) -} diff --git a/internal/otel_collector/internal/goldendataset/metrics_gen.go b/internal/otel_collector/internal/goldendataset/metrics_gen.go index f55acd2039a..2c4c297a3b3 100644 --- a/internal/otel_collector/internal/goldendataset/metrics_gen.go +++ b/internal/otel_collector/internal/goldendataset/metrics_gen.go @@ -17,7 +17,7 @@ package goldendataset import ( "fmt" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) // Simple utilities for generating metrics for testing @@ -86,9 +86,9 @@ func newMetricGenerator() metricGenerator { func (g *metricGenerator) genMetricFromCfg(cfg MetricsCfg) pdata.Metrics { md := pdata.NewMetrics() rms := md.ResourceMetrics() - rms.Resize(cfg.NumResourceMetrics) + rms.EnsureCapacity(cfg.NumResourceMetrics) for i := 0; i < cfg.NumResourceMetrics; i++ { - rm := rms.At(i) + rm := rms.AppendEmpty() resource := rm.Resource() for j := 0; j < cfg.NumResourceAttrs; j++ { resource.Attributes().Insert( @@ -103,35 +103,35 @@ func (g *metricGenerator) genMetricFromCfg(cfg MetricsCfg) pdata.Metrics { func (g *metricGenerator) populateIlm(cfg MetricsCfg, rm pdata.ResourceMetrics) { ilms := rm.InstrumentationLibraryMetrics() - ilms.Resize(cfg.NumILMPerResource) + ilms.EnsureCapacity(cfg.NumILMPerResource) for i := 0; i < cfg.NumILMPerResource; i++ { - ilm := ilms.At(i) + ilm := ilms.AppendEmpty() g.populateMetrics(cfg, ilm) } } func (g *metricGenerator) populateMetrics(cfg MetricsCfg, ilm pdata.InstrumentationLibraryMetrics) { metrics := ilm.Metrics() - metrics.Resize(cfg.NumMetricsPerILM) + metrics.EnsureCapacity(cfg.NumMetricsPerILM) for i := 0; i < cfg.NumMetricsPerILM; i++ { - metric := metrics.At(i) + metric := metrics.AppendEmpty() g.populateMetricDesc(cfg, metric) switch cfg.MetricDescriptorType { case pdata.MetricDataTypeIntGauge: metric.SetDataType(pdata.MetricDataTypeIntGauge) populateIntPoints(cfg, metric.IntGauge().DataPoints()) - case pdata.MetricDataTypeDoubleGauge: - metric.SetDataType(pdata.MetricDataTypeDoubleGauge) - populateDoublePoints(cfg, metric.DoubleGauge().DataPoints()) + case pdata.MetricDataTypeGauge: + metric.SetDataType(pdata.MetricDataTypeGauge) + populateDoublePoints(cfg, metric.Gauge().DataPoints()) case pdata.MetricDataTypeIntSum: metric.SetDataType(pdata.MetricDataTypeIntSum) sum := 
metric.IntSum() sum.SetIsMonotonic(cfg.IsMonotonicSum) sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) populateIntPoints(cfg, sum.DataPoints()) - case pdata.MetricDataTypeDoubleSum: - metric.SetDataType(pdata.MetricDataTypeDoubleSum) - sum := metric.DoubleSum() + case pdata.MetricDataTypeSum: + metric.SetDataType(pdata.MetricDataTypeSum) + sum := metric.Sum() sum.SetIsMonotonic(cfg.IsMonotonicSum) sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) populateDoublePoints(cfg, sum.DataPoints()) @@ -157,9 +157,9 @@ func (g *metricGenerator) populateMetricDesc(cfg MetricsCfg, metric pdata.Metric } func populateIntPoints(cfg MetricsCfg, pts pdata.IntDataPointSlice) { - pts.Resize(cfg.NumPtsPerMetric) + pts.EnsureCapacity(cfg.NumPtsPerMetric) for i := 0; i < cfg.NumPtsPerMetric; i++ { - pt := pts.At(i) + pt := pts.AppendEmpty() pt.SetStartTimestamp(pdata.Timestamp(cfg.StartTime)) pt.SetTimestamp(getTimestamp(cfg.StartTime, cfg.StepSize, i)) pt.SetValue(int64(cfg.PtVal + i)) @@ -168,9 +168,9 @@ func populateIntPoints(cfg MetricsCfg, pts pdata.IntDataPointSlice) { } func populateDoublePoints(cfg MetricsCfg, pts pdata.DoubleDataPointSlice) { - pts.Resize(cfg.NumPtsPerMetric) + pts.EnsureCapacity(cfg.NumPtsPerMetric) for i := 0; i < cfg.NumPtsPerMetric; i++ { - pt := pts.At(i) + pt := pts.AppendEmpty() pt.SetStartTimestamp(pdata.Timestamp(cfg.StartTime)) pt.SetTimestamp(getTimestamp(cfg.StartTime, cfg.StepSize, i)) pt.SetValue(float64(cfg.PtVal + i)) @@ -180,9 +180,9 @@ func populateDoublePoints(cfg MetricsCfg, pts pdata.DoubleDataPointSlice) { func populateDoubleHistogram(cfg MetricsCfg, dh pdata.Histogram) { pts := dh.DataPoints() - pts.Resize(cfg.NumPtsPerMetric) + pts.EnsureCapacity(cfg.NumPtsPerMetric) for i := 0; i < cfg.NumPtsPerMetric; i++ { - pt := pts.At(i) + pt := pts.AppendEmpty() pt.SetStartTimestamp(pdata.Timestamp(cfg.StartTime)) ts := getTimestamp(cfg.StartTime, cfg.StepSize, i) pt.SetTimestamp(ts) @@ -217,9 +217,9 @@ func addDoubleHistogramVal(hdp pdata.HistogramDataPoint, val float64) { func populateIntHistogram(cfg MetricsCfg, dh pdata.IntHistogram) { pts := dh.DataPoints() - pts.Resize(cfg.NumPtsPerMetric) + pts.EnsureCapacity(cfg.NumPtsPerMetric) for i := 0; i < cfg.NumPtsPerMetric; i++ { - pt := pts.At(i) + pt := pts.AppendEmpty() pt.SetStartTimestamp(pdata.Timestamp(cfg.StartTime)) ts := getTimestamp(cfg.StartTime, cfg.StepSize, i) pt.SetTimestamp(ts) diff --git a/internal/otel_collector/internal/goldendataset/pict_metrics_gen.go b/internal/otel_collector/internal/goldendataset/pict_metrics_gen.go index b84c53ff656..8c604e15661 100644 --- a/internal/otel_collector/internal/goldendataset/pict_metrics_gen.go +++ b/internal/otel_collector/internal/goldendataset/pict_metrics_gen.go @@ -17,7 +17,7 @@ package goldendataset import ( "fmt" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) // GenerateMetrics takes the filename of a PICT-generated file, walks through all of the rows in the PICT @@ -73,12 +73,12 @@ func pictToCfg(inputs PICTMetricInputs) MetricsCfg { cfg.MetricDescriptorType = pdata.MetricDataTypeIntSum cfg.IsMonotonicSum = false case MetricTypeDoubleGauge: - cfg.MetricDescriptorType = pdata.MetricDataTypeDoubleGauge + cfg.MetricDescriptorType = pdata.MetricDataTypeGauge case MetricTypeMonotonicDoubleSum: - cfg.MetricDescriptorType = pdata.MetricDataTypeDoubleSum + cfg.MetricDescriptorType = pdata.MetricDataTypeSum cfg.IsMonotonicSum = true case MetricTypeNonMonotonicDoubleSum: - 
cfg.MetricDescriptorType = pdata.MetricDataTypeDoubleSum + cfg.MetricDescriptorType = pdata.MetricDataTypeSum cfg.IsMonotonicSum = false case MetricTypeIntHistogram: cfg.MetricDescriptorType = pdata.MetricDataTypeIntHistogram diff --git a/internal/otel_collector/internal/goldendataset/resource_generator.go b/internal/otel_collector/internal/goldendataset/resource_generator.go index 37d3d19a872..4abda462e36 100644 --- a/internal/otel_collector/internal/goldendataset/resource_generator.go +++ b/internal/otel_collector/internal/goldendataset/resource_generator.go @@ -15,7 +15,7 @@ package goldendataset import ( - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/translator/conventions" ) @@ -118,9 +118,9 @@ func appendFassAttributes(attrMap pdata.AttributeMap) { func appendExecAttributes(attrMap pdata.AttributeMap) { attrMap.UpsertString(conventions.AttributeProcessExecutableName, "otelcol") parts := pdata.NewAttributeValueArray() - parts.ArrayVal().Append(pdata.NewAttributeValueString("otelcol")) - parts.ArrayVal().Append(pdata.NewAttributeValueString("--config=/etc/otel-collector-config.yaml")) - parts.ArrayVal().Append(pdata.NewAttributeValueString("--mem-ballast-size-mib=683")) + parts.ArrayVal().AppendEmpty().SetStringVal("otelcol") + parts.ArrayVal().AppendEmpty().SetStringVal("--config=/etc/otel-collector-config.yaml") + parts.ArrayVal().AppendEmpty().SetStringVal("--mem-ballast-size-mib=683") attrMap.Upsert(conventions.AttributeProcessCommandLine, parts) attrMap.UpsertString(conventions.AttributeProcessExecutablePath, "/usr/local/bin/otelcol") attrMap.UpsertInt(conventions.AttributeProcessID, 2020) diff --git a/internal/otel_collector/internal/goldendataset/span_generator.go b/internal/otel_collector/internal/goldendataset/span_generator.go index e013f698eb7..0ca1a20d9ac 100644 --- a/internal/otel_collector/internal/goldendataset/span_generator.go +++ b/internal/otel_collector/internal/goldendataset/span_generator.go @@ -19,7 +19,7 @@ import ( "io" "time" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/translator/conventions" ) @@ -362,9 +362,9 @@ func appendMaxCountAttributes(includeStatus bool, attrMap pdata.AttributeMap) { attrMap.UpsertInt("ai-sampler.maxhops", 6) attrMap.UpsertString("application.create.location", "https://api.opentelemetry.io/blog/posts/806673B9-4F4D-4284-9635-3A3E3E3805BE") stages := pdata.NewAttributeValueArray() - stages.ArrayVal().Append(pdata.NewAttributeValueString("Launch")) - stages.ArrayVal().Append(pdata.NewAttributeValueString("Injestion")) - stages.ArrayVal().Append(pdata.NewAttributeValueString("Validation")) + stages.ArrayVal().AppendEmpty().SetStringVal("Launch") + stages.ArrayVal().AppendEmpty().SetStringVal("Injestion") + stages.ArrayVal().AppendEmpty().SetStringVal("Validation") attrMap.Upsert("application.stages", stages) subMap := pdata.NewAttributeValueMap() subMap.MapVal().InsertBool("UIx", false) diff --git a/internal/otel_collector/internal/goldendataset/testdata/pict_input_metrics.txt b/internal/otel_collector/internal/goldendataset/testdata/pict_input_metrics.txt index 9351db55e01..b4670fd0572 100644 --- a/internal/otel_collector/internal/goldendataset/testdata/pict_input_metrics.txt +++ b/internal/otel_collector/internal/goldendataset/testdata/pict_input_metrics.txt @@ -1,4 +1,4 @@ NumPtsPerMetric: OnePt, ManyPts -MetricType: DoubleGauge, MonotonicDoubleSum, NonMonotonicDoubleSum, 
IntGauge, MonotonicIntSum, NonMonotonicIntSum, IntHistogram, DoubleHistogram +MetricType: DoubleGauge, MonotonicDoubleSum, NonMonotonicDoubleSum, IntGauge, MonotonicIntSum, NonMonotonicIntSum, IntHistogram, Histogram NumLabels: NoLabels, OneLabel, ManyLabels NumResourceAttrs: NoAttrs, OneAttr, TwoAttrs diff --git a/internal/otel_collector/internal/goldendataset/traces_generator.go b/internal/otel_collector/internal/goldendataset/traces_generator.go index dcc1c00362a..16d544b20c9 100644 --- a/internal/otel_collector/internal/goldendataset/traces_generator.go +++ b/internal/otel_collector/internal/goldendataset/traces_generator.go @@ -19,7 +19,7 @@ import ( "io" "math/rand" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) // GenerateTraces generates a slice of OTLP ResourceSpans objects based on the PICT-generated pairwise diff --git a/internal/otel_collector/internal/idutils/big_endian_converter.go b/internal/otel_collector/internal/idutils/big_endian_converter.go index abc9d9bfc33..13e0aea2878 100644 --- a/internal/otel_collector/internal/idutils/big_endian_converter.go +++ b/internal/otel_collector/internal/idutils/big_endian_converter.go @@ -17,7 +17,7 @@ package idutils import ( "encoding/binary" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) // UInt64ToTraceID converts the pair of uint64 representation of a TraceID to pdata.TraceID. diff --git a/internal/otel_collector/internal/internalconsumertest/err_or_sink_consumer.go b/internal/otel_collector/internal/internalconsumertest/err_or_sink_consumer.go index 8c01f84d055..ddb2ca5cb72 100644 --- a/internal/otel_collector/internal/internalconsumertest/err_or_sink_consumer.go +++ b/internal/otel_collector/internal/internalconsumertest/err_or_sink_consumer.go @@ -20,7 +20,7 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) type ErrOrSinkConsumer struct { diff --git a/internal/otel_collector/internal/iruntime/mem_info.go b/internal/otel_collector/internal/iruntime/mem_info.go new file mode 100644 index 00000000000..53019e8dd44 --- /dev/null +++ b/internal/otel_collector/internal/iruntime/mem_info.go @@ -0,0 +1,26 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
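The goldendataset hunks above adapt the test-data generators to the collector v0.30.0 pdata API, where the old `Resize(n)`/`At(i)` pattern is replaced by `EnsureCapacity(n)` plus `AppendEmpty()`, and the `DoubleGauge`/`DoubleSum` data types become `Gauge`/`Sum`. A minimal, self-contained sketch of that pattern follows; it is editorial illustration rather than part of the patch, and the metric name and attribute key are made up for the example.

```go
package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/model/pdata"
)

// buildGauge builds one double Gauge metric with n data points using the
// v0.30.0 slice API: EnsureCapacity reserves space, AppendEmpty grows the
// slice and returns the new element, so no index bookkeeping is needed.
func buildGauge(n int) pdata.Metrics {
	md := pdata.NewMetrics()
	rm := md.ResourceMetrics().AppendEmpty()
	rm.Resource().Attributes().UpsertString("service.name", "example") // illustrative attribute
	metric := rm.InstrumentationLibraryMetrics().AppendEmpty().Metrics().AppendEmpty()
	metric.SetName("example.gauge") // illustrative name
	metric.SetDataType(pdata.MetricDataTypeGauge)
	dps := metric.Gauge().DataPoints()
	dps.EnsureCapacity(n)
	for i := 0; i < n; i++ {
		dp := dps.AppendEmpty()
		dp.SetTimestamp(pdata.Timestamp(time.Now().UnixNano()))
		dp.SetValue(float64(i))
	}
	return md
}

func main() {
	fmt.Println(buildGauge(3).MetricCount()) // 1
}
```

Compared with `Resize`, this API makes the intent explicit: capacity is only a hint and elements exist only once appended, which is why the hunks above pair `EnsureCapacity` with `AppendEmpty` inside the loop.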
+ +package iruntime + +import ( + "github.com/shirou/gopsutil/mem" +) + +// readMemInfo returns the total memory +// supports in linux, darwin and windows +func readMemInfo() (uint64, error) { + vmStat, err := mem.VirtualMemory() + return vmStat.Total, err +} diff --git a/internal/otel_collector/processor/memorylimiter/internal/iruntime/total_memory_linux.go b/internal/otel_collector/internal/iruntime/total_memory_linux.go similarity index 68% rename from internal/otel_collector/processor/memorylimiter/internal/iruntime/total_memory_linux.go rename to internal/otel_collector/internal/iruntime/total_memory_linux.go index 611f4a0fac1..30dd7c9caee 100644 --- a/internal/otel_collector/processor/memorylimiter/internal/iruntime/total_memory_linux.go +++ b/internal/otel_collector/internal/iruntime/total_memory_linux.go @@ -16,11 +16,15 @@ package iruntime -import "go.opentelemetry.io/collector/processor/memorylimiter/internal/cgroups" +import "go.opentelemetry.io/collector/internal/cgroups" + +// unlimitedMemorySize defines the bytes size when memory limit is not set +// for the container and process with cgroups +const unlimitedMemorySize = 9223372036854771712 // TotalMemory returns total available memory. // This implementation is meant for linux and uses cgroups to determine available memory. -func TotalMemory() (int64, error) { +func TotalMemory() (uint64, error) { cgroups, err := cgroups.NewCGroupsForCurrentProcess() if err != nil { return 0, err @@ -29,5 +33,14 @@ func TotalMemory() (int64, error) { if err != nil || !defined { return 0, err } - return memoryQuota, nil + + if memoryQuota == unlimitedMemorySize { + totalMem, err := readMemInfo() + if err != nil { + return 0, err + } + return totalMem, nil + } + + return uint64(memoryQuota), nil } diff --git a/internal/otel_collector/processor/memorylimiter/internal/iruntime/total_memory_other.go b/internal/otel_collector/internal/iruntime/total_memory_other.go similarity index 66% rename from internal/otel_collector/processor/memorylimiter/internal/iruntime/total_memory_other.go rename to internal/otel_collector/internal/iruntime/total_memory_other.go index 304efbf728e..2e7221a3a50 100644 --- a/internal/otel_collector/processor/memorylimiter/internal/iruntime/total_memory_other.go +++ b/internal/otel_collector/internal/iruntime/total_memory_other.go @@ -16,14 +16,7 @@ package iruntime -import ( - "fmt" -) - -var errTotalMemoryNotAvailable = fmt.Errorf("reading cgroups total memory is available only on linux") - -// TotalMemory returns total available memory. -// This is non-Linux version that returns -1 and errTotalMemoryNotAvailable. -func TotalMemory() (int64, error) { - return -1, errTotalMemoryNotAvailable +// TotalMemory returns total available memory for non-linux platforms. +func TotalMemory() (uint64, error) { + return readMemInfo() } diff --git a/internal/otel_collector/internal/model/README.md b/internal/otel_collector/internal/model/README.md deleted file mode 100644 index 932544202c8..00000000000 --- a/internal/otel_collector/internal/model/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# Protocols - -This package provides common ways for decoding serialized bytes into protocol-specific in-memory data models (e.g. Zipkin Span). These data models can then be translated to internal pdata representations. Similarly, pdata can be translated from a data model which can then be serialized into bytes. - -**Encoding**: Common interfaces for serializing/deserializing bytes from/to protocol-specific data models. 
- -**Translation**: Common interfaces for translating protocol-specific data models from/to pdata. - -**Marshaling**: Common higher level APIs that do both encoding and translation of bytes and data model if going directly pdata ⇔ bytes. diff --git a/internal/otel_collector/internal/model/decoder.go b/internal/otel_collector/internal/model/decoder.go deleted file mode 100644 index c06f3732747..00000000000 --- a/internal/otel_collector/internal/model/decoder.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -// MetricsDecoder interface to decode bytes into protocol-specific data model. -type MetricsDecoder interface { - // DecodeMetrics decodes bytes into protocol-specific data model. - DecodeMetrics(buf []byte) (interface{}, error) -} - -// TracesDecoder interface to decode bytes into protocol-specific data model. -type TracesDecoder interface { - // DecodeTraces decodes bytes into protocol-specific data model. - DecodeTraces(buf []byte) (interface{}, error) -} - -// LogsDecoder interface to decode bytes into protocol-specific data model. -type LogsDecoder interface { - // DecodeLogs decodes bytes into protocol-specific data model. - DecodeLogs(buf []byte) (interface{}, error) -} diff --git a/internal/otel_collector/internal/model/encoder.go b/internal/otel_collector/internal/model/encoder.go deleted file mode 100644 index 8ec58b74384..00000000000 --- a/internal/otel_collector/internal/model/encoder.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -// MetricsEncoder encodes protocol-specific data model into bytes. -type MetricsEncoder interface { - // EncodeMetrics converts protocol-specific data model into bytes. - EncodeMetrics(model interface{}) ([]byte, error) -} - -// TracesEncoder encodes protocol-specific data model into bytes. -type TracesEncoder interface { - // EncodeTraces converts protocol-specific data model into bytes. - EncodeTraces(model interface{}) ([]byte, error) -} - -// LogsEncoder encodes protocol-specific data model into bytes. -type LogsEncoder interface { - // EncodeLogs converts protocol-specific data model into bytes. 
- EncodeLogs(model interface{}) ([]byte, error) -} diff --git a/internal/otel_collector/internal/model/encoding.go b/internal/otel_collector/internal/model/encoding.go deleted file mode 100644 index 69853bc72ea..00000000000 --- a/internal/otel_collector/internal/model/encoding.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import "fmt" - -// Encoding is the encoding format that a model is serialized to. -type Encoding string - -const ( - Protobuf Encoding = "protobuf" - JSON Encoding = "json" - Thrift Encoding = "thrift" -) - -func (e Encoding) String() string { - return string(e) -} - -// ErrUnavailableEncoding is returned when the requested encoding is not supported. -type ErrUnavailableEncoding struct { - encoding Encoding -} - -func (e *ErrUnavailableEncoding) Error() string { - return fmt.Sprintf("unsupported encoding %q", e.encoding) -} - -func NewErrUnavailableEncoding(encoding Encoding) *ErrUnavailableEncoding { - return &ErrUnavailableEncoding{encoding: encoding} -} diff --git a/internal/otel_collector/internal/model/from_translator.go b/internal/otel_collector/internal/model/from_translator.go deleted file mode 100644 index f079acf05c1..00000000000 --- a/internal/otel_collector/internal/model/from_translator.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import "go.opentelemetry.io/collector/consumer/pdata" - -// FromMetricsTranslator interface to translate pdata.Metrics to protocol-specific data model. -type FromMetricsTranslator interface { - // FromMetrics translates pdata.Metrics to protocol-specific data model. - FromMetrics(md pdata.Metrics) (interface{}, error) -} - -// FromTracesTranslator interface to translate pdata.Traces to protocol-specific data model. -type FromTracesTranslator interface { - // FromTraces translates pdata.Traces to protocol-specific data model. - FromTraces(td pdata.Traces) (interface{}, error) -} - -// FromLogsTranslator interface to translate pdata.Logs to protocol-specific data model. -type FromLogsTranslator interface { - // FromLogs translates pdata.Logs to protocol-specific data model. 
- FromLogs(ld pdata.Logs) (interface{}, error) -} diff --git a/internal/otel_collector/internal/model/marshaler.go b/internal/otel_collector/internal/model/marshaler.go deleted file mode 100644 index b79709218f2..00000000000 --- a/internal/otel_collector/internal/model/marshaler.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "fmt" - - "go.opentelemetry.io/collector/consumer/pdata" -) - -// TracesMarshaler marshals pdata.Traces into bytes. -type TracesMarshaler interface { - Marshal(td pdata.Traces) ([]byte, error) -} - -type tracesMarshaler struct { - encoder TracesEncoder - translator FromTracesTranslator -} - -// NewTracesMarshaler returns a new TracesMarshaler. -func NewTracesMarshaler(encoder TracesEncoder, translator FromTracesTranslator) TracesMarshaler { - return &tracesMarshaler{ - encoder: encoder, - translator: translator, - } -} - -// Marshal pdata.Traces into bytes. On error []byte is nil. -func (t *tracesMarshaler) Marshal(td pdata.Traces) ([]byte, error) { - model, err := t.translator.FromTraces(td) - if err != nil { - return nil, fmt.Errorf("converting pdata to model failed: %w", err) - } - buf, err := t.encoder.EncodeTraces(model) - if err != nil { - return nil, fmt.Errorf("marshal failed: %w", err) - } - return buf, nil -} - -// MetricsMarshaler marshals pdata.Metrics into bytes. -type MetricsMarshaler interface { - Marshal(td pdata.Metrics) ([]byte, error) -} - -type metricsMarshaler struct { - encoder MetricsEncoder - translator FromMetricsTranslator -} - -// NewMetricsMarshaler returns a new MetricsMarshaler. -func NewMetricsMarshaler(encoder MetricsEncoder, translator FromMetricsTranslator) MetricsMarshaler { - return &metricsMarshaler{ - encoder: encoder, - translator: translator, - } -} - -// Marshal pdata.Metrics into bytes. On error []byte is nil. -func (t *metricsMarshaler) Marshal(td pdata.Metrics) ([]byte, error) { - model, err := t.translator.FromMetrics(td) - if err != nil { - return nil, fmt.Errorf("converting pdata to model failed: %w", err) - } - buf, err := t.encoder.EncodeMetrics(model) - if err != nil { - return nil, fmt.Errorf("marshal failed: %w", err) - } - return buf, nil -} - -// LogsMarshaler marshals pdata.Logs into bytes. -type LogsMarshaler interface { - Marshal(td pdata.Logs) ([]byte, error) -} - -type logsMarshaler struct { - encoder LogsEncoder - translator FromLogsTranslator -} - -// NewLogsMarshaler returns a new LogsMarshaler. -func NewLogsMarshaler(encoder LogsEncoder, translator FromLogsTranslator) LogsMarshaler { - return &logsMarshaler{ - encoder: encoder, - translator: translator, - } -} - -// Marshal pdata.Logs into bytes. On error []byte is nil. 
-func (t *logsMarshaler) Marshal(td pdata.Logs) ([]byte, error) { - model, err := t.translator.FromLogs(td) - if err != nil { - return nil, fmt.Errorf("converting pdata to model failed: %w", err) - } - buf, err := t.encoder.EncodeLogs(model) - if err != nil { - return nil, fmt.Errorf("marshal failed: %w", err) - } - return buf, nil -} diff --git a/internal/otel_collector/internal/model/to_translator.go b/internal/otel_collector/internal/model/to_translator.go deleted file mode 100644 index b5652d63f9f..00000000000 --- a/internal/otel_collector/internal/model/to_translator.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import "go.opentelemetry.io/collector/consumer/pdata" - -type ToMetricsTranslator interface { - // ToMetrics converts a protocol-specific data model into pdata. - ToMetrics(src interface{}) (pdata.Metrics, error) -} - -type ToTracesTranslator interface { - // ToTraces converts a protocol-specific data model into pdata. - ToTraces(src interface{}) (pdata.Traces, error) -} - -type ToLogsTranslator interface { - // ToLogs converts a protocol-specific data model into pdata. - ToLogs(src interface{}) (pdata.Logs, error) -} diff --git a/internal/otel_collector/internal/model/unmarshal.go b/internal/otel_collector/internal/model/unmarshal.go deleted file mode 100644 index da2b18f606a..00000000000 --- a/internal/otel_collector/internal/model/unmarshal.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "fmt" - - "go.opentelemetry.io/collector/consumer/pdata" -) - -// TracesUnmarshaler unmarshalls bytes into pdata.Traces. -type TracesUnmarshaler interface { - Unmarshal(buf []byte) (pdata.Traces, error) -} - -type tracesUnmarshaler struct { - decoder TracesDecoder - translator ToTracesTranslator -} - -// NewTracesUnmarshaler returns a new TracesUnmarshaler. -func NewTracesUnmarshaler(decoder TracesDecoder, translator ToTracesTranslator) TracesUnmarshaler { - return &tracesUnmarshaler{ - decoder: decoder, - translator: translator, - } -} - -// Unmarshal bytes into pdata.Traces. On error pdata.Traces is invalid. 
-func (t *tracesUnmarshaler) Unmarshal(buf []byte) (pdata.Traces, error) { - model, err := t.decoder.DecodeTraces(buf) - if err != nil { - return pdata.Traces{}, fmt.Errorf("unmarshal failed: %w", err) - } - td, err := t.translator.ToTraces(model) - if err != nil { - return pdata.Traces{}, fmt.Errorf("converting model to pdata failed: %w", err) - } - return td, nil -} - -// MetricsUnmarshaler unmarshalls bytes into pdata.Metrics. -type MetricsUnmarshaler interface { - Unmarshal(buf []byte) (pdata.Metrics, error) -} - -type metricsUnmarshaler struct { - decoder MetricsDecoder - translator ToMetricsTranslator -} - -// NewMetricsUnmarshaler returns a new MetricsUnmarshaler. -func NewMetricsUnmarshaler(decoder MetricsDecoder, translator ToMetricsTranslator) MetricsUnmarshaler { - return &metricsUnmarshaler{ - decoder: decoder, - translator: translator, - } -} - -// Unmarshal bytes into pdata.Metrics. On error pdata.Metrics is invalid. -func (t *metricsUnmarshaler) Unmarshal(buf []byte) (pdata.Metrics, error) { - model, err := t.decoder.DecodeMetrics(buf) - if err != nil { - return pdata.Metrics{}, fmt.Errorf("unmarshal failed: %w", err) - } - td, err := t.translator.ToMetrics(model) - if err != nil { - return pdata.Metrics{}, fmt.Errorf("converting model to pdata failed: %w", err) - } - return td, nil -} - -// LogsUnmarshaler unmarshalls bytes into pdata.Logs. -type LogsUnmarshaler interface { - Unmarshal(buf []byte) (pdata.Logs, error) -} - -type logsUnmarshaler struct { - decoder LogsDecoder - translator ToLogsTranslator -} - -// NewLogsUnmarshaler returns a new LogsUnmarshaler. -func NewLogsUnmarshaler(decoder LogsDecoder, translator ToLogsTranslator) LogsUnmarshaler { - return &logsUnmarshaler{ - decoder: decoder, - translator: translator, - } -} - -// Unmarshal bytes into pdata.Logs. On error pdata.Logs is invalid. -func (t *logsUnmarshaler) Unmarshal(buf []byte) (pdata.Logs, error) { - model, err := t.decoder.DecodeLogs(buf) - if err != nil { - return pdata.Logs{}, fmt.Errorf("unmarshal failed: %w", err) - } - td, err := t.translator.ToLogs(model) - if err != nil { - return pdata.Logs{}, fmt.Errorf("converting model to pdata failed: %w", err) - } - return td, nil -} diff --git a/internal/otel_collector/internal/otlp/from_translator.go b/internal/otel_collector/internal/otlp/from_translator.go deleted file mode 100644 index c193487d054..00000000000 --- a/internal/otel_collector/internal/otlp/from_translator.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package otlp - -import ( - "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal" -) - -type fromTranslator struct{} - -func newFromTranslator() *fromTranslator { - return &fromTranslator{} -} - -func (d *fromTranslator) FromLogs(ld pdata.Logs) (interface{}, error) { - return internal.LogsToOtlp(ld.InternalRep()), nil -} - -func (d *fromTranslator) FromMetrics(md pdata.Metrics) (interface{}, error) { - return internal.MetricsToOtlp(md.InternalRep()), nil -} - -func (d *fromTranslator) FromTraces(td pdata.Traces) (interface{}, error) { - return internal.TracesToOtlp(td.InternalRep()), nil -} diff --git a/internal/otel_collector/internal/otlp/json_decoder.go b/internal/otel_collector/internal/otlp/json_decoder.go deleted file mode 100644 index 7f0b5429de5..00000000000 --- a/internal/otel_collector/internal/otlp/json_decoder.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otlp - -import ( - "bytes" - - "github.com/gogo/protobuf/jsonpb" - - otlpcollectorlogs "go.opentelemetry.io/collector/internal/data/protogen/collector/logs/v1" - otlpcollectormetrics "go.opentelemetry.io/collector/internal/data/protogen/collector/metrics/v1" - otlpcollectortrace "go.opentelemetry.io/collector/internal/data/protogen/collector/trace/v1" -) - -type jsonDecoder struct { - delegate jsonpb.Unmarshaler -} - -func newJSONDecoder() *jsonDecoder { - return &jsonDecoder{delegate: jsonpb.Unmarshaler{}} -} - -func (d *jsonDecoder) DecodeLogs(buf []byte) (interface{}, error) { - ld := &otlpcollectorlogs.ExportLogsServiceRequest{} - if err := d.delegate.Unmarshal(bytes.NewReader(buf), ld); err != nil { - return nil, err - } - return ld, nil -} - -func (d *jsonDecoder) DecodeMetrics(buf []byte) (interface{}, error) { - md := &otlpcollectormetrics.ExportMetricsServiceRequest{} - if err := d.delegate.Unmarshal(bytes.NewReader(buf), md); err != nil { - return nil, err - } - return md, nil -} - -func (d *jsonDecoder) DecodeTraces(buf []byte) (interface{}, error) { - td := &otlpcollectortrace.ExportTraceServiceRequest{} - if err := d.delegate.Unmarshal(bytes.NewReader(buf), td); err != nil { - return nil, err - } - return td, nil -} diff --git a/internal/otel_collector/internal/otlp/json_encoder.go b/internal/otel_collector/internal/otlp/json_encoder.go deleted file mode 100644 index 6fbe8427047..00000000000 --- a/internal/otel_collector/internal/otlp/json_encoder.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package otlp - -import ( - "bytes" - - "github.com/gogo/protobuf/jsonpb" - - otlpcollectorlogs "go.opentelemetry.io/collector/internal/data/protogen/collector/logs/v1" - otlpcollectormetrics "go.opentelemetry.io/collector/internal/data/protogen/collector/metrics/v1" - otlpcollectortrace "go.opentelemetry.io/collector/internal/data/protogen/collector/trace/v1" - "go.opentelemetry.io/collector/internal/model" -) - -type jsonEncoder struct { - delegate jsonpb.Marshaler -} - -func newJSONEncoder() *jsonEncoder { - return &jsonEncoder{delegate: jsonpb.Marshaler{}} -} - -func (e *jsonEncoder) EncodeLogs(modelData interface{}) ([]byte, error) { - ld, ok := modelData.(*otlpcollectorlogs.ExportLogsServiceRequest) - if !ok { - return nil, model.NewErrIncompatibleType(&otlpcollectorlogs.ExportLogsServiceRequest{}, modelData) - } - buf := bytes.Buffer{} - if err := e.delegate.Marshal(&buf, ld); err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -func (e *jsonEncoder) EncodeMetrics(modelData interface{}) ([]byte, error) { - md, ok := modelData.(*otlpcollectormetrics.ExportMetricsServiceRequest) - if !ok { - return nil, model.NewErrIncompatibleType(&otlpcollectormetrics.ExportMetricsServiceRequest{}, modelData) - } - buf := bytes.Buffer{} - if err := e.delegate.Marshal(&buf, md); err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -func (e *jsonEncoder) EncodeTraces(modelData interface{}) ([]byte, error) { - td, ok := modelData.(*otlpcollectortrace.ExportTraceServiceRequest) - if !ok { - return nil, model.NewErrIncompatibleType(&otlpcollectortrace.ExportTraceServiceRequest{}, modelData) - } - buf := bytes.Buffer{} - if err := e.delegate.Marshal(&buf, td); err != nil { - return nil, err - } - return buf.Bytes(), nil -} diff --git a/internal/otel_collector/internal/otlp/marshaler.go b/internal/otel_collector/internal/otlp/marshaler.go deleted file mode 100644 index 12da9a7a634..00000000000 --- a/internal/otel_collector/internal/otlp/marshaler.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otlp - -import ( - "go.opentelemetry.io/collector/internal/model" -) - -// NewJSONTracesMarshaler returns a model.TracesMarshaler to decode from OTLP json bytes. -func NewJSONTracesMarshaler() model.TracesMarshaler { - return model.NewTracesMarshaler(newJSONEncoder(), newFromTranslator()) -} - -// NewJSONMetricsMarshaler returns a model.MetricsMarshaler to decode from OTLP json bytes. -func NewJSONMetricsMarshaler() model.MetricsMarshaler { - return model.NewMetricsMarshaler(newJSONEncoder(), newFromTranslator()) -} - -// NewJSONLogsMarshaler returns a model.LogsMarshaler to decode from OTLP json bytes. 
-func NewJSONLogsMarshaler() model.LogsMarshaler { - return model.NewLogsMarshaler(newJSONEncoder(), newFromTranslator()) -} diff --git a/internal/otel_collector/internal/otlp/to_translator.go b/internal/otel_collector/internal/otlp/to_translator.go deleted file mode 100644 index 5b347db3a31..00000000000 --- a/internal/otel_collector/internal/otlp/to_translator.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otlp - -import ( - "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal" - otlpcollectorlogs "go.opentelemetry.io/collector/internal/data/protogen/collector/logs/v1" - otlpcollectormetrics "go.opentelemetry.io/collector/internal/data/protogen/collector/metrics/v1" - otlpcollectortrace "go.opentelemetry.io/collector/internal/data/protogen/collector/trace/v1" - "go.opentelemetry.io/collector/internal/model" -) - -type toTranslator struct{} - -func newToTranslator() *toTranslator { - return &toTranslator{} -} - -func (d *toTranslator) ToLogs(modelData interface{}) (pdata.Logs, error) { - ld, ok := modelData.(*otlpcollectorlogs.ExportLogsServiceRequest) - if !ok { - return pdata.Logs{}, model.NewErrIncompatibleType(&otlpcollectorlogs.ExportLogsServiceRequest{}, modelData) - } - return pdata.LogsFromInternalRep(internal.LogsFromOtlp(ld)), nil -} - -func (d *toTranslator) ToMetrics(modelData interface{}) (pdata.Metrics, error) { - ld, ok := modelData.(*otlpcollectormetrics.ExportMetricsServiceRequest) - if !ok { - return pdata.Metrics{}, model.NewErrIncompatibleType(&otlpcollectormetrics.ExportMetricsServiceRequest{}, modelData) - } - return pdata.MetricsFromInternalRep(internal.MetricsFromOtlp(ld)), nil -} - -func (d *toTranslator) ToTraces(modelData interface{}) (pdata.Traces, error) { - td, ok := modelData.(*otlpcollectortrace.ExportTraceServiceRequest) - if !ok { - return pdata.Traces{}, model.NewErrIncompatibleType(&otlpcollectortrace.ExportTraceServiceRequest{}, modelData) - } - return pdata.TracesFromInternalRep(internal.TracesFromOtlp(td)), nil -} diff --git a/internal/otel_collector/internal/otlp/unmarshaler.go b/internal/otel_collector/internal/otlp/unmarshaler.go deleted file mode 100644 index 5cb8a48ca1d..00000000000 --- a/internal/otel_collector/internal/otlp/unmarshaler.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package otlp - -import ( - "go.opentelemetry.io/collector/internal/model" -) - -// NewJSONTracesUnmarshaler returns a model.TracesUnmarshaler to decode from OTLP json bytes. -func NewJSONTracesUnmarshaler() model.TracesUnmarshaler { - return model.NewTracesUnmarshaler(newJSONDecoder(), newToTranslator()) -} - -// NewJSONMetricsUnmarshaler returns a model.MetricsUnmarshaler to decode from OTLP json bytes. -func NewJSONMetricsUnmarshaler() model.MetricsUnmarshaler { - return model.NewMetricsUnmarshaler(newJSONDecoder(), newToTranslator()) -} - -// NewJSONLogsUnmarshaler returns a model.LogsUnmarshaler to decode from OTLP json bytes. -func NewJSONLogsUnmarshaler() model.LogsUnmarshaler { - return model.NewLogsUnmarshaler(newJSONDecoder(), newToTranslator()) -} diff --git a/internal/otel_collector/internal/otlp_wrapper.go b/internal/otel_collector/internal/otlp_wrapper.go deleted file mode 100644 index e6e0c3a870b..00000000000 --- a/internal/otel_collector/internal/otlp_wrapper.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import ( - otlpcollectorlog "go.opentelemetry.io/collector/internal/data/protogen/collector/logs/v1" - otlpcollectormetrics "go.opentelemetry.io/collector/internal/data/protogen/collector/metrics/v1" - otlpcollectortrace "go.opentelemetry.io/collector/internal/data/protogen/collector/trace/v1" - otlptrace "go.opentelemetry.io/collector/internal/data/protogen/trace/v1" -) - -// MetricsWrapper is an intermediary struct that is declared in an internal package -// as a way to prevent certain functions of pdata.Metrics data type to be callable by -// any code outside of this module. -type MetricsWrapper struct { - req *otlpcollectormetrics.ExportMetricsServiceRequest -} - -// MetricsToOtlp internal helper to convert MetricsWrapper to protobuf representation. -func MetricsToOtlp(mw MetricsWrapper) *otlpcollectormetrics.ExportMetricsServiceRequest { - return mw.req -} - -// MetricsFromOtlp internal helper to convert protobuf representation to MetricsWrapper. -func MetricsFromOtlp(req *otlpcollectormetrics.ExportMetricsServiceRequest) MetricsWrapper { - return MetricsWrapper{req: req} -} - -// TracesWrapper is an intermediary struct that is declared in an internal package -// as a way to prevent certain functions of pdata.Traces data type to be callable by -// any code outside of this module. -type TracesWrapper struct { - req *otlpcollectortrace.ExportTraceServiceRequest -} - -// TracesToOtlp internal helper to convert TracesWrapper to protobuf representation. -func TracesToOtlp(mw TracesWrapper) *otlpcollectortrace.ExportTraceServiceRequest { - return mw.req -} - -// TracesFromOtlp internal helper to convert protobuf representation to TracesWrapper. 
-func TracesFromOtlp(req *otlpcollectortrace.ExportTraceServiceRequest) TracesWrapper { - return TracesWrapper{req: req} -} - -// TracesCompatibilityChanges performs backward compatibility conversion of Span Status code according to -// OTLP specification as we are a new receiver and sender (we are pushing data to the pipelines): -// See https://github.com/open-telemetry/opentelemetry-proto/blob/59c488bfb8fb6d0458ad6425758b70259ff4a2bd/opentelemetry/proto/trace/v1/trace.proto#L239 -// See https://github.com/open-telemetry/opentelemetry-proto/blob/59c488bfb8fb6d0458ad6425758b70259ff4a2bd/opentelemetry/proto/trace/v1/trace.proto#L253 -func TracesCompatibilityChanges(req *otlpcollectortrace.ExportTraceServiceRequest) { - for _, rss := range req.ResourceSpans { - for _, ils := range rss.InstrumentationLibrarySpans { - for _, span := range ils.Spans { - switch span.Status.Code { - case otlptrace.Status_STATUS_CODE_UNSET: - if span.Status.DeprecatedCode != otlptrace.Status_DEPRECATED_STATUS_CODE_OK { - span.Status.Code = otlptrace.Status_STATUS_CODE_ERROR - } - case otlptrace.Status_STATUS_CODE_OK: - // If status code is set then overwrites deprecated. - span.Status.DeprecatedCode = otlptrace.Status_DEPRECATED_STATUS_CODE_OK - case otlptrace.Status_STATUS_CODE_ERROR: - span.Status.DeprecatedCode = otlptrace.Status_DEPRECATED_STATUS_CODE_UNKNOWN_ERROR - } - } - } - } -} - -// LogsWrapper is an intermediary struct that is declared in an internal package -// as a way to prevent certain functions of pdata.Logs data type to be callable by -// any code outside of this module. -type LogsWrapper struct { - req *otlpcollectorlog.ExportLogsServiceRequest -} - -// LogsToOtlp internal helper to convert LogsWrapper to protobuf representation. -func LogsToOtlp(l LogsWrapper) *otlpcollectorlog.ExportLogsServiceRequest { - return l.req -} - -// LogsFromOtlp internal helper to convert protobuf representation to LogsWrapper. 
-func LogsFromOtlp(req *otlpcollectorlog.ExportLogsServiceRequest) LogsWrapper { - return LogsWrapper{req: req} -} diff --git a/internal/otel_collector/internal/otlptext/databuffer.go b/internal/otel_collector/internal/otlptext/databuffer.go index e780606aece..85bd315910b 100644 --- a/internal/otel_collector/internal/otlptext/databuffer.go +++ b/internal/otel_collector/internal/otlptext/databuffer.go @@ -15,21 +15,22 @@ package otlptext import ( + "bytes" "fmt" "strconv" "strings" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" tracetranslator "go.opentelemetry.io/collector/translator/trace" ) type dataBuffer struct { - str strings.Builder + buf bytes.Buffer } func (b *dataBuffer) logEntry(format string, a ...interface{}) { - b.str.WriteString(fmt.Sprintf(format, a...)) - b.str.WriteString("\n") + b.buf.WriteString(fmt.Sprintf(format, a...)) + b.buf.WriteString("\n") } func (b *dataBuffer) logAttr(label string, value string) { @@ -81,15 +82,15 @@ func (b *dataBuffer) logMetricDataPoints(m pdata.Metric) { return case pdata.MetricDataTypeIntGauge: b.logIntDataPoints(m.IntGauge().DataPoints()) - case pdata.MetricDataTypeDoubleGauge: - b.logDoubleDataPoints(m.DoubleGauge().DataPoints()) + case pdata.MetricDataTypeGauge: + b.logDoubleDataPoints(m.Gauge().DataPoints()) case pdata.MetricDataTypeIntSum: data := m.IntSum() b.logEntry(" -> IsMonotonic: %t", data.IsMonotonic()) b.logEntry(" -> AggregationTemporality: %s", data.AggregationTemporality().String()) b.logIntDataPoints(data.DataPoints()) - case pdata.MetricDataTypeDoubleSum: - data := m.DoubleSum() + case pdata.MetricDataTypeSum: + data := m.Sum() b.logEntry(" -> IsMonotonic: %t", data.IsMonotonic()) b.logEntry(" -> AggregationTemporality: %s", data.AggregationTemporality().String()) b.logDoubleDataPoints(data.DataPoints()) diff --git a/internal/otel_collector/internal/otlptext/logs.go b/internal/otel_collector/internal/otlptext/logs.go index d2058e18da0..1676de15d1a 100644 --- a/internal/otel_collector/internal/otlptext/logs.go +++ b/internal/otel_collector/internal/otlptext/logs.go @@ -14,10 +14,19 @@ package otlptext -import "go.opentelemetry.io/collector/consumer/pdata" +import ( + "go.opentelemetry.io/collector/model/pdata" +) -// Logs data to text -func Logs(ld pdata.Logs) string { +// NewTextLogsMarshaler returns a serializer.LogsMarshaler to encode to OTLP text bytes. +func NewTextLogsMarshaler() pdata.LogsMarshaler { + return textLogsMarshaler{} +} + +type textLogsMarshaler struct{} + +// MarshalLogs pdata.Logs to OTLP text. +func (textLogsMarshaler) MarshalLogs(ld pdata.Logs) ([]byte, error) { buf := dataBuffer{} rls := ld.ResourceLogs() for i := 0; i < rls.Len(); i++ { @@ -39,5 +48,5 @@ func Logs(ld pdata.Logs) string { } } - return buf.str.String() + return buf.buf.Bytes(), nil } diff --git a/internal/otel_collector/internal/otlptext/metrics.go b/internal/otel_collector/internal/otlptext/metrics.go index c9d5cc1c15f..a107de27e5f 100644 --- a/internal/otel_collector/internal/otlptext/metrics.go +++ b/internal/otel_collector/internal/otlptext/metrics.go @@ -14,10 +14,19 @@ package otlptext -import "go.opentelemetry.io/collector/consumer/pdata" +import ( + "go.opentelemetry.io/collector/model/pdata" +) -// Metrics data to text -func Metrics(md pdata.Metrics) string { +// NewTextMetricsMarshaler returns a serializer.MetricsMarshaler to encode to OTLP text bytes. 
+func NewTextMetricsMarshaler() pdata.MetricsMarshaler { + return textMetricsMarshaler{} +} + +type textMetricsMarshaler struct{} + +// MarshalMetrics pdata.Metrics to OTLP text. +func (textMetricsMarshaler) MarshalMetrics(md pdata.Metrics) ([]byte, error) { buf := dataBuffer{} rms := md.ResourceMetrics() for i := 0; i < rms.Len(); i++ { @@ -39,5 +48,5 @@ func Metrics(md pdata.Metrics) string { } } - return buf.str.String() + return buf.buf.Bytes(), nil } diff --git a/internal/otel_collector/internal/otlptext/traces.go b/internal/otel_collector/internal/otlptext/traces.go index 6718ccd169c..d3e3cc6548c 100644 --- a/internal/otel_collector/internal/otlptext/traces.go +++ b/internal/otel_collector/internal/otlptext/traces.go @@ -14,10 +14,19 @@ package otlptext -import "go.opentelemetry.io/collector/consumer/pdata" +import ( + "go.opentelemetry.io/collector/model/pdata" +) -// Traces data to text -func Traces(td pdata.Traces) string { +// NewTextTracesMarshaler returns a serializer.TracesMarshaler to encode to OTLP text bytes. +func NewTextTracesMarshaler() pdata.TracesMarshaler { + return textTracesMarshaler{} +} + +type textTracesMarshaler struct{} + +// MarshalTraces pdata.Traces to OTLP text. +func (textTracesMarshaler) MarshalTraces(td pdata.Traces) ([]byte, error) { buf := dataBuffer{} rss := td.ResourceSpans() for i := 0; i < rss.Len(); i++ { @@ -52,5 +61,5 @@ func Traces(td pdata.Traces) string { } } - return buf.str.String() + return buf.buf.Bytes(), nil } diff --git a/internal/otel_collector/internal/pdatagrpc/logs.go b/internal/otel_collector/internal/pdatagrpc/logs.go deleted file mode 100644 index 0c1578d8e12..00000000000 --- a/internal/otel_collector/internal/pdatagrpc/logs.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pdatagrpc - -import ( - "context" - - "google.golang.org/grpc" - - "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal" - otlpcollectorlogs "go.opentelemetry.io/collector/internal/data/protogen/collector/logs/v1" -) - -// TODO: Consider to add `LogsRequest` and `LogsResponse`. Right now the funcs return interface{}, -// it would be better and future proof to create a LogsResponse empty struct and return that. -// So if we ever add things in the OTLP response I can deal with that. Similar for request if we add non pdata properties. - -// LogsClient is the client API for OTLP-GRPC Logs service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type LogsClient interface { - // Export pdata.Logs to the server. - // - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. 
- Export(ctx context.Context, in pdata.Logs, opts ...grpc.CallOption) (interface{}, error) -} - -type logsClient struct { - rawClient otlpcollectorlogs.LogsServiceClient -} - -// NewLogsClient returns a new LogsClient connected using the given connection. -func NewLogsClient(cc *grpc.ClientConn) LogsClient { - return &logsClient{rawClient: otlpcollectorlogs.NewLogsServiceClient(cc)} -} - -func (c *logsClient) Export(ctx context.Context, in pdata.Logs, opts ...grpc.CallOption) (interface{}, error) { - return c.rawClient.Export(ctx, internal.LogsToOtlp(in.InternalRep()), opts...) -} - -// LogsServer is the server API for OTLP gRPC LogsService service. -type LogsServer interface { - // Export is called every time a new request is received. - // - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. - Export(context.Context, pdata.Logs) (interface{}, error) -} - -// RegisterLogsServer registers the LogsServer to the grpc.Server. -func RegisterLogsServer(s *grpc.Server, srv LogsServer) { - otlpcollectorlogs.RegisterLogsServiceServer(s, &rawLogsServer{srv: srv}) -} - -type rawLogsServer struct { - srv LogsServer -} - -func (s rawLogsServer) Export(ctx context.Context, request *otlpcollectorlogs.ExportLogsServiceRequest) (*otlpcollectorlogs.ExportLogsServiceResponse, error) { - _, err := s.srv.Export(ctx, pdata.LogsFromInternalRep(internal.LogsFromOtlp(request))) - return &otlpcollectorlogs.ExportLogsServiceResponse{}, err -} diff --git a/internal/otel_collector/internal/pdatagrpc/metrics.go b/internal/otel_collector/internal/pdatagrpc/metrics.go deleted file mode 100644 index 36f15f75599..00000000000 --- a/internal/otel_collector/internal/pdatagrpc/metrics.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pdatagrpc - -import ( - "context" - - "google.golang.org/grpc" - - "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal" - otlpcollectormetrics "go.opentelemetry.io/collector/internal/data/protogen/collector/metrics/v1" -) - -// TODO: Consider to add `MetricsRequest` and `MetricsResponse`. Right now the funcs return interface{}, -// it would be better and future proof to create a MetricsResponse empty struct and return that. -// So if we ever add things in the OTLP response I can deal with that. Similar for request if we add non pdata properties. - -// MetricsClient is the client API for OTLP-GRPC Metrics service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type MetricsClient interface { - // Export pdata.Metrics to the server. - // - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. 
- Export(ctx context.Context, in pdata.Metrics, opts ...grpc.CallOption) (interface{}, error) -} - -type metricsClient struct { - rawClient otlpcollectormetrics.MetricsServiceClient -} - -// NewMetricsClient returns a new MetricsClient connected using the given connection. -func NewMetricsClient(cc *grpc.ClientConn) MetricsClient { - return &metricsClient{rawClient: otlpcollectormetrics.NewMetricsServiceClient(cc)} -} - -func (c *metricsClient) Export(ctx context.Context, in pdata.Metrics, opts ...grpc.CallOption) (interface{}, error) { - return c.rawClient.Export(ctx, internal.MetricsToOtlp(in.InternalRep()), opts...) -} - -// MetricsServer is the server API for OTLP gRPC MetricsService service. -type MetricsServer interface { - // Export is called every time a new request is received. - // - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. - Export(context.Context, pdata.Metrics) (interface{}, error) -} - -// RegisterMetricsServer registers the MetricsServer to the grpc.Server. -func RegisterMetricsServer(s *grpc.Server, srv MetricsServer) { - otlpcollectormetrics.RegisterMetricsServiceServer(s, &rawMetricsServer{srv: srv}) -} - -type rawMetricsServer struct { - srv MetricsServer -} - -func (s rawMetricsServer) Export(ctx context.Context, request *otlpcollectormetrics.ExportMetricsServiceRequest) (*otlpcollectormetrics.ExportMetricsServiceResponse, error) { - _, err := s.srv.Export(ctx, pdata.MetricsFromInternalRep(internal.MetricsFromOtlp(request))) - return &otlpcollectormetrics.ExportMetricsServiceResponse{}, err -} diff --git a/internal/otel_collector/internal/pdatagrpc/traces.go b/internal/otel_collector/internal/pdatagrpc/traces.go deleted file mode 100644 index 9bbd9a23f22..00000000000 --- a/internal/otel_collector/internal/pdatagrpc/traces.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pdatagrpc - -import ( - "context" - - "google.golang.org/grpc" - - "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal" - otlpcollectortraces "go.opentelemetry.io/collector/internal/data/protogen/collector/trace/v1" -) - -// TODO: Consider to add `TracesRequest` and `TracesResponse`. Right now the funcs return interface{}, -// it would be better and future proof to create a TracesResponse empty struct and return that. -// So if we ever add things in the OTLP response I can deal with that. Similar for request if we add non pdata properties. - -// TracesClient is the client API for OTLP-GRPC Traces service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type TracesClient interface { - // Export pdata.Traces to the server. - // - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. 
- Export(ctx context.Context, in pdata.Traces, opts ...grpc.CallOption) (interface{}, error) -} - -type tracesClient struct { - rawClient otlpcollectortraces.TraceServiceClient -} - -// NewTracesClient returns a new TracesClient connected using the given connection. -func NewTracesClient(cc *grpc.ClientConn) TracesClient { - return &tracesClient{rawClient: otlpcollectortraces.NewTraceServiceClient(cc)} -} - -// Export implements the TracesClient interface. -func (c *tracesClient) Export(ctx context.Context, in pdata.Traces, opts ...grpc.CallOption) (interface{}, error) { - return c.rawClient.Export(ctx, internal.TracesToOtlp(in.InternalRep()), opts...) -} - -// TracesServer is the server API for OTLP gRPC TracesService service. -type TracesServer interface { - // Export is called every time a new request is received. - // - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. - Export(context.Context, pdata.Traces) (interface{}, error) -} - -// RegisterTracesServer registers the TracesServer to the grpc.Server. -func RegisterTracesServer(s *grpc.Server, srv TracesServer) { - otlpcollectortraces.RegisterTraceServiceServer(s, &rawTracesServer{srv: srv}) -} - -type rawTracesServer struct { - srv TracesServer -} - -func (s rawTracesServer) Export(ctx context.Context, request *otlpcollectortraces.ExportTraceServiceRequest) (*otlpcollectortraces.ExportTraceServiceResponse, error) { - _, err := s.srv.Export(ctx, pdata.TracesFromInternalRep(internal.TracesFromOtlp(request))) - return &otlpcollectortraces.ExportTraceServiceResponse{}, err -} diff --git a/internal/otel_collector/internal/processor/filterexpr/matcher.go b/internal/otel_collector/internal/processor/filterexpr/matcher.go index c84212abe9b..b40ccc7b480 100644 --- a/internal/otel_collector/internal/processor/filterexpr/matcher.go +++ b/internal/otel_collector/internal/processor/filterexpr/matcher.go @@ -18,7 +18,7 @@ import ( "github.com/antonmedv/expr" "github.com/antonmedv/expr/vm" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) type Matcher struct { @@ -46,12 +46,12 @@ func (m *Matcher) MatchMetric(metric pdata.Metric) (bool, error) { switch metric.DataType() { case pdata.MetricDataTypeIntGauge: return m.matchIntGauge(metricName, metric.IntGauge()) - case pdata.MetricDataTypeDoubleGauge: - return m.matchDoubleGauge(metricName, metric.DoubleGauge()) + case pdata.MetricDataTypeGauge: + return m.matchGauge(metricName, metric.Gauge()) case pdata.MetricDataTypeIntSum: return m.matchIntSum(metricName, metric.IntSum()) - case pdata.MetricDataTypeDoubleSum: - return m.matchDoubleSum(metricName, metric.DoubleSum()) + case pdata.MetricDataTypeSum: + return m.matchSum(metricName, metric.Sum()) case pdata.MetricDataTypeIntHistogram: return m.matchIntHistogram(metricName, metric.IntHistogram()) case pdata.MetricDataTypeHistogram: @@ -75,7 +75,7 @@ func (m *Matcher) matchIntGauge(metricName string, gauge pdata.IntGauge) (bool, return false, nil } -func (m *Matcher) matchDoubleGauge(metricName string, gauge pdata.DoubleGauge) (bool, error) { +func (m *Matcher) matchGauge(metricName string, gauge pdata.Gauge) (bool, error) { pts := gauge.DataPoints() for i := 0; i < pts.Len(); i++ { matched, err := m.matchEnv(metricName, pts.At(i).LabelsMap()) @@ -89,7 +89,7 @@ func (m *Matcher) matchDoubleGauge(metricName string, gauge pdata.DoubleGauge) ( return false, nil } -func (m *Matcher) matchDoubleSum(metricName string, sum pdata.DoubleSum) (bool, 
error) { +func (m *Matcher) matchSum(metricName string, sum pdata.Sum) (bool, error) { pts := sum.DataPoints() for i := 0; i < pts.Len(); i++ { matched, err := m.matchEnv(metricName, pts.At(i).LabelsMap()) diff --git a/internal/otel_collector/internal/processor/filterhelper/filterhelper.go b/internal/otel_collector/internal/processor/filterhelper/filterhelper.go index fca006d36c4..393d32d945a 100644 --- a/internal/otel_collector/internal/processor/filterhelper/filterhelper.go +++ b/internal/otel_collector/internal/processor/filterhelper/filterhelper.go @@ -19,7 +19,7 @@ import ( "github.com/spf13/cast" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) // NewAttributeValueRaw is used to convert the raw `value` from ActionKeyValue to the supported trace attribute values. diff --git a/internal/otel_collector/internal/processor/filterlog/filterlog.go b/internal/otel_collector/internal/processor/filterlog/filterlog.go index bcb1ed3f5ad..96aae6a60f0 100644 --- a/internal/otel_collector/internal/processor/filterlog/filterlog.go +++ b/internal/otel_collector/internal/processor/filterlog/filterlog.go @@ -17,10 +17,10 @@ package filterlog import ( "fmt" - "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/internal/processor/filterconfig" "go.opentelemetry.io/collector/internal/processor/filtermatcher" "go.opentelemetry.io/collector/internal/processor/filterset" + "go.opentelemetry.io/collector/model/pdata" ) // Matcher is an interface that allows matching a log record against a diff --git a/internal/otel_collector/internal/processor/filtermatcher/attributematcher.go b/internal/otel_collector/internal/processor/filtermatcher/attributematcher.go index 96b77fe76dd..3b8f301b734 100644 --- a/internal/otel_collector/internal/processor/filtermatcher/attributematcher.go +++ b/internal/otel_collector/internal/processor/filtermatcher/attributematcher.go @@ -19,10 +19,10 @@ import ( "fmt" "strconv" - "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/internal/processor/filterconfig" "go.opentelemetry.io/collector/internal/processor/filterhelper" "go.opentelemetry.io/collector/internal/processor/filterset" + "go.opentelemetry.io/collector/model/pdata" ) type AttributesMatcher []AttributeMatcher diff --git a/internal/otel_collector/internal/processor/filtermatcher/filtermatcher.go b/internal/otel_collector/internal/processor/filtermatcher/filtermatcher.go index 9f192318d8e..a809867693d 100644 --- a/internal/otel_collector/internal/processor/filtermatcher/filtermatcher.go +++ b/internal/otel_collector/internal/processor/filtermatcher/filtermatcher.go @@ -17,9 +17,9 @@ package filtermatcher import ( "fmt" - "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/internal/processor/filterconfig" "go.opentelemetry.io/collector/internal/processor/filterset" + "go.opentelemetry.io/collector/model/pdata" ) type instrumentationLibraryMatcher struct { diff --git a/internal/otel_collector/internal/processor/filtermetric/expr_matcher.go b/internal/otel_collector/internal/processor/filtermetric/expr_matcher.go index 50352293d88..2eb0cc869f4 100644 --- a/internal/otel_collector/internal/processor/filtermetric/expr_matcher.go +++ b/internal/otel_collector/internal/processor/filtermetric/expr_matcher.go @@ -15,8 +15,8 @@ package filtermetric import ( - "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/internal/processor/filterexpr" + "go.opentelemetry.io/collector/model/pdata" ) 
type exprMatcher struct { diff --git a/internal/otel_collector/internal/processor/filtermetric/filtermetric.go b/internal/otel_collector/internal/processor/filtermetric/filtermetric.go index ec9c176ccf7..6983aeb7b83 100644 --- a/internal/otel_collector/internal/processor/filtermetric/filtermetric.go +++ b/internal/otel_collector/internal/processor/filtermetric/filtermetric.go @@ -15,7 +15,7 @@ package filtermetric import ( - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) type Matcher interface { diff --git a/internal/otel_collector/internal/processor/filtermetric/name_matcher.go b/internal/otel_collector/internal/processor/filtermetric/name_matcher.go index 41b99407827..3daa1bf84ee 100644 --- a/internal/otel_collector/internal/processor/filtermetric/name_matcher.go +++ b/internal/otel_collector/internal/processor/filtermetric/name_matcher.go @@ -15,8 +15,8 @@ package filtermetric import ( - "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/internal/processor/filterset" + "go.opentelemetry.io/collector/model/pdata" ) // nameMatcher matches metrics by metric properties against prespecified values for each property. diff --git a/internal/otel_collector/internal/processor/filterspan/filterspan.go b/internal/otel_collector/internal/processor/filterspan/filterspan.go index 7f2e2bfac76..9092ed624ea 100644 --- a/internal/otel_collector/internal/processor/filterspan/filterspan.go +++ b/internal/otel_collector/internal/processor/filterspan/filterspan.go @@ -17,10 +17,10 @@ package filterspan import ( "fmt" - "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/internal/processor/filterconfig" "go.opentelemetry.io/collector/internal/processor/filtermatcher" "go.opentelemetry.io/collector/internal/processor/filterset" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/translator/conventions" ) diff --git a/internal/otel_collector/internal/testcomponents/example_exporter.go b/internal/otel_collector/internal/testcomponents/example_exporter.go index 159f3cd1c0e..d5c0cc9a13a 100644 --- a/internal/otel_collector/internal/testcomponents/example_exporter.go +++ b/internal/otel_collector/internal/testcomponents/example_exporter.go @@ -21,8 +21,8 @@ import ( "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/config/configparser" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/exporter/exporterhelper" + "go.opentelemetry.io/collector/model/pdata" ) var _ config.CustomUnmarshable = (*ExampleExporter)(nil) diff --git a/internal/otel_collector/internal/testdata/common.go b/internal/otel_collector/internal/testdata/common.go index a3eab93d37b..ced24a4eb13 100644 --- a/internal/otel_collector/internal/testdata/common.go +++ b/internal/otel_collector/internal/testdata/common.go @@ -15,9 +15,7 @@ package testdata import ( - otlpcommon "go.opentelemetry.io/collector/internal/data/protogen/common/v1" - - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) var ( @@ -43,136 +41,38 @@ func initResourceAttributes1(dest pdata.AttributeMap) { dest.InitFromMap(resourceAttributes1) } -func generateOtlpResourceAttributes1() []otlpcommon.KeyValue { - return []otlpcommon.KeyValue{ - { - Key: "resource-attr", - Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "resource-attr-val-1"}}, - }, - } -} - func initResourceAttributes2(dest 
pdata.AttributeMap) { dest.InitFromMap(resourceAttributes2) } -func generateOtlpResourceAttributes2() []otlpcommon.KeyValue { - return []otlpcommon.KeyValue{ - { - Key: "resource-attr", - Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "resource-attr-val-2"}}, - }, - } -} - func initSpanAttributes(dest pdata.AttributeMap) { dest.InitFromMap(spanAttributes) } -func generateOtlpSpanAttributes() []otlpcommon.KeyValue { - return []otlpcommon.KeyValue{ - { - Key: "span-attr", - Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "span-attr-val"}}, - }, - } -} - func initSpanEventAttributes(dest pdata.AttributeMap) { dest.InitFromMap(spanEventAttributes) } -func generateOtlpSpanEventAttributes() []otlpcommon.KeyValue { - return []otlpcommon.KeyValue{ - { - Key: "span-event-attr", - Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "span-event-attr-val"}}, - }, - } -} - func initSpanLinkAttributes(dest pdata.AttributeMap) { dest.InitFromMap(spanLinkAttributes) } -func generateOtlpSpanLinkAttributes() []otlpcommon.KeyValue { - return []otlpcommon.KeyValue{ - { - Key: "span-link-attr", - Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "span-link-attr-val"}}, - }, - } -} - func initMetricLabels1(dest pdata.StringMap) { dest.InitFromMap(map[string]string{TestLabelKey1: TestLabelValue1}) } -func generateOtlpMetricLabels1() []otlpcommon.StringKeyValue { - return []otlpcommon.StringKeyValue{ - { - Key: TestLabelKey1, - Value: TestLabelValue1, - }, - } -} - func initMetricLabels12(dest pdata.StringMap) { dest.InitFromMap(map[string]string{TestLabelKey1: TestLabelValue1, TestLabelKey2: TestLabelValue2}).Sort() } -func generateOtlpMetricLabels12() []otlpcommon.StringKeyValue { - return []otlpcommon.StringKeyValue{ - { - Key: TestLabelKey1, - Value: TestLabelValue1, - }, - { - Key: TestLabelKey2, - Value: TestLabelValue2, - }, - } -} - func initMetricLabels13(dest pdata.StringMap) { dest.InitFromMap(map[string]string{TestLabelKey1: TestLabelValue1, TestLabelKey3: TestLabelValue3}).Sort() } -func generateOtlpMetricLabels13() []otlpcommon.StringKeyValue { - return []otlpcommon.StringKeyValue{ - { - Key: TestLabelKey1, - Value: TestLabelValue1, - }, - { - Key: TestLabelKey3, - Value: TestLabelValue3, - }, - } -} - func initMetricLabels2(dest pdata.StringMap) { dest.InitFromMap(map[string]string{TestLabelKey2: TestLabelValue2}) } -func generateOtlpMetricLabels2() []otlpcommon.StringKeyValue { - return []otlpcommon.StringKeyValue{ - { - Key: TestLabelKey2, - Value: TestLabelValue2, - }, - } -} - func initMetricAttachment(dest pdata.StringMap) { dest.InitFromMap(map[string]string{TestAttachmentKey: TestAttachmentValue}) } - -func generateOtlpMetricAttachment() []otlpcommon.StringKeyValue { - return []otlpcommon.StringKeyValue{ - { - Key: TestAttachmentKey, - Value: TestAttachmentValue, - }, - } -} diff --git a/internal/otel_collector/internal/testdata/log.go b/internal/otel_collector/internal/testdata/log.go index e9fb22900da..cd055885cc9 100644 --- a/internal/otel_collector/internal/testdata/log.go +++ b/internal/otel_collector/internal/testdata/log.go @@ -17,11 +17,7 @@ package testdata import ( "time" - "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/data" - otlpcollectorlog "go.opentelemetry.io/collector/internal/data/protogen/collector/logs/v1" - otlpcommon "go.opentelemetry.io/collector/internal/data/protogen/common/v1" - otlplogs 
"go.opentelemetry.io/collector/internal/data/protogen/logs/v1" + "go.opentelemetry.io/collector/model/pdata" ) var ( @@ -35,30 +31,12 @@ func GenerateLogsOneEmptyResourceLogs() pdata.Logs { return ld } -func generateLogsOtlpOneEmptyResourceLogs() *otlpcollectorlog.ExportLogsServiceRequest { - return &otlpcollectorlog.ExportLogsServiceRequest{ - ResourceLogs: []*otlplogs.ResourceLogs{ - {}, - }, - } -} - func GenerateLogsNoLogRecords() pdata.Logs { ld := GenerateLogsOneEmptyResourceLogs() initResource1(ld.ResourceLogs().At(0).Resource()) return ld } -func generateLogOtlpNoLogRecords() *otlpcollectorlog.ExportLogsServiceRequest { - return &otlpcollectorlog.ExportLogsServiceRequest{ - ResourceLogs: []*otlplogs.ResourceLogs{ - { - Resource: generateOtlpResource1(), - }, - }, - } -} - func GenerateLogsOneEmptyLogRecord() pdata.Logs { ld := GenerateLogsNoLogRecords() rs0 := ld.ResourceLogs().At(0) @@ -66,23 +44,6 @@ func GenerateLogsOneEmptyLogRecord() pdata.Logs { return ld } -func generateLogsOtlpOneEmptyLogRecord() *otlpcollectorlog.ExportLogsServiceRequest { - return &otlpcollectorlog.ExportLogsServiceRequest{ - ResourceLogs: []*otlplogs.ResourceLogs{ - { - Resource: generateOtlpResource1(), - InstrumentationLibraryLogs: []*otlplogs.InstrumentationLibraryLogs{ - { - Logs: []*otlplogs.LogRecord{ - {}, - }, - }, - }, - }, - }, - } -} - func GenerateLogsOneLogRecordNoResource() pdata.Logs { ld := GenerateLogsOneEmptyResourceLogs() rs0 := ld.ResourceLogs().At(0) @@ -90,45 +51,12 @@ func GenerateLogsOneLogRecordNoResource() pdata.Logs { return ld } -func generateLogsOtlpOneLogRecordNoResource() *otlpcollectorlog.ExportLogsServiceRequest { - return &otlpcollectorlog.ExportLogsServiceRequest{ - ResourceLogs: []*otlplogs.ResourceLogs{ - { - InstrumentationLibraryLogs: []*otlplogs.InstrumentationLibraryLogs{ - { - Logs: []*otlplogs.LogRecord{ - generateOtlpLogOne(), - }, - }, - }, - }, - }, - } -} - func GenerateLogsOneLogRecord() pdata.Logs { ld := GenerateLogsOneEmptyLogRecord() fillLogOne(ld.ResourceLogs().At(0).InstrumentationLibraryLogs().At(0).Logs().At(0)) return ld } -func generateLogsOtlpOneLogRecord() *otlpcollectorlog.ExportLogsServiceRequest { - return &otlpcollectorlog.ExportLogsServiceRequest{ - ResourceLogs: []*otlplogs.ResourceLogs{ - { - Resource: generateOtlpResource1(), - InstrumentationLibraryLogs: []*otlplogs.InstrumentationLibraryLogs{ - { - Logs: []*otlplogs.LogRecord{ - generateOtlpLogOne(), - }, - }, - }, - }, - }, - } -} - func GenerateLogsTwoLogRecordsSameResource() pdata.Logs { ld := GenerateLogsOneEmptyLogRecord() logs := ld.ResourceLogs().At(0).InstrumentationLibraryLogs().At(0).Logs() @@ -137,24 +65,6 @@ func GenerateLogsTwoLogRecordsSameResource() pdata.Logs { return ld } -func generateLogsOtlpTwoLogRecordsSameResource() *otlpcollectorlog.ExportLogsServiceRequest { - return &otlpcollectorlog.ExportLogsServiceRequest{ - ResourceLogs: []*otlplogs.ResourceLogs{ - { - Resource: generateOtlpResource1(), - InstrumentationLibraryLogs: []*otlplogs.InstrumentationLibraryLogs{ - { - Logs: []*otlplogs.LogRecord{ - generateOtlpLogOne(), - generateOtlpLogTwo(), - }, - }, - }, - }, - }, - } -} - func GenerateLogsTwoLogRecordsSameResourceOneDifferent() pdata.Logs { ld := pdata.NewLogs() rl0 := ld.ResourceLogs().AppendEmpty() @@ -167,35 +77,6 @@ func GenerateLogsTwoLogRecordsSameResourceOneDifferent() pdata.Logs { fillLogThree(rl1.InstrumentationLibraryLogs().AppendEmpty().Logs().AppendEmpty()) return ld } - -func generateLogsOtlpTwoLogRecordsSameResourceOneDifferent() 
*otlpcollectorlog.ExportLogsServiceRequest { - return &otlpcollectorlog.ExportLogsServiceRequest{ - ResourceLogs: []*otlplogs.ResourceLogs{ - { - Resource: generateOtlpResource1(), - InstrumentationLibraryLogs: []*otlplogs.InstrumentationLibraryLogs{ - { - Logs: []*otlplogs.LogRecord{ - generateOtlpLogOne(), - generateOtlpLogTwo(), - }, - }, - }, - }, - { - Resource: generateOtlpResource2(), - InstrumentationLibraryLogs: []*otlplogs.InstrumentationLibraryLogs{ - { - Logs: []*otlplogs.LogRecord{ - generateOtlpLogThree(), - }, - }, - }, - }, - }, - } -} - func fillLogOne(log pdata.LogRecord) { log.SetName("logA") log.SetTimestamp(TestLogTimestamp) @@ -212,29 +93,6 @@ func fillLogOne(log pdata.LogRecord) { log.Body().SetStringVal("This is a log message") } -func generateOtlpLogOne() *otlplogs.LogRecord { - return &otlplogs.LogRecord{ - Name: "logA", - TimeUnixNano: uint64(TestLogTimestamp), - DroppedAttributesCount: 1, - SeverityNumber: otlplogs.SeverityNumber_SEVERITY_NUMBER_INFO, - SeverityText: "Info", - Body: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "This is a log message"}}, - SpanId: data.NewSpanID([8]byte{0x01, 0x02, 0x04, 0x08}), - TraceId: data.NewTraceID([16]byte{0x08, 0x04, 0x02, 0x01}), - Attributes: []otlpcommon.KeyValue{ - { - Key: "app", - Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "server"}}, - }, - { - Key: "instance_num", - Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_IntValue{IntValue: 1}}, - }, - }, - } -} - func fillLogTwo(log pdata.LogRecord) { log.SetName("logB") log.SetTimestamp(TestLogTimestamp) @@ -249,27 +107,6 @@ func fillLogTwo(log pdata.LogRecord) { log.Body().SetStringVal("something happened") } -func generateOtlpLogTwo() *otlplogs.LogRecord { - return &otlplogs.LogRecord{ - Name: "logB", - TimeUnixNano: uint64(TestLogTimestamp), - DroppedAttributesCount: 1, - SeverityNumber: otlplogs.SeverityNumber_SEVERITY_NUMBER_INFO, - SeverityText: "Info", - Body: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "something happened"}}, - Attributes: []otlpcommon.KeyValue{ - { - Key: "customer", - Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "acme"}}, - }, - { - Key: "env", - Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "dev"}}, - }, - }, - } -} - func fillLogThree(log pdata.LogRecord) { log.SetName("logC") log.SetTimestamp(TestLogTimestamp) @@ -280,23 +117,18 @@ func fillLogThree(log pdata.LogRecord) { log.Body().SetStringVal("something else happened") } -func generateOtlpLogThree() *otlplogs.LogRecord { - return &otlplogs.LogRecord{ - Name: "logC", - TimeUnixNano: uint64(TestLogTimestamp), - DroppedAttributesCount: 1, - SeverityNumber: otlplogs.SeverityNumber_SEVERITY_NUMBER_WARN, - SeverityText: "Warning", - Body: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "something else happened"}}, - } -} - func GenerateLogsManyLogRecordsSameResource(count int) pdata.Logs { ld := GenerateLogsOneEmptyLogRecord() logs := ld.ResourceLogs().At(0).InstrumentationLibraryLogs().At(0).Logs() - logs.Resize(count) + logs.EnsureCapacity(count) for i := 0; i < count; i++ { - l := logs.At(i) + var l pdata.LogRecord + if i < logs.Len() { + l = logs.At(i) + } else { + l = logs.AppendEmpty() + } + if i%2 == 0 { fillLogOne(l) } else { diff --git a/internal/otel_collector/internal/testdata/metric.go b/internal/otel_collector/internal/testdata/metric.go index 28204e65682..d2a9269380c 100644 --- 
a/internal/otel_collector/internal/testdata/metric.go +++ b/internal/otel_collector/internal/testdata/metric.go @@ -17,9 +17,7 @@ package testdata import ( "time" - "go.opentelemetry.io/collector/consumer/pdata" - otlpcollectormetrics "go.opentelemetry.io/collector/internal/data/protogen/collector/metrics/v1" - otlpmetrics "go.opentelemetry.io/collector/internal/data/protogen/metrics/v1" + "go.opentelemetry.io/collector/model/pdata" ) var ( @@ -49,14 +47,6 @@ func GenerateMetricsOneEmptyResourceMetrics() pdata.Metrics { return md } -func generateMetricsOtlpOneEmptyResourceMetrics() *otlpcollectormetrics.ExportMetricsServiceRequest { - return &otlpcollectormetrics.ExportMetricsServiceRequest{ - ResourceMetrics: []*otlpmetrics.ResourceMetrics{ - {}, - }, - } -} - func GenerateMetricsNoLibraries() pdata.Metrics { md := GenerateMetricsOneEmptyResourceMetrics() ms0 := md.ResourceMetrics().At(0) @@ -64,36 +54,12 @@ func GenerateMetricsNoLibraries() pdata.Metrics { return md } -func generateMetricsOtlpNoLibraries() *otlpcollectormetrics.ExportMetricsServiceRequest { - return &otlpcollectormetrics.ExportMetricsServiceRequest{ - ResourceMetrics: []*otlpmetrics.ResourceMetrics{ - { - Resource: generateOtlpResource1(), - }, - }, - } -} - func GenerateMetricsOneEmptyInstrumentationLibrary() pdata.Metrics { md := GenerateMetricsNoLibraries() md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().AppendEmpty() return md } -// generateMetricsOtlpOneEmptyInstrumentationLibrary returns the OTLP representation of the GenerateMetricsOneEmptyInstrumentationLibrary. -func generateMetricsOtlpOneEmptyInstrumentationLibrary() *otlpcollectormetrics.ExportMetricsServiceRequest { - return &otlpcollectormetrics.ExportMetricsServiceRequest{ - ResourceMetrics: []*otlpmetrics.ResourceMetrics{ - { - Resource: generateOtlpResource1(), - InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{ - {}, - }, - }, - }, - } -} - func GenerateMetricsOneMetricNoResource() pdata.Metrics { md := GenerateMetricsOneEmptyResourceMetrics() rm0 := md.ResourceMetrics().At(0) @@ -102,22 +68,6 @@ func GenerateMetricsOneMetricNoResource() pdata.Metrics { return md } -func generateMetricsOtlpOneMetricNoResource() *otlpcollectormetrics.ExportMetricsServiceRequest { - return &otlpcollectormetrics.ExportMetricsServiceRequest{ - ResourceMetrics: []*otlpmetrics.ResourceMetrics{ - { - InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{ - { - Metrics: []*otlpmetrics.Metric{ - generateOtlpCounterIntMetric(), - }, - }, - }, - }, - }, - } -} - func GenerateMetricsOneMetric() pdata.Metrics { md := GenerateMetricsOneEmptyInstrumentationLibrary() rm0ils0 := md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0) @@ -125,23 +75,6 @@ func GenerateMetricsOneMetric() pdata.Metrics { return md } -func generateMetricsOtlpOneMetric() *otlpcollectormetrics.ExportMetricsServiceRequest { - return &otlpcollectormetrics.ExportMetricsServiceRequest{ - ResourceMetrics: []*otlpmetrics.ResourceMetrics{ - { - Resource: generateOtlpResource1(), - InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{ - { - Metrics: []*otlpmetrics.Metric{ - generateOtlpCounterIntMetric(), - }, - }, - }, - }, - }, - } -} - func GenerateMetricsOneMetricOneDataPoint() pdata.Metrics { md := GenerateMetricsOneEmptyInstrumentationLibrary() rm0ils0 := md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0) @@ -165,24 +98,6 @@ func GenerateMetricsOneCounterOneSummaryMetrics() pdata.Metrics { return md } -func 
generateMetricsOtlpTwoMetrics() *otlpcollectormetrics.ExportMetricsServiceRequest { - return &otlpcollectormetrics.ExportMetricsServiceRequest{ - ResourceMetrics: []*otlpmetrics.ResourceMetrics{ - { - Resource: generateOtlpResource1(), - InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{ - { - Metrics: []*otlpmetrics.Metric{ - generateOtlpCounterIntMetric(), - generateOtlpCounterIntMetric(), - }, - }, - }, - }, - }, - } -} - func GenerateMetricsOneMetricNoLabels() pdata.Metrics { md := GenerateMetricsOneMetric() dps := md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(0).IntSum().DataPoints() @@ -191,21 +106,13 @@ func GenerateMetricsOneMetricNoLabels() pdata.Metrics { return md } -func generateMetricsOtlpOneMetricNoLabels() *otlpcollectormetrics.ExportMetricsServiceRequest { - md := generateMetricsOtlpOneMetric() - mis := md.ResourceMetrics[0].InstrumentationLibraryMetrics[0].Metrics[0].Data.(*otlpmetrics.Metric_IntSum).IntSum - mis.DataPoints[0].Labels = nil - mis.DataPoints[1].Labels = nil - return md -} - func GenerateMetricsAllTypesNoDataPoints() pdata.Metrics { md := GenerateMetricsOneEmptyInstrumentationLibrary() ilm0 := md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0) ms := ilm0.Metrics() - initMetric(ms.AppendEmpty(), TestGaugeDoubleMetricName, pdata.MetricDataTypeDoubleGauge) + initMetric(ms.AppendEmpty(), TestGaugeDoubleMetricName, pdata.MetricDataTypeGauge) initMetric(ms.AppendEmpty(), TestGaugeIntMetricName, pdata.MetricDataTypeIntGauge) - initMetric(ms.AppendEmpty(), TestCounterDoubleMetricName, pdata.MetricDataTypeDoubleSum) + initMetric(ms.AppendEmpty(), TestCounterDoubleMetricName, pdata.MetricDataTypeSum) initMetric(ms.AppendEmpty(), TestCounterIntMetricName, pdata.MetricDataTypeIntSum) initMetric(ms.AppendEmpty(), TestDoubleHistogramMetricName, pdata.MetricDataTypeHistogram) initMetric(ms.AppendEmpty(), TestIntHistogramMetricName, pdata.MetricDataTypeIntHistogram) @@ -219,14 +126,14 @@ func GenerateMetricsAllTypesEmptyDataPoint() pdata.Metrics { ms := ilm0.Metrics() doubleGauge := ms.AppendEmpty() - initMetric(doubleGauge, TestGaugeDoubleMetricName, pdata.MetricDataTypeDoubleGauge) - doubleGauge.DoubleGauge().DataPoints().AppendEmpty() + initMetric(doubleGauge, TestGaugeDoubleMetricName, pdata.MetricDataTypeGauge) + doubleGauge.Gauge().DataPoints().AppendEmpty() intGauge := ms.AppendEmpty() initMetric(intGauge, TestGaugeIntMetricName, pdata.MetricDataTypeIntGauge) intGauge.IntGauge().DataPoints().AppendEmpty() doubleSum := ms.AppendEmpty() - initMetric(doubleSum, TestCounterDoubleMetricName, pdata.MetricDataTypeDoubleSum) - doubleSum.DoubleSum().DataPoints().AppendEmpty() + initMetric(doubleSum, TestCounterDoubleMetricName, pdata.MetricDataTypeSum) + doubleSum.Sum().DataPoints().AppendEmpty() intSum := ms.AppendEmpty() initMetric(intSum, TestCounterIntMetricName, pdata.MetricDataTypeIntSum) intSum.IntSum().DataPoints().AppendEmpty() @@ -249,29 +156,6 @@ func GenerateMetricsMetricTypeInvalid() pdata.Metrics { return md } -func generateMetricsOtlpAllTypesNoDataPoints() *otlpcollectormetrics.ExportMetricsServiceRequest { - return &otlpcollectormetrics.ExportMetricsServiceRequest{ - ResourceMetrics: []*otlpmetrics.ResourceMetrics{ - { - Resource: generateOtlpResource1(), - InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{ - { - Metrics: []*otlpmetrics.Metric{ - generateOtlpMetric(TestGaugeDoubleMetricName, pdata.MetricDataTypeDoubleGauge), - 
generateOtlpMetric(TestGaugeIntMetricName, pdata.MetricDataTypeIntGauge), - generateOtlpMetric(TestCounterDoubleMetricName, pdata.MetricDataTypeDoubleSum), - generateOtlpMetric(TestCounterIntMetricName, pdata.MetricDataTypeIntSum), - generateOtlpMetric(TestDoubleHistogramMetricName, pdata.MetricDataTypeHistogram), - generateOtlpMetric(TestIntHistogramMetricName, pdata.MetricDataTypeIntHistogram), - generateOtlpMetric(TestDoubleSummaryMetricName, pdata.MetricDataTypeSummary), - }, - }, - }, - }, - }, - } -} - func GeneratMetricsAllTypesWithSampleDatapoints() pdata.Metrics { metricData := pdata.NewMetrics() rm := metricData.ResourceMetrics().AppendEmpty() @@ -288,27 +172,6 @@ func GeneratMetricsAllTypesWithSampleDatapoints() pdata.Metrics { return metricData } -func generateMetricsOtlpAllTypesWithSampleDatapoints() *otlpcollectormetrics.ExportMetricsServiceRequest { - return &otlpcollectormetrics.ExportMetricsServiceRequest{ - ResourceMetrics: []*otlpmetrics.ResourceMetrics{ - { - Resource: generateOtlpResource1(), - InstrumentationLibraryMetrics: []*otlpmetrics.InstrumentationLibraryMetrics{ - { - Metrics: []*otlpmetrics.Metric{ - generateOtlpCounterIntMetric(), - generateOtlpSumDoubleMetric(), - generateOtlpDoubleHistogramMetric(), - generateOtlpIntHistogramMetric(), - generateOTLPDoubleSummaryMetric(), - }, - }, - }, - }, - }, - } -} - func initCounterIntMetric(im pdata.Metric) { initMetric(im, TestCounterIntMetricName, pdata.MetricDataTypeIntSum) @@ -335,30 +198,10 @@ func initGaugeIntMetricOneDataPoint(im pdata.Metric) { idp0.SetValue(123) } -func generateOtlpCounterIntMetric() *otlpmetrics.Metric { - m := generateOtlpMetric(TestCounterIntMetricName, pdata.MetricDataTypeIntSum) - m.Data.(*otlpmetrics.Metric_IntSum).IntSum.DataPoints = - []*otlpmetrics.IntDataPoint{ - { - Labels: generateOtlpMetricLabels1(), - StartTimeUnixNano: uint64(TestMetricStartTimestamp), - TimeUnixNano: uint64(TestMetricTimestamp), - Value: 123, - }, - { - Labels: generateOtlpMetricLabels2(), - StartTimeUnixNano: uint64(TestMetricStartTimestamp), - TimeUnixNano: uint64(TestMetricTimestamp), - Value: 456, - }, - } - return m -} - func initSumDoubleMetric(dm pdata.Metric) { - initMetric(dm, TestCounterDoubleMetricName, pdata.MetricDataTypeDoubleSum) + initMetric(dm, TestCounterDoubleMetricName, pdata.MetricDataTypeSum) - ddps := dm.DoubleSum().DataPoints() + ddps := dm.Sum().DataPoints() ddp0 := ddps.AppendEmpty() initMetricLabels12(ddp0.LabelsMap()) ddp0.SetStartTimestamp(TestMetricStartTimestamp) @@ -372,26 +215,6 @@ func initSumDoubleMetric(dm pdata.Metric) { ddp1.SetValue(4.56) } -func generateOtlpSumDoubleMetric() *otlpmetrics.Metric { - m := generateOtlpMetric(TestCounterDoubleMetricName, pdata.MetricDataTypeDoubleSum) - m.Data.(*otlpmetrics.Metric_DoubleSum).DoubleSum.DataPoints = - []*otlpmetrics.DoubleDataPoint{ - { - Labels: generateOtlpMetricLabels12(), - StartTimeUnixNano: uint64(TestMetricStartTimestamp), - TimeUnixNano: uint64(TestMetricTimestamp), - Value: 1.23, - }, - { - Labels: generateOtlpMetricLabels13(), - StartTimeUnixNano: uint64(TestMetricStartTimestamp), - TimeUnixNano: uint64(TestMetricTimestamp), - Value: 4.56, - }, - } - return m -} - func initDoubleHistogramMetric(hm pdata.Metric) { initMetric(hm, TestDoubleHistogramMetricName, pdata.MetricDataTypeHistogram) @@ -416,37 +239,6 @@ func initDoubleHistogramMetric(hm pdata.Metric) { hdp1.SetExplicitBounds([]float64{1}) } -func generateOtlpDoubleHistogramMetric() *otlpmetrics.Metric { - m := generateOtlpMetric(TestDoubleHistogramMetricName, 
pdata.MetricDataTypeHistogram) - m.Data.(*otlpmetrics.Metric_DoubleHistogram).DoubleHistogram.DataPoints = - []*otlpmetrics.DoubleHistogramDataPoint{ - { - Labels: generateOtlpMetricLabels13(), - StartTimeUnixNano: uint64(TestMetricStartTimestamp), - TimeUnixNano: uint64(TestMetricTimestamp), - Count: 1, - Sum: 15, - }, - { - Labels: generateOtlpMetricLabels2(), - StartTimeUnixNano: uint64(TestMetricStartTimestamp), - TimeUnixNano: uint64(TestMetricTimestamp), - Count: 1, - Sum: 15, - BucketCounts: []uint64{0, 1}, - ExplicitBounds: []float64{1}, - Exemplars: []otlpmetrics.DoubleExemplar{ - { - FilteredLabels: generateOtlpMetricAttachment(), - TimeUnixNano: uint64(TestMetricExemplarTimestamp), - Value: 15, - }, - }, - }, - } - return m -} - func initIntHistogramMetric(hm pdata.Metric) { initMetric(hm, TestIntHistogramMetricName, pdata.MetricDataTypeIntHistogram) @@ -471,37 +263,6 @@ func initIntHistogramMetric(hm pdata.Metric) { hdp1.SetExplicitBounds([]float64{1}) } -func generateOtlpIntHistogramMetric() *otlpmetrics.Metric { - m := generateOtlpMetric(TestIntHistogramMetricName, pdata.MetricDataTypeIntHistogram) - m.Data.(*otlpmetrics.Metric_IntHistogram).IntHistogram.DataPoints = - []*otlpmetrics.IntHistogramDataPoint{ - { - Labels: generateOtlpMetricLabels13(), - StartTimeUnixNano: uint64(TestMetricStartTimestamp), - TimeUnixNano: uint64(TestMetricTimestamp), - Count: 1, - Sum: 15, - }, - { - Labels: generateOtlpMetricLabels2(), - StartTimeUnixNano: uint64(TestMetricStartTimestamp), - TimeUnixNano: uint64(TestMetricTimestamp), - Count: 1, - Sum: 15, - BucketCounts: []uint64{0, 1}, - ExplicitBounds: []float64{1}, - Exemplars: []otlpmetrics.IntExemplar{ - { - FilteredLabels: generateOtlpMetricAttachment(), - TimeUnixNano: uint64(TestMetricExemplarTimestamp), - Value: 15, - }, - }, - }, - } - return m -} - func initDoubleSummaryMetric(sm pdata.Metric) { initMetric(sm, TestDoubleSummaryMetricName, pdata.MetricDataTypeSummary) @@ -524,34 +285,6 @@ func initDoubleSummaryMetric(sm pdata.Metric) { quantile.SetValue(15) } -func generateOTLPDoubleSummaryMetric() *otlpmetrics.Metric { - m := generateOtlpMetric(TestDoubleSummaryMetricName, pdata.MetricDataTypeSummary) - m.Data.(*otlpmetrics.Metric_DoubleSummary).DoubleSummary.DataPoints = - []*otlpmetrics.DoubleSummaryDataPoint{ - { - Labels: generateOtlpMetricLabels13(), - StartTimeUnixNano: uint64(TestMetricStartTimestamp), - TimeUnixNano: uint64(TestMetricTimestamp), - Count: 1, - Sum: 15, - }, - { - Labels: generateOtlpMetricLabels2(), - StartTimeUnixNano: uint64(TestMetricStartTimestamp), - TimeUnixNano: uint64(TestMetricTimestamp), - Count: 1, - Sum: 15, - QuantileValues: []*otlpmetrics.DoubleSummaryDataPoint_ValueAtQuantile{ - { - Quantile: 0.01, - Value: 15, - }, - }, - }, - } - return m -} - func initMetric(m pdata.Metric, name string, ty pdata.MetricDataType) { m.SetName(name) m.SetDescription("") @@ -562,8 +295,8 @@ func initMetric(m pdata.Metric, name string, ty pdata.MetricDataType) { sum := m.IntSum() sum.SetIsMonotonic(true) sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) - case pdata.MetricDataTypeDoubleSum: - sum := m.DoubleSum() + case pdata.MetricDataTypeSum: + sum := m.Sum() sum.SetIsMonotonic(true) sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) case pdata.MetricDataTypeIntHistogram: @@ -575,47 +308,12 @@ func initMetric(m pdata.Metric, name string, ty pdata.MetricDataType) { } } -func generateOtlpMetric(name string, ty pdata.MetricDataType) *otlpmetrics.Metric { - m := 
&otlpmetrics.Metric{ - Name: name, - Description: "", - Unit: "1", - } - switch ty { - case pdata.MetricDataTypeIntGauge: - m.Data = &otlpmetrics.Metric_IntGauge{IntGauge: &otlpmetrics.IntGauge{}} - case pdata.MetricDataTypeDoubleGauge: - m.Data = &otlpmetrics.Metric_DoubleGauge{DoubleGauge: &otlpmetrics.DoubleGauge{}} - case pdata.MetricDataTypeIntSum: - m.Data = &otlpmetrics.Metric_IntSum{IntSum: &otlpmetrics.IntSum{ - IsMonotonic: true, - AggregationTemporality: otlpmetrics.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, - }} - case pdata.MetricDataTypeDoubleSum: - m.Data = &otlpmetrics.Metric_DoubleSum{DoubleSum: &otlpmetrics.DoubleSum{ - IsMonotonic: true, - AggregationTemporality: otlpmetrics.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, - }} - case pdata.MetricDataTypeIntHistogram: - m.Data = &otlpmetrics.Metric_IntHistogram{IntHistogram: &otlpmetrics.IntHistogram{ - AggregationTemporality: otlpmetrics.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, - }} - case pdata.MetricDataTypeHistogram: - m.Data = &otlpmetrics.Metric_DoubleHistogram{DoubleHistogram: &otlpmetrics.DoubleHistogram{ - AggregationTemporality: otlpmetrics.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, - }} - case pdata.MetricDataTypeSummary: - m.Data = &otlpmetrics.Metric_DoubleSummary{DoubleSummary: &otlpmetrics.DoubleSummary{}} - } - return m -} - func GenerateMetricsManyMetricsSameResource(metricsCount int) pdata.Metrics { md := GenerateMetricsOneEmptyInstrumentationLibrary() rs0ilm0 := md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0) - rs0ilm0.Metrics().Resize(metricsCount) + rs0ilm0.Metrics().EnsureCapacity(metricsCount) for i := 0; i < metricsCount; i++ { - initCounterIntMetric(rs0ilm0.Metrics().At(i)) + initCounterIntMetric(rs0ilm0.Metrics().AppendEmpty()) } return md } diff --git a/internal/otel_collector/internal/testdata/resource.go b/internal/otel_collector/internal/testdata/resource.go index 90158d1caab..f5d2b8f726d 100644 --- a/internal/otel_collector/internal/testdata/resource.go +++ b/internal/otel_collector/internal/testdata/resource.go @@ -15,27 +15,13 @@ package testdata import ( - otlpresource "go.opentelemetry.io/collector/internal/data/protogen/resource/v1" - - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) func initResource1(r pdata.Resource) { initResourceAttributes1(r.Attributes()) } -func generateOtlpResource1() otlpresource.Resource { - return otlpresource.Resource{ - Attributes: generateOtlpResourceAttributes1(), - } -} - func initResource2(r pdata.Resource) { initResourceAttributes2(r.Attributes()) } - -func generateOtlpResource2() otlpresource.Resource { - return otlpresource.Resource{ - Attributes: generateOtlpResourceAttributes2(), - } -} diff --git a/internal/otel_collector/internal/testdata/trace.go b/internal/otel_collector/internal/testdata/trace.go index c06ae0ae655..684f80b51fa 100644 --- a/internal/otel_collector/internal/testdata/trace.go +++ b/internal/otel_collector/internal/testdata/trace.go @@ -17,9 +17,7 @@ package testdata import ( "time" - "go.opentelemetry.io/collector/consumer/pdata" - otlpcollectortrace "go.opentelemetry.io/collector/internal/data/protogen/collector/trace/v1" - otlptrace "go.opentelemetry.io/collector/internal/data/protogen/trace/v1" + "go.opentelemetry.io/collector/model/pdata" ) var ( @@ -39,14 +37,6 @@ func GenerateTracesOneEmptyResourceSpans() pdata.Traces { return td } -func generateTracesOtlpOneEmptyResourceSpans() 
*otlpcollectortrace.ExportTraceServiceRequest { - return &otlpcollectortrace.ExportTraceServiceRequest{ - ResourceSpans: []*otlptrace.ResourceSpans{ - {}, - }, - } -} - func GenerateTracesNoLibraries() pdata.Traces { td := GenerateTracesOneEmptyResourceSpans() rs0 := td.ResourceSpans().At(0) @@ -54,35 +44,12 @@ func GenerateTracesNoLibraries() pdata.Traces { return td } -func generateTracesOtlpNoLibraries() *otlpcollectortrace.ExportTraceServiceRequest { - return &otlpcollectortrace.ExportTraceServiceRequest{ - ResourceSpans: []*otlptrace.ResourceSpans{ - { - Resource: generateOtlpResource1(), - }, - }, - } -} - func GenerateTracesOneEmptyInstrumentationLibrary() pdata.Traces { td := GenerateTracesNoLibraries() td.ResourceSpans().At(0).InstrumentationLibrarySpans().AppendEmpty() return td } -func generateTracesOtlpOneEmptyInstrumentationLibrary() *otlpcollectortrace.ExportTraceServiceRequest { - return &otlpcollectortrace.ExportTraceServiceRequest{ - ResourceSpans: []*otlptrace.ResourceSpans{ - { - Resource: generateOtlpResource1(), - InstrumentationLibrarySpans: []*otlptrace.InstrumentationLibrarySpans{ - {}, - }, - }, - }, - } -} - func GenerateTracesOneSpanNoResource() pdata.Traces { td := GenerateTracesOneEmptyResourceSpans() rs0 := td.ResourceSpans().At(0) @@ -90,22 +57,6 @@ func GenerateTracesOneSpanNoResource() pdata.Traces { return td } -func generateTracesOtlpOneSpanNoResource() *otlpcollectortrace.ExportTraceServiceRequest { - return &otlpcollectortrace.ExportTraceServiceRequest{ - ResourceSpans: []*otlptrace.ResourceSpans{ - { - InstrumentationLibrarySpans: []*otlptrace.InstrumentationLibrarySpans{ - { - Spans: []*otlptrace.Span{ - generateOtlpSpanOne(), - }, - }, - }, - }, - }, - } -} - func GenerateTracesOneSpan() pdata.Traces { td := GenerateTracesOneEmptyInstrumentationLibrary() rs0ils0 := td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0) @@ -113,23 +64,6 @@ func GenerateTracesOneSpan() pdata.Traces { return td } -func generateTracesOtlpOneSpan() *otlpcollectortrace.ExportTraceServiceRequest { - return &otlpcollectortrace.ExportTraceServiceRequest{ - ResourceSpans: []*otlptrace.ResourceSpans{ - { - Resource: generateOtlpResource1(), - InstrumentationLibrarySpans: []*otlptrace.InstrumentationLibrarySpans{ - { - Spans: []*otlptrace.Span{ - generateOtlpSpanOne(), - }, - }, - }, - }, - }, - } -} - func GenerateTracesTwoSpansSameResource() pdata.Traces { td := GenerateTracesOneEmptyInstrumentationLibrary() rs0ils0 := td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0) @@ -138,25 +72,6 @@ func GenerateTracesTwoSpansSameResource() pdata.Traces { return td } -// generateTracesOtlpSameResourceTwoSpans returns the OTLP representation of the generateTracesOtlpSameResourceTwoSpans. 
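For reference, the pdata-only construction style that replaces these hand-built generateTracesOtlp* protobuf literals looks roughly like the following self-contained sketch; the resource attribute and span names are illustrative, not values taken from this patch.

    package main

    import (
    	"fmt"

    	"go.opentelemetry.io/collector/model/pdata"
    )

    // buildTwoSpanTrace mirrors the testdata generators after this change:
    // payloads are grown through AppendEmpty on pdata slices instead of being
    // assembled as OTLP protobuf struct literals.
    func buildTwoSpanTrace() pdata.Traces {
    	td := pdata.NewTraces()
    	rs := td.ResourceSpans().AppendEmpty()
    	rs.Resource().Attributes().InsertString("resource-attr", "resource-attr-val-1")
    	ils := rs.InstrumentationLibrarySpans().AppendEmpty()
    	for _, name := range []string{"operationA", "operationB"} {
    		span := ils.Spans().AppendEmpty()
    		span.SetName(name)
    	}
    	return td
    }

    func main() {
    	fmt.Println(buildTwoSpanTrace().SpanCount()) // 2
    }
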
-func generateTracesOtlpSameResourceTwoSpans() *otlpcollectortrace.ExportTraceServiceRequest { - return &otlpcollectortrace.ExportTraceServiceRequest{ - ResourceSpans: []*otlptrace.ResourceSpans{ - { - Resource: generateOtlpResource1(), - InstrumentationLibrarySpans: []*otlptrace.InstrumentationLibrarySpans{ - { - Spans: []*otlptrace.Span{ - generateOtlpSpanOne(), - generateOtlpSpanTwo(), - }, - }, - }, - }, - }, - } -} - func GenerateTracesTwoSpansSameResourceOneDifferent() pdata.Traces { td := pdata.NewTraces() rs0 := td.ResourceSpans().AppendEmpty() @@ -171,44 +86,16 @@ func GenerateTracesTwoSpansSameResourceOneDifferent() pdata.Traces { return td } -func GenerateTracesManySpansSameResource(spansCount int) pdata.Traces { +func GenerateTracesManySpansSameResource(spanCount int) pdata.Traces { td := GenerateTracesOneEmptyInstrumentationLibrary() rs0ils0 := td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0) - rs0ils0.Spans().Resize(spansCount) - for i := 0; i < spansCount; i++ { - fillSpanOne(rs0ils0.Spans().At(i)) + rs0ils0.Spans().EnsureCapacity(spanCount) + for i := 0; i < spanCount; i++ { + fillSpanOne(rs0ils0.Spans().AppendEmpty()) } return td } -func generateTracesOtlpTwoSpansSameResourceOneDifferent() *otlpcollectortrace.ExportTraceServiceRequest { - return &otlpcollectortrace.ExportTraceServiceRequest{ - ResourceSpans: []*otlptrace.ResourceSpans{ - { - Resource: generateOtlpResource1(), - InstrumentationLibrarySpans: []*otlptrace.InstrumentationLibrarySpans{ - { - Spans: []*otlptrace.Span{ - generateOtlpSpanOne(), - generateOtlpSpanTwo(), - }, - }, - }, - }, - { - Resource: generateOtlpResource2(), - InstrumentationLibrarySpans: []*otlptrace.InstrumentationLibrarySpans{ - { - Spans: []*otlptrace.Span{ - generateOtlpSpanThree(), - }, - }, - }, - }, - }, - } -} - func fillSpanOne(span pdata.Span) { span.SetName("operationA") span.SetStartTimestamp(TestSpanStartTimestamp) @@ -230,34 +117,6 @@ func fillSpanOne(span pdata.Span) { status.SetMessage("status-cancelled") } -func generateOtlpSpanOne() *otlptrace.Span { - return &otlptrace.Span{ - Name: "operationA", - StartTimeUnixNano: uint64(TestSpanStartTimestamp), - EndTimeUnixNano: uint64(TestSpanEndTimestamp), - DroppedAttributesCount: 1, - Events: []*otlptrace.Span_Event{ - { - Name: "event-with-attr", - TimeUnixNano: uint64(TestSpanEventTimestamp), - Attributes: generateOtlpSpanEventAttributes(), - DroppedAttributesCount: 2, - }, - { - Name: "event", - TimeUnixNano: uint64(TestSpanEventTimestamp), - DroppedAttributesCount: 2, - }, - }, - DroppedEventsCount: 1, - Status: otlptrace.Status{ - Code: otlptrace.Status_STATUS_CODE_ERROR, - DeprecatedCode: otlptrace.Status_DEPRECATED_STATUS_CODE_UNKNOWN_ERROR, - Message: "status-cancelled", - }, - } -} - func fillSpanTwo(span pdata.Span) { span.SetName("operationB") span.SetStartTimestamp(TestSpanStartTimestamp) @@ -270,24 +129,6 @@ func fillSpanTwo(span pdata.Span) { span.SetDroppedLinksCount(3) } -func generateOtlpSpanTwo() *otlptrace.Span { - return &otlptrace.Span{ - Name: "operationB", - StartTimeUnixNano: uint64(TestSpanStartTimestamp), - EndTimeUnixNano: uint64(TestSpanEndTimestamp), - Links: []*otlptrace.Span_Link{ - { - Attributes: generateOtlpSpanLinkAttributes(), - DroppedAttributesCount: 4, - }, - { - DroppedAttributesCount: 4, - }, - }, - DroppedLinksCount: 3, - } -} - func fillSpanThree(span pdata.Span) { span.SetName("operationC") span.SetStartTimestamp(TestSpanStartTimestamp) @@ -295,13 +136,3 @@ func fillSpanThree(span pdata.Span) { 
initSpanAttributes(span.Attributes()) span.SetDroppedAttributesCount(5) } - -func generateOtlpSpanThree() *otlptrace.Span { - return &otlptrace.Span{ - Name: "operationC", - StartTimeUnixNano: uint64(TestSpanStartTimestamp), - EndTimeUnixNano: uint64(TestSpanEndTimestamp), - Attributes: generateOtlpSpanAttributes(), - DroppedAttributesCount: 5, - } -} diff --git a/internal/otel_collector/obsreport/obsreport.go b/internal/otel_collector/obsreport/obsreport.go index c1e12804eb7..04580c8b5f6 100644 --- a/internal/otel_collector/obsreport/obsreport.go +++ b/internal/otel_collector/obsreport/obsreport.go @@ -15,42 +15,14 @@ package obsreport import ( - "context" "strings" - "go.opencensus.io/trace" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics" ) -var ( - okStatus = trace.Status{Code: trace.StatusCodeOK} -) - -// setParentLink tries to retrieve a span from parentCtx and if one exists -// sets its SpanID, TraceID as a link to the given child Span. -// It returns true only if it retrieved a parent span from the context. -// -// This is typically used when the parentCtx may already have a trace and is -// long lived (eg.: an gRPC stream, or TCP connection) and one desires distinct -// traces for individual operations under the long lived trace associated to -// the parentCtx. This function is a helper that encapsulates the work of -// linking the short lived trace/span to the longer one. -func setParentLink(parentCtx context.Context, childSpan *trace.Span) bool { - parentSpanFromRPC := trace.FromContext(parentCtx) - if parentSpanFromRPC == nil { - return false - } - - psc := parentSpanFromRPC.SpanContext() - childSpan.AddLink(trace.Link{ - SpanID: psc.SpanID, - TraceID: psc.TraceID, - Type: trace.LinkTypeParent, - }) - return true -} - func buildComponentPrefix(componentPrefix, configType string) string { if !strings.HasSuffix(componentPrefix, obsmetrics.NameSep) { componentPrefix += obsmetrics.NameSep @@ -61,9 +33,8 @@ func buildComponentPrefix(componentPrefix, configType string) string { return componentPrefix + configType + obsmetrics.NameSep } -func errToStatus(err error) trace.Status { +func recordError(span trace.Span, err error) { if err != nil { - return trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()} + span.SetStatus(codes.Error, err.Error()) } - return okStatus } diff --git a/internal/otel_collector/obsreport/obsreport_exporter.go b/internal/otel_collector/obsreport/obsreport_exporter.go index 784e3a4f8c6..2ce132119a9 100644 --- a/internal/otel_collector/obsreport/obsreport_exporter.go +++ b/internal/otel_collector/obsreport/obsreport_exporter.go @@ -19,7 +19,9 @@ import ( "go.opencensus.io/stats" "go.opencensus.io/tag" - "go.opencensus.io/trace" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/config/configtelemetry" @@ -32,6 +34,7 @@ type Exporter struct { level configtelemetry.Level spanNamePrefix string mutators []tag.Mutator + tracer trace.Tracer } // ExporterSettings are settings for creating an Exporter. 
@@ -46,83 +49,82 @@ func NewExporter(cfg ExporterSettings) *Exporter { level: cfg.Level, spanNamePrefix: obsmetrics.ExporterPrefix + cfg.ExporterID.String(), mutators: []tag.Mutator{tag.Upsert(obsmetrics.TagKeyExporter, cfg.ExporterID.String(), tag.WithTTL(tag.TTLNoPropagation))}, + tracer: otel.GetTracerProvider().Tracer(cfg.ExporterID.String()), } } // StartTracesOp is called at the start of an Export operation. // The returned context should be used in other calls to the Exporter functions // dealing with the same export operation. -func (eor *Exporter) StartTracesOp(ctx context.Context) context.Context { - return eor.startSpan(ctx, obsmetrics.ExportTraceDataOperationSuffix) +func (exp *Exporter) StartTracesOp(ctx context.Context) context.Context { + return exp.startSpan(ctx, obsmetrics.ExportTraceDataOperationSuffix) } // EndTracesOp completes the export operation that was started with StartTracesOp. -func (eor *Exporter) EndTracesOp(ctx context.Context, numSpans int, err error) { +func (exp *Exporter) EndTracesOp(ctx context.Context, numSpans int, err error) { numSent, numFailedToSend := toNumItems(numSpans, err) - eor.recordMetrics(ctx, numSent, numFailedToSend, obsmetrics.ExporterSentSpans, obsmetrics.ExporterFailedToSendSpans) + exp.recordMetrics(ctx, numSent, numFailedToSend, obsmetrics.ExporterSentSpans, obsmetrics.ExporterFailedToSendSpans) endSpan(ctx, err, numSent, numFailedToSend, obsmetrics.SentSpansKey, obsmetrics.FailedToSendSpansKey) } // StartMetricsOp is called at the start of an Export operation. // The returned context should be used in other calls to the Exporter functions // dealing with the same export operation. -func (eor *Exporter) StartMetricsOp(ctx context.Context) context.Context { - return eor.startSpan(ctx, obsmetrics.ExportMetricsOperationSuffix) +func (exp *Exporter) StartMetricsOp(ctx context.Context) context.Context { + return exp.startSpan(ctx, obsmetrics.ExportMetricsOperationSuffix) } // EndMetricsOp completes the export operation that was started with // StartMetricsOp. -func (eor *Exporter) EndMetricsOp(ctx context.Context, numMetricPoints int, err error) { +func (exp *Exporter) EndMetricsOp(ctx context.Context, numMetricPoints int, err error) { numSent, numFailedToSend := toNumItems(numMetricPoints, err) - eor.recordMetrics(ctx, numSent, numFailedToSend, obsmetrics.ExporterSentMetricPoints, obsmetrics.ExporterFailedToSendMetricPoints) + exp.recordMetrics(ctx, numSent, numFailedToSend, obsmetrics.ExporterSentMetricPoints, obsmetrics.ExporterFailedToSendMetricPoints) endSpan(ctx, err, numSent, numFailedToSend, obsmetrics.SentMetricPointsKey, obsmetrics.FailedToSendMetricPointsKey) } // StartLogsOp is called at the start of an Export operation. // The returned context should be used in other calls to the Exporter functions // dealing with the same export operation. -func (eor *Exporter) StartLogsOp(ctx context.Context) context.Context { - return eor.startSpan(ctx, obsmetrics.ExportLogsOperationSuffix) +func (exp *Exporter) StartLogsOp(ctx context.Context) context.Context { + return exp.startSpan(ctx, obsmetrics.ExportLogsOperationSuffix) } // EndLogsOp completes the export operation that was started with StartLogsOp. 
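The exporter-side instrumentation above now follows the plain OpenTelemetry-Go span lifecycle: obtain a named Tracer from the global provider, start a span per export operation, and close it with attributes and an error status. A minimal standalone sketch of that pattern; the tracer name and attribute keys are assumed placeholders, not the collector's obsmetrics constants.

    package main

    import (
    	"context"
    	"errors"

    	"go.opentelemetry.io/otel"
    	"go.opentelemetry.io/otel/attribute"
    	"go.opentelemetry.io/otel/codes"
    	"go.opentelemetry.io/otel/trace"
    )

    func main() {
    	// A component-scoped tracer, as NewExporter now creates from the exporter ID.
    	tracer := otel.GetTracerProvider().Tracer("my-exporter")

    	// StartTracesOp equivalent: start a span and carry it in the context.
    	ctx, _ := tracer.Start(context.Background(), "exporter/my-exporter/traces")

    	// EndTracesOp equivalent: recover the span, attach counters, record errors.
    	// With no SDK installed this is a no-op span and IsRecording is false.
    	span := trace.SpanFromContext(ctx)
    	if span.IsRecording() {
    		span.SetAttributes(
    			attribute.Int64("sent_spans", 10),
    			attribute.Int64("send_failed_spans", 2),
    		)
    		if err := errors.New("partial failure"); err != nil {
    			span.SetStatus(codes.Error, err.Error())
    		}
    	}
    	span.End()
    }
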
-func (eor *Exporter) EndLogsOp(ctx context.Context, numLogRecords int, err error) { +func (exp *Exporter) EndLogsOp(ctx context.Context, numLogRecords int, err error) { numSent, numFailedToSend := toNumItems(numLogRecords, err) - eor.recordMetrics(ctx, numSent, numFailedToSend, obsmetrics.ExporterSentLogRecords, obsmetrics.ExporterFailedToSendLogRecords) + exp.recordMetrics(ctx, numSent, numFailedToSend, obsmetrics.ExporterSentLogRecords, obsmetrics.ExporterFailedToSendLogRecords) endSpan(ctx, err, numSent, numFailedToSend, obsmetrics.SentLogRecordsKey, obsmetrics.FailedToSendLogRecordsKey) } // startSpan creates the span used to trace the operation. Returning // the updated context and the created span. -func (eor *Exporter) startSpan(ctx context.Context, operationSuffix string) context.Context { - spanName := eor.spanNamePrefix + operationSuffix - ctx, _ = trace.StartSpan(ctx, spanName) +func (exp *Exporter) startSpan(ctx context.Context, operationSuffix string) context.Context { + spanName := exp.spanNamePrefix + operationSuffix + ctx, _ = exp.tracer.Start(ctx, spanName) return ctx } -func (eor *Exporter) recordMetrics(ctx context.Context, numSent, numFailedToSend int64, sentMeasure, failedToSendMeasure *stats.Int64Measure) { +func (exp *Exporter) recordMetrics(ctx context.Context, numSent, numFailedToSend int64, sentMeasure, failedToSendMeasure *stats.Int64Measure) { if obsreportconfig.Level == configtelemetry.LevelNone { return } // Ignore the error for now. This should not happen. _ = stats.RecordWithTags( ctx, - eor.mutators, + exp.mutators, sentMeasure.M(numSent), failedToSendMeasure.M(numFailedToSend)) } func endSpan(ctx context.Context, err error, numSent, numFailedToSend int64, sentItemsKey, failedToSendItemsKey string) { - span := trace.FromContext(ctx) + span := trace.SpanFromContext(ctx) // End span according to errors. - if span.IsRecordingEvents() { - span.AddAttributes( - trace.Int64Attribute( - sentItemsKey, numSent), - trace.Int64Attribute( - failedToSendItemsKey, numFailedToSend), + if span.IsRecording() { + span.SetAttributes( + attribute.Int64(sentItemsKey, numSent), + attribute.Int64(failedToSendItemsKey, numFailedToSend), ) - span.SetStatus(errToStatus(err)) + recordError(span, err) } span.End() } diff --git a/internal/otel_collector/obsreport/obsreport_receiver.go b/internal/otel_collector/obsreport/obsreport_receiver.go index 7a3775f3d89..4c278e69e88 100644 --- a/internal/otel_collector/obsreport/obsreport_receiver.go +++ b/internal/otel_collector/obsreport/obsreport_receiver.go @@ -19,7 +19,9 @@ import ( "go.opencensus.io/stats" "go.opencensus.io/tag" - "go.opencensus.io/trace" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/config/configtelemetry" @@ -27,83 +29,46 @@ import ( "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics" ) -// startReceiveOptions has the options related to starting a receive operation. -type startReceiveOptions struct { - // LongLivedCtx when true indicates that the context passed in the call - // outlives the individual receive operation. See WithLongLivedCtx() for - // more information. - LongLivedCtx bool -} - -// StartReceiveOption function applues changes to startReceiveOptions. -type StartReceiveOption func(*startReceiveOptions) - -// WithLongLivedCtx indicates that the context passed in the call outlives the -// receive operation at hand. 
Typically the long lived context is associated -// to a connection, eg.: a gRPC stream or a TCP connection, for which many -// batches of data are received in individual operations without a corresponding -// new context per operation. -// -// Example: -// -// func (r *receiver) ClientConnect(ctx context.Context, rcvChan <-chan pdata.Traces) { -// longLivedCtx := obsreport.ReceiverContext(ctx, r.config.Name(), r.transport, "") -// for { -// // Since the context outlives the individual receive operations call obsreport using -// // WithLongLivedCtx(). -// ctx := obsreport.StartTracesOp( -// longLivedCtx, -// r.config.Name(), -// r.transport, -// obsreport.WithLongLivedCtx()) -// -// td, ok := <-rcvChan -// var err error -// if ok { -// err = r.nextConsumer.ConsumeTraces(ctx, td) -// } -// obsreport.EndTracesOp( -// ctx, -// r.format, -// len(td.Spans), -// err) -// if !ok { -// break -// } -// } -// } -// -func WithLongLivedCtx() StartReceiveOption { - return func(opts *startReceiveOptions) { - opts.LongLivedCtx = true - } -} - // Receiver is a helper to add obersvability to a component.Receiver. type Receiver struct { - receiverID config.ComponentID - transport string + spanNamePrefix string + transport string + longLivedCtx bool + mutators []tag.Mutator + tracer trace.Tracer } // ReceiverSettings are settings for creating an Receiver. type ReceiverSettings struct { ReceiverID config.ComponentID Transport string + // LongLivedCtx when true indicates that the context passed in the call + // outlives the individual receive operation. + // Typically the long lived context is associated to a connection, + // eg.: a gRPC stream, for which many batches of data are received in individual + // operations without a corresponding new context per operation. + LongLivedCtx bool } // NewReceiver creates a new Receiver. func NewReceiver(cfg ReceiverSettings) *Receiver { return &Receiver{ - receiverID: cfg.ReceiverID, - transport: cfg.Transport, + spanNamePrefix: obsmetrics.ReceiverPrefix + cfg.ReceiverID.String(), + transport: cfg.Transport, + longLivedCtx: cfg.LongLivedCtx, + mutators: []tag.Mutator{ + tag.Upsert(obsmetrics.TagKeyReceiver, cfg.ReceiverID.String(), tag.WithTTL(tag.TTLNoPropagation)), + tag.Upsert(obsmetrics.TagKeyTransport, cfg.Transport, tag.WithTTL(tag.TTLNoPropagation)), + }, + tracer: otel.GetTracerProvider().Tracer(cfg.ReceiverID.String()), } } // StartTracesOp is called when a request is received from a client. // The returned context should be used in other calls to the obsreport functions // dealing with the same receive operation. -func (rec *Receiver) StartTracesOp(operationCtx context.Context, opt ...StartReceiveOption) context.Context { - return rec.startOp(operationCtx, obsmetrics.ReceiveTraceDataOperationSuffix, opt...) +func (rec *Receiver) StartTracesOp(operationCtx context.Context) context.Context { + return rec.startOp(operationCtx, obsmetrics.ReceiveTraceDataOperationSuffix) } // EndTracesOp completes the receive operation that was started with @@ -120,8 +85,8 @@ func (rec *Receiver) EndTracesOp( // StartLogsOp is called when a request is received from a client. // The returned context should be used in other calls to the obsreport functions // dealing with the same receive operation. -func (rec *Receiver) StartLogsOp(operationCtx context.Context, opt ...StartReceiveOption) context.Context { - return rec.startOp(operationCtx, obsmetrics.ReceiverLogsOperationSuffix, opt...) 
+func (rec *Receiver) StartLogsOp(operationCtx context.Context) context.Context { + return rec.startOp(operationCtx, obsmetrics.ReceiverLogsOperationSuffix) } // EndLogsOp completes the receive operation that was started with @@ -138,8 +103,8 @@ func (rec *Receiver) EndLogsOp( // StartMetricsOp is called when a request is received from a client. // The returned context should be used in other calls to the obsreport functions // dealing with the same receive operation. -func (rec *Receiver) StartMetricsOp(operationCtx context.Context, opt ...StartReceiveOption) context.Context { - return rec.startOp(operationCtx, obsmetrics.ReceiverMetricsOperationSuffix, opt...) +func (rec *Receiver) StartMetricsOp(operationCtx context.Context) context.Context { + return rec.startOp(operationCtx, obsmetrics.ReceiverMetricsOperationSuffix) } // EndMetricsOp completes the receive operation that was started with @@ -153,53 +118,27 @@ func (rec *Receiver) EndMetricsOp( rec.endOp(receiverCtx, format, numReceivedPoints, err, config.MetricsDataType) } -// ReceiverContext adds the keys used when recording observability metrics to -// the given context returning the newly created context. This context should -// be used in related calls to the obsreport functions so metrics are properly -// recorded. -func ReceiverContext( - ctx context.Context, - receiverID config.ComponentID, - transport string, -) context.Context { - ctx, _ = tag.New(ctx, - tag.Upsert(obsmetrics.TagKeyReceiver, receiverID.String(), tag.WithTTL(tag.TTLNoPropagation)), - tag.Upsert(obsmetrics.TagKeyTransport, transport, tag.WithTTL(tag.TTLNoPropagation))) - - return ctx -} - // startOp creates the span used to trace the operation. Returning // the updated context with the created span. -func (rec *Receiver) startOp( - receiverCtx context.Context, - operationSuffix string, - opt ...StartReceiveOption, -) context.Context { - var opts startReceiveOptions - for _, o := range opt { - o(&opts) - } - - var ctx context.Context - var span *trace.Span - spanName := obsmetrics.ReceiverPrefix + rec.receiverID.String() + operationSuffix - if !opts.LongLivedCtx { - ctx, span = trace.StartSpan(receiverCtx, spanName) +func (rec *Receiver) startOp(receiverCtx context.Context, operationSuffix string) context.Context { + ctx, _ := tag.New(receiverCtx, rec.mutators...) + var span trace.Span + spanName := rec.spanNamePrefix + operationSuffix + if !rec.longLivedCtx { + ctx, span = rec.tracer.Start(ctx, spanName) } else { // Since the receiverCtx is long lived do not use it to start the span. // This way this trace ends when the EndTracesOp is called. // Here is safe to ignore the returned context since it is not used below. - _, span = trace.StartSpan(context.Background(), spanName) - - // If the long lived context has a parent span, then add it as a parent link. 
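With setParentLink gone, the long-lived-context case in the replacement just below instead creates the per-operation span with a link back to whatever span context the connection-scoped context carries. A compact sketch of that linking idiom; the helper name is hypothetical.

    package main

    import (
    	"context"

    	"go.opentelemetry.io/otel"
    	"go.opentelemetry.io/otel/trace"
    )

    // startLinkedOp starts a per-operation span that is linked to, rather than
    // parented under, any span already present in the long-lived context
    // (e.g. a gRPC stream), so each operation gets its own trace.
    func startLinkedOp(longLivedCtx context.Context, tracer trace.Tracer, name string) (context.Context, trace.Span) {
    	_, span := tracer.Start(context.Background(), name,
    		trace.WithLinks(trace.Link{SpanContext: trace.SpanContextFromContext(longLivedCtx)}))
    	// Keep the long-lived context's values, but make the new span current.
    	return trace.ContextWithSpan(longLivedCtx, span), span
    }

    func main() {
    	tracer := otel.GetTracerProvider().Tracer("my-receiver")
    	ctx, span := startLinkedOp(context.Background(), tracer, "receiver/my-receiver/TraceDataReceived")
    	_ = ctx
    	span.End()
    }
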
- setParentLink(receiverCtx, span) + _, span = rec.tracer.Start(context.Background(), spanName, trace.WithLinks(trace.Link{ + SpanContext: trace.SpanContextFromContext(receiverCtx), + })) - ctx = trace.NewContext(receiverCtx, span) + ctx = trace.ContextWithSpan(ctx, span) } if rec.transport != "" { - span.AddAttributes(trace.StringAttribute(obsmetrics.TransportKey, rec.transport)) + span.SetAttributes(attribute.String(obsmetrics.TransportKey, rec.transport)) } return ctx } @@ -219,7 +158,7 @@ func (rec *Receiver) endOp( numRefused = numReceivedItems } - span := trace.FromContext(receiverCtx) + span := trace.SpanFromContext(receiverCtx) if obsreportconfig.Level != configtelemetry.LevelNone { var acceptedMeasure, refusedMeasure *stats.Int64Measure @@ -242,7 +181,7 @@ func (rec *Receiver) endOp( } // end span according to errors - if span.IsRecordingEvents() { + if span.IsRecording() { var acceptedItemsKey, refusedItemsKey string switch dataType { case config.TracesDataType: @@ -256,15 +195,12 @@ func (rec *Receiver) endOp( refusedItemsKey = obsmetrics.RefusedLogRecordsKey } - span.AddAttributes( - trace.StringAttribute( - obsmetrics.FormatKey, format), - trace.Int64Attribute( - acceptedItemsKey, int64(numAccepted)), - trace.Int64Attribute( - refusedItemsKey, int64(numRefused)), + span.SetAttributes( + attribute.String(obsmetrics.FormatKey, format), + attribute.Int64(acceptedItemsKey, int64(numAccepted)), + attribute.Int64(refusedItemsKey, int64(numRefused)), ) - span.SetStatus(errToStatus(err)) + recordError(span, err) } span.End() } diff --git a/internal/otel_collector/obsreport/obsreport_scraper.go b/internal/otel_collector/obsreport/obsreport_scraper.go index 362b5ea312e..caa03fa32b4 100644 --- a/internal/otel_collector/obsreport/obsreport_scraper.go +++ b/internal/otel_collector/obsreport/obsreport_scraper.go @@ -19,7 +19,9 @@ import ( "go.opencensus.io/stats" "go.opencensus.io/tag" - "go.opencensus.io/trace" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/config/configtelemetry" @@ -45,22 +47,42 @@ func ScraperContext( return ctx } -// StartMetricsScrapeOp is called when a scrape operation is started. The +// Scraper is a helper to add observability to a component.Scraper. +type Scraper struct { + receiverID config.ComponentID + scraper config.ComponentID + tracer trace.Tracer +} + +// ScraperSettings are settings for creating a Scraper. +type ScraperSettings struct { + ReceiverID config.ComponentID + Scraper config.ComponentID +} + +// NewScraper creates a new Scraper. +func NewScraper(cfg ScraperSettings) *Scraper { + return &Scraper{ + receiverID: cfg.ReceiverID, + scraper: cfg.Scraper, + tracer: otel.GetTracerProvider().Tracer(cfg.Scraper.String()), + } +} + +// StartMetricsOp is called when a scrape operation is started. The // returned context should be used in other calls to the obsreport functions // dealing with the same scrape operation. 
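Scrape instrumentation follows the same move as Exporter and Receiver: the free StartMetricsScrapeOp/EndMetricsScrapeOp functions become methods on a Scraper constructed once per receiver/scraper pair. A hedged usage sketch; the component IDs and the config.NewID constructor are assumptions about the v0.30 config API, not taken from this patch.

    package main

    import (
    	"context"

    	"go.opentelemetry.io/collector/config"
    	"go.opentelemetry.io/collector/obsreport"
    )

    func main() {
    	// Component IDs are placeholders; config.NewID is assumed to be the
    	// v0.30-era ComponentID constructor.
    	scraperObs := obsreport.NewScraper(obsreport.ScraperSettings{
    		ReceiverID: config.NewID("hostmetrics"),
    		Scraper:    config.NewID("cpu"),
    	})

    	// One span per scrape: start before scraping, end with the point count
    	// and any error so span status and metrics are recorded together.
    	ctx := scraperObs.StartMetricsOp(context.Background())
    	numPoints, err := 42, error(nil) // stand-in for a real scrape result
    	scraperObs.EndMetricsOp(ctx, numPoints, err)
    }
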
-func StartMetricsScrapeOp( +func (s *Scraper) StartMetricsOp( scraperCtx context.Context, - receiverID config.ComponentID, - scraper config.ComponentID, ) context.Context { - spanName := obsmetrics.ScraperPrefix + receiverID.String() + obsmetrics.NameSep + scraper.String() + obsmetrics.ScraperMetricsOperationSuffix - ctx, _ := trace.StartSpan(scraperCtx, spanName) + spanName := obsmetrics.ScraperPrefix + s.receiverID.String() + obsmetrics.NameSep + s.scraper.String() + obsmetrics.ScraperMetricsOperationSuffix + ctx, _ := s.tracer.Start(scraperCtx, spanName) return ctx } -// EndMetricsScrapeOp completes the scrape operation that was started with -// StartMetricsScrapeOp. -func EndMetricsScrapeOp( +// EndMetricsOp completes the scrape operation that was started with +// StartMetricsOp. +func (s *Scraper) EndMetricsOp( scraperCtx context.Context, numScrapedMetrics int, err error, @@ -75,7 +97,7 @@ func EndMetricsScrapeOp( } } - span := trace.FromContext(scraperCtx) + span := trace.SpanFromContext(scraperCtx) if obsreportconfig.Level != configtelemetry.LevelNone { stats.Record( @@ -85,14 +107,13 @@ func EndMetricsScrapeOp( } // end span according to errors - if span.IsRecordingEvents() { - span.AddAttributes( - trace.StringAttribute(obsmetrics.FormatKey, string(config.MetricsDataType)), - trace.Int64Attribute(obsmetrics.ScrapedMetricPointsKey, int64(numScrapedMetrics)), - trace.Int64Attribute(obsmetrics.ErroredMetricPointsKey, int64(numErroredMetrics)), + if span.IsRecording() { + span.SetAttributes( + attribute.String(obsmetrics.FormatKey, string(config.MetricsDataType)), + attribute.Int64(obsmetrics.ScrapedMetricPointsKey, int64(numScrapedMetrics)), + attribute.Int64(obsmetrics.ErroredMetricPointsKey, int64(numErroredMetrics)), ) - - span.SetStatus(errToStatus(err)) + recordError(span, err) } span.End() diff --git a/internal/otel_collector/processor/attributesprocessor/attributes_log.go b/internal/otel_collector/processor/attributesprocessor/attributes_log.go index 83b5174ab0d..73585fb5d78 100644 --- a/internal/otel_collector/processor/attributesprocessor/attributes_log.go +++ b/internal/otel_collector/processor/attributesprocessor/attributes_log.go @@ -17,8 +17,8 @@ package attributesprocessor import ( "context" - "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/internal/processor/filterlog" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/processor/processorhelper" ) @@ -39,8 +39,7 @@ func newLogAttributesProcessor(attrProc *processorhelper.AttrProc, include, excl } } -// ProcessLogs implements the LogsProcessor -func (a *logAttributesProcessor) ProcessLogs(_ context.Context, ld pdata.Logs) (pdata.Logs, error) { +func (a *logAttributesProcessor) processLogs(_ context.Context, ld pdata.Logs) (pdata.Logs, error) { rls := ld.ResourceLogs() for i := 0; i < rls.Len(); i++ { rs := rls.At(i) diff --git a/internal/otel_collector/processor/attributesprocessor/attributes_trace.go b/internal/otel_collector/processor/attributesprocessor/attributes_trace.go index 06f246cf4a3..7480d1138c1 100644 --- a/internal/otel_collector/processor/attributesprocessor/attributes_trace.go +++ b/internal/otel_collector/processor/attributesprocessor/attributes_trace.go @@ -17,8 +17,8 @@ package attributesprocessor import ( "context" - "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/internal/processor/filterspan" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/processor/processorhelper" ) @@ -39,8 +39,7 @@ 
func newSpanAttributesProcessor(attrProc *processorhelper.AttrProc, include, exc } } -// ProcessTraces implements the TProcessor -func (a *spanAttributesProcessor) ProcessTraces(_ context.Context, td pdata.Traces) (pdata.Traces, error) { +func (a *spanAttributesProcessor) processTraces(_ context.Context, td pdata.Traces) (pdata.Traces, error) { rss := td.ResourceSpans() for i := 0; i < rss.Len(); i++ { rs := rss.At(i) diff --git a/internal/otel_collector/processor/attributesprocessor/factory.go b/internal/otel_collector/processor/attributesprocessor/factory.go index ef4aa835fc1..9f90d4d9fa3 100644 --- a/internal/otel_collector/processor/attributesprocessor/factory.go +++ b/internal/otel_collector/processor/attributesprocessor/factory.go @@ -75,7 +75,7 @@ func createTracesProcessor( return processorhelper.NewTracesProcessor( cfg, nextConsumer, - newSpanAttributesProcessor(attrProc, include, exclude), + newSpanAttributesProcessor(attrProc, include, exclude).processTraces, processorhelper.WithCapabilities(processorCapabilities)) } @@ -105,6 +105,6 @@ func createLogProcessor( return processorhelper.NewLogsProcessor( cfg, nextConsumer, - newLogAttributesProcessor(attrProc, include, exclude), + newLogAttributesProcessor(attrProc, include, exclude).processLogs, processorhelper.WithCapabilities(processorCapabilities)) } diff --git a/internal/otel_collector/processor/attributesprocessor/testdata/config.yaml b/internal/otel_collector/processor/attributesprocessor/testdata/config.yaml index 3565ee53ac6..d31c585aa8c 100644 --- a/internal/otel_collector/processor/attributesprocessor/testdata/config.yaml +++ b/internal/otel_collector/processor/attributesprocessor/testdata/config.yaml @@ -294,7 +294,7 @@ processors: match_type: regexp attributes: # This attribute ('db.statement') must exist in the span and match the regex ('SELECT \* FROM USERS.*') for a match. - - {key: env, value: "'SELECT * FROM USERS WHERE ID=1'"} + - {key: env, value: 'SELECT \* FROM USERS.*'} actions: - key: db.statement action: update diff --git a/internal/otel_collector/processor/batchprocessor/batch_processor.go b/internal/otel_collector/processor/batchprocessor/batch_processor.go index 32720ff186a..bcde61c8521 100644 --- a/internal/otel_collector/processor/batchprocessor/batch_processor.go +++ b/internal/otel_collector/processor/batchprocessor/batch_processor.go @@ -27,7 +27,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configtelemetry" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) // batch_processor is a component that accepts spans and metrics, places them @@ -296,7 +296,7 @@ func (bm *batchMetrics) size() int { func (bm *batchMetrics) add(item interface{}) { md := item.(pdata.Metrics) - _, newDataPointCount := md.MetricAndDataPointCount() + newDataPointCount := md.DataPointCount() if newDataPointCount == 0 { return } diff --git a/internal/otel_collector/processor/batchprocessor/splitlogs.go b/internal/otel_collector/processor/batchprocessor/splitlogs.go index e8e6169a120..2d6637dd1dc 100644 --- a/internal/otel_collector/processor/batchprocessor/splitlogs.go +++ b/internal/otel_collector/processor/batchprocessor/splitlogs.go @@ -15,7 +15,7 @@ package batchprocessor import ( - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) // splitLogs removes logrecords from the input data and returns a new data of the specified size. 
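As the attributesprocessor factory hunks above show, processorhelper now takes the processing function itself (the unexported processTraces/processLogs methods) rather than an object implementing a processor interface. A rough sketch of that wiring for a hypothetical pass-through processor; the config constructors used here (config.NewID, config.NewProcessorSettings) are assumptions about the v0.30 API rather than calls shown in this patch.

    package main

    import (
    	"context"
    	"log"

    	"go.opentelemetry.io/collector/config"
    	"go.opentelemetry.io/collector/consumer/consumertest"
    	"go.opentelemetry.io/collector/model/pdata"
    	"go.opentelemetry.io/collector/processor/processorhelper"
    )

    // passthroughConfig is a stand-in processor config; real processors embed
    // config.ProcessorSettings in the same way.
    type passthroughConfig struct {
    	config.ProcessorSettings
    }

    // processTraces is the function-shaped processor body that the factory now
    // passes directly, mirroring the attributesprocessor change above.
    func processTraces(_ context.Context, td pdata.Traces) (pdata.Traces, error) {
    	return td, nil // pass everything through unchanged
    }

    func main() {
    	cfg := &passthroughConfig{ProcessorSettings: config.NewProcessorSettings(config.NewID("passthrough"))}
    	tp, err := processorhelper.NewTracesProcessor(cfg, consumertest.NewNop(), processTraces)
    	if err != nil {
    		log.Fatal(err)
    	}
    	_ = tp.ConsumeTraces(context.Background(), pdata.NewTraces())
    }
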
@@ -46,7 +46,7 @@ func splitLogs(size int, src pdata.Logs) pdata.Logs { // If possible to move all metrics do that. srcLogsLen := srcIlm.Logs().Len() - if size-totalCopiedLogs >= srcLogsLen { + if size >= srcLogsLen+totalCopiedLogs { totalCopiedLogs += srcLogsLen srcIlm.Logs().MoveAndAppendTo(destIlm.Logs()) return true diff --git a/internal/otel_collector/processor/batchprocessor/splitmetrics.go b/internal/otel_collector/processor/batchprocessor/splitmetrics.go index 06afdc975d2..39e75d87f87 100644 --- a/internal/otel_collector/processor/batchprocessor/splitmetrics.go +++ b/internal/otel_collector/processor/batchprocessor/splitmetrics.go @@ -15,12 +15,12 @@ package batchprocessor import ( - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) // splitMetrics removes metrics from the input data and returns a new data of the specified size. func splitMetrics(size int, src pdata.Metrics) pdata.Metrics { - _, dataPoints := src.MetricAndDataPointCount() + dataPoints := src.DataPointCount() if dataPoints <= size { return src } @@ -84,12 +84,12 @@ func metricDataPointCount(ms pdata.Metric) (dataPointCount int) { switch ms.DataType() { case pdata.MetricDataTypeIntGauge: dataPointCount = ms.IntGauge().DataPoints().Len() - case pdata.MetricDataTypeDoubleGauge: - dataPointCount = ms.DoubleGauge().DataPoints().Len() + case pdata.MetricDataTypeGauge: + dataPointCount = ms.Gauge().DataPoints().Len() case pdata.MetricDataTypeIntSum: dataPointCount = ms.IntSum().DataPoints().Len() - case pdata.MetricDataTypeDoubleSum: - dataPointCount = ms.DoubleSum().DataPoints().Len() + case pdata.MetricDataTypeSum: + dataPointCount = ms.Sum().DataPoints().Len() case pdata.MetricDataTypeIntHistogram: dataPointCount = ms.IntHistogram().DataPoints().Len() case pdata.MetricDataTypeHistogram: @@ -103,47 +103,88 @@ func metricDataPointCount(ms pdata.Metric) (dataPointCount int) { // splitMetric removes metric points from the input data and moves data of the specified size to destination. // Returns size of moved data and boolean describing, whether the metric should be removed from original slice. 
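The splitMetric hunk that follows replaces Resize(n) with an explicit pre-size-and-append copy, the same EnsureCapacity/AppendEmpty pattern used by the testdata generators earlier in this patch. A minimal standalone version of that copy-first-N-points idiom; metric names and values are illustrative.

    package main

    import (
    	"fmt"

    	"go.opentelemetry.io/collector/model/pdata"
    )

    // copyFirstN copies the first n data points of a gauge metric into dest,
    // the pattern that replaces dest.Gauge().DataPoints().Resize(n).
    func copyFirstN(src, dest pdata.Metric, n int) {
    	dest.SetDataType(pdata.MetricDataTypeGauge)
    	dest.SetName(src.Name())
    	in := src.Gauge().DataPoints()
    	out := dest.Gauge().DataPoints()
    	out.EnsureCapacity(n)
    	for i := 0; i < n && i < in.Len(); i++ {
    		in.At(i).CopyTo(out.AppendEmpty())
    	}
    }

    func main() {
    	md := pdata.NewMetrics()
    	m := md.ResourceMetrics().AppendEmpty().InstrumentationLibraryMetrics().AppendEmpty().Metrics().AppendEmpty()
    	m.SetName("gauge.example")
    	m.SetDataType(pdata.MetricDataTypeGauge)
    	for v := 0; v < 3; v++ {
    		m.Gauge().DataPoints().AppendEmpty().SetValue(float64(v))
    	}

    	dest := pdata.NewMetrics().ResourceMetrics().AppendEmpty().InstrumentationLibraryMetrics().AppendEmpty().Metrics().AppendEmpty()
    	copyFirstN(m, dest, 2)
    	fmt.Println(dest.Gauge().DataPoints().Len()) // 2
    }
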
func splitMetric(ms, dest pdata.Metric, size int) (int, bool) { - ms.CopyTo(dest) if metricDataPointCount(ms) <= size { + ms.CopyTo(dest) return metricDataPointCount(ms), true } msSize, i := metricDataPointCount(ms)-size, 0 filterDataPoints := func() bool { i++; return i <= msSize } + + dest.SetDataType(ms.DataType()) + dest.SetName(ms.Name()) + dest.SetDescription(ms.Description()) + dest.SetUnit(ms.Unit()) + switch ms.DataType() { case pdata.MetricDataTypeIntGauge: - dest.IntGauge().DataPoints().Resize(size) - ms.IntGauge().DataPoints().RemoveIf(func(_ pdata.IntDataPoint) bool { + src := ms.IntGauge().DataPoints() + dst := dest.IntGauge().DataPoints() + dst.EnsureCapacity(size) + for j := 0; j < size; j++ { + src.At(j).CopyTo(dst.AppendEmpty()) + } + src.RemoveIf(func(_ pdata.IntDataPoint) bool { return filterDataPoints() }) - case pdata.MetricDataTypeDoubleGauge: - dest.DoubleGauge().DataPoints().Resize(size) - ms.DoubleGauge().DataPoints().RemoveIf(func(_ pdata.DoubleDataPoint) bool { + case pdata.MetricDataTypeGauge: + src := ms.Gauge().DataPoints() + dst := dest.Gauge().DataPoints() + dst.EnsureCapacity(size) + for j := 0; j < size; j++ { + src.At(j).CopyTo(dst.AppendEmpty()) + } + src.RemoveIf(func(_ pdata.DoubleDataPoint) bool { return filterDataPoints() }) case pdata.MetricDataTypeIntSum: - dest.IntSum().DataPoints().Resize(size) - ms.IntSum().DataPoints().RemoveIf(func(_ pdata.IntDataPoint) bool { + src := ms.IntSum().DataPoints() + dst := dest.IntSum().DataPoints() + dst.EnsureCapacity(size) + for j := 0; j < size; j++ { + src.At(j).CopyTo(dst.AppendEmpty()) + } + src.RemoveIf(func(_ pdata.IntDataPoint) bool { return filterDataPoints() }) - case pdata.MetricDataTypeDoubleSum: - dest.DoubleSum().DataPoints().Resize(size) - ms.DoubleSum().DataPoints().RemoveIf(func(_ pdata.DoubleDataPoint) bool { + case pdata.MetricDataTypeSum: + src := ms.Sum().DataPoints() + dst := dest.Sum().DataPoints() + dst.EnsureCapacity(size) + for j := 0; j < size; j++ { + src.At(j).CopyTo(dst.AppendEmpty()) + } + src.RemoveIf(func(_ pdata.DoubleDataPoint) bool { return filterDataPoints() }) case pdata.MetricDataTypeIntHistogram: - dest.IntHistogram().DataPoints().Resize(size) - ms.IntHistogram().DataPoints().RemoveIf(func(_ pdata.IntHistogramDataPoint) bool { + src := ms.IntHistogram().DataPoints() + dst := dest.IntHistogram().DataPoints() + dst.EnsureCapacity(size) + for j := 0; j < size; j++ { + src.At(j).CopyTo(dst.AppendEmpty()) + } + src.RemoveIf(func(_ pdata.IntHistogramDataPoint) bool { return filterDataPoints() }) case pdata.MetricDataTypeHistogram: - dest.Histogram().DataPoints().Resize(size) - ms.Histogram().DataPoints().RemoveIf(func(_ pdata.HistogramDataPoint) bool { + src := ms.Histogram().DataPoints() + dst := dest.Histogram().DataPoints() + dst.EnsureCapacity(size) + for j := 0; j < size; j++ { + src.At(j).CopyTo(dst.AppendEmpty()) + } + src.RemoveIf(func(_ pdata.HistogramDataPoint) bool { return filterDataPoints() }) case pdata.MetricDataTypeSummary: - dest.Summary().DataPoints().Resize(size) - ms.Summary().DataPoints().RemoveIf(func(_ pdata.SummaryDataPoint) bool { + src := ms.Summary().DataPoints() + dst := dest.Summary().DataPoints() + dst.EnsureCapacity(size) + for j := 0; j < size; j++ { + src.At(j).CopyTo(dst.AppendEmpty()) + } + src.RemoveIf(func(_ pdata.SummaryDataPoint) bool { return filterDataPoints() }) } diff --git a/internal/otel_collector/processor/batchprocessor/splittraces.go b/internal/otel_collector/processor/batchprocessor/splittraces.go index 1e62a957fb2..46ff7f933ac 
100644 --- a/internal/otel_collector/processor/batchprocessor/splittraces.go +++ b/internal/otel_collector/processor/batchprocessor/splittraces.go @@ -15,7 +15,7 @@ package batchprocessor import ( - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) // splitTraces removes spans from the input trace and returns a new trace of the specified size. diff --git a/internal/otel_collector/processor/filterprocessor/factory.go b/internal/otel_collector/processor/filterprocessor/factory.go index ccf44da4972..bc2c31aa7c6 100644 --- a/internal/otel_collector/processor/filterprocessor/factory.go +++ b/internal/otel_collector/processor/filterprocessor/factory.go @@ -57,6 +57,6 @@ func createMetricsProcessor( return processorhelper.NewMetricsProcessor( cfg, nextConsumer, - fp, + fp.processMetrics, processorhelper.WithCapabilities(processorCapabilities)) } diff --git a/internal/otel_collector/processor/filterprocessor/filter_processor.go b/internal/otel_collector/processor/filterprocessor/filter_processor.go index b94dfb5b0a9..814bec5398b 100644 --- a/internal/otel_collector/processor/filterprocessor/filter_processor.go +++ b/internal/otel_collector/processor/filterprocessor/filter_processor.go @@ -19,11 +19,11 @@ import ( "go.uber.org/zap" - "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/internal/processor/filterconfig" "go.opentelemetry.io/collector/internal/processor/filtermatcher" "go.opentelemetry.io/collector/internal/processor/filtermetric" "go.opentelemetry.io/collector/internal/processor/filterset" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/processor/processorhelper" ) @@ -113,8 +113,8 @@ func createMatcher(mp *filtermetric.MatchProperties) (filtermetric.Matcher, filt return nameMatcher, attributeMatcher, err } -// ProcessMetrics filters the given metrics based off the filterMetricProcessor's filters. -func (fmp *filterMetricProcessor) ProcessMetrics(_ context.Context, pdm pdata.Metrics) (pdata.Metrics, error) { +// processMetrics filters the given metrics based off the filterMetricProcessor's filters. 
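Both the batch processor above and the memory limiter below stop destructuring MetricAndDataPointCount(); when only the point total matters, pdata.Metrics.DataPointCount() is the remaining call. A tiny sketch with an assumed single-point metric:

    package main

    import (
    	"fmt"

    	"go.opentelemetry.io/collector/model/pdata"
    )

    func main() {
    	md := pdata.NewMetrics()
    	m := md.ResourceMetrics().AppendEmpty().InstrumentationLibraryMetrics().AppendEmpty().Metrics().AppendEmpty()
    	m.SetDataType(pdata.MetricDataTypeSum)
    	m.Sum().DataPoints().AppendEmpty().SetValue(1.5)

    	// Previously: _, points := md.MetricAndDataPointCount()
    	fmt.Println(md.DataPointCount()) // 1
    }
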
+func (fmp *filterMetricProcessor) processMetrics(_ context.Context, pdm pdata.Metrics) (pdata.Metrics, error) { pdm.ResourceMetrics().RemoveIf(func(rm pdata.ResourceMetrics) bool { keepMetricsForResource := fmp.shouldKeepMetricsForResource(rm.Resource()) if !keepMetricsForResource { diff --git a/internal/otel_collector/processor/memorylimiter/factory.go b/internal/otel_collector/processor/memorylimiter/factory.go index 934d50a62e9..682a48315d4 100644 --- a/internal/otel_collector/processor/memorylimiter/factory.go +++ b/internal/otel_collector/processor/memorylimiter/factory.go @@ -61,7 +61,7 @@ func createTracesProcessor( return processorhelper.NewTracesProcessor( cfg, nextConsumer, - ml, + ml.processTraces, processorhelper.WithCapabilities(processorCapabilities), processorhelper.WithShutdown(ml.shutdown)) } @@ -79,7 +79,7 @@ func createMetricsProcessor( return processorhelper.NewMetricsProcessor( cfg, nextConsumer, - ml, + ml.processMetrics, processorhelper.WithCapabilities(processorCapabilities), processorhelper.WithShutdown(ml.shutdown)) } @@ -97,7 +97,7 @@ func createLogsProcessor( return processorhelper.NewLogsProcessor( cfg, nextConsumer, - ml, + ml.processLogs, processorhelper.WithCapabilities(processorCapabilities), processorhelper.WithShutdown(ml.shutdown)) } diff --git a/internal/otel_collector/processor/memorylimiter/memorylimiter.go b/internal/otel_collector/processor/memorylimiter/memorylimiter.go index 403de7e7190..29524facb5c 100644 --- a/internal/otel_collector/processor/memorylimiter/memorylimiter.go +++ b/internal/otel_collector/processor/memorylimiter/memorylimiter.go @@ -25,9 +25,9 @@ import ( "go.uber.org/zap" "go.opentelemetry.io/collector/config/configtelemetry" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/iruntime" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/obsreport" - "go.opentelemetry.io/collector/processor/memorylimiter/internal/iruntime" ) const ( @@ -136,10 +136,10 @@ func getMemUsageChecker(cfg *Config, logger *zap.Logger) (*memUsageChecker, erro return nil, fmt.Errorf("failed to get total memory, use fixed memory settings (limit_mib): %w", err) } logger.Info("Using percentage memory limiter", - zap.Int64("total_memory", totalMemory), + zap.Uint64("total_memory", totalMemory), zap.Uint32("limit_percentage", cfg.MemoryLimitPercentage), zap.Uint32("spike_limit_percentage", cfg.MemorySpikePercentage)) - return newPercentageMemUsageChecker(totalMemory, int64(cfg.MemoryLimitPercentage), int64(cfg.MemorySpikePercentage)) + return newPercentageMemUsageChecker(totalMemory, uint64(cfg.MemoryLimitPercentage), uint64(cfg.MemorySpikePercentage)) } func (ml *memoryLimiter) shutdown(context.Context) error { @@ -147,8 +147,7 @@ func (ml *memoryLimiter) shutdown(context.Context) error { return nil } -// ProcessTraces implements the TProcessor interface -func (ml *memoryLimiter) ProcessTraces(ctx context.Context, td pdata.Traces) (pdata.Traces, error) { +func (ml *memoryLimiter) processTraces(ctx context.Context, td pdata.Traces) (pdata.Traces, error) { numSpans := td.SpanCount() if ml.forcingDrop() { // TODO: actually to be 100% sure that this is "refused" and not "dropped" @@ -167,9 +166,8 @@ func (ml *memoryLimiter) ProcessTraces(ctx context.Context, td pdata.Traces) (pd return td, nil } -// ProcessMetrics implements the MProcessor interface -func (ml *memoryLimiter) ProcessMetrics(ctx context.Context, md pdata.Metrics) (pdata.Metrics, error) { - _, numDataPoints := 
md.MetricAndDataPointCount() +func (ml *memoryLimiter) processMetrics(ctx context.Context, md pdata.Metrics) (pdata.Metrics, error) { + numDataPoints := md.DataPointCount() if ml.forcingDrop() { // TODO: actually to be 100% sure that this is "refused" and not "dropped" // it is necessary to check the pipeline to see if this is directly connected @@ -177,7 +175,6 @@ func (ml *memoryLimiter) ProcessMetrics(ctx context.Context, md pdata.Metrics) ( // assumes that the pipeline is properly configured and a receiver is on the // callstack. ml.obsrep.MetricsRefused(ctx, numDataPoints) - return md, errForcedDrop } @@ -187,8 +184,7 @@ func (ml *memoryLimiter) ProcessMetrics(ctx context.Context, md pdata.Metrics) ( return md, nil } -// ProcessLogs implements the LProcessor interface -func (ml *memoryLimiter) ProcessLogs(ctx context.Context, ld pdata.Logs) (pdata.Logs, error) { +func (ml *memoryLimiter) processLogs(ctx context.Context, ld pdata.Logs) (pdata.Logs, error) { numRecords := ld.LogRecordCount() if ml.forcingDrop() { // TODO: actually to be 100% sure that this is "refused" and not "dropped" @@ -325,9 +321,9 @@ func newFixedMemUsageChecker(memAllocLimit, memSpikeLimit uint64) (*memUsageChec }, nil } -func newPercentageMemUsageChecker(totalMemory int64, percentageLimit, percentageSpike int64) (*memUsageChecker, error) { +func newPercentageMemUsageChecker(totalMemory uint64, percentageLimit, percentageSpike uint64) (*memUsageChecker, error) { if percentageLimit > 100 || percentageLimit <= 0 || percentageSpike > 100 || percentageSpike <= 0 { return nil, errPercentageLimitOutOfRange } - return newFixedMemUsageChecker(uint64(percentageLimit*totalMemory)/100, uint64(percentageSpike*totalMemory)/100) + return newFixedMemUsageChecker(percentageLimit*totalMemory/100, percentageSpike*totalMemory/100) } diff --git a/internal/otel_collector/processor/probabilisticsamplerprocessor/probabilisticsampler.go b/internal/otel_collector/processor/probabilisticsamplerprocessor/probabilisticsampler.go index 0dc96479678..536f1a79a20 100644 --- a/internal/otel_collector/processor/probabilisticsamplerprocessor/probabilisticsampler.go +++ b/internal/otel_collector/processor/probabilisticsamplerprocessor/probabilisticsampler.go @@ -20,7 +20,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/processor/processorhelper" ) @@ -65,11 +65,11 @@ func newTracesProcessor(nextConsumer consumer.Traces, cfg *Config) (component.Tr return processorhelper.NewTracesProcessor( cfg, nextConsumer, - tsp, + tsp.processTraces, processorhelper.WithCapabilities(consumer.Capabilities{MutatesData: true})) } -func (tsp *tracesamplerprocessor) ProcessTraces(_ context.Context, td pdata.Traces) (pdata.Traces, error) { +func (tsp *tracesamplerprocessor) processTraces(_ context.Context, td pdata.Traces) (pdata.Traces, error) { td.ResourceSpans().RemoveIf(func(rs pdata.ResourceSpans) bool { rs.InstrumentationLibrarySpans().RemoveIf(func(ils pdata.InstrumentationLibrarySpans) bool { ils.Spans().RemoveIf(func(s pdata.Span) bool { diff --git a/internal/otel_collector/processor/processorhelper/attraction.go b/internal/otel_collector/processor/processorhelper/attraction.go index 502c39bd4a0..63afdbe741b 100644 --- a/internal/otel_collector/processor/processorhelper/attraction.go +++ b/internal/otel_collector/processor/processorhelper/attraction.go @@ -19,8 +19,8 @@ import ( "regexp" 
"strings" - "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/internal/processor/filterhelper" + "go.opentelemetry.io/collector/model/pdata" ) // Settings specifies the processor settings. diff --git a/internal/otel_collector/processor/processorhelper/hasher.go b/internal/otel_collector/processor/processorhelper/hasher.go index 1ba53e1c500..87a634d43da 100644 --- a/internal/otel_collector/processor/processorhelper/hasher.go +++ b/internal/otel_collector/processor/processorhelper/hasher.go @@ -21,7 +21,7 @@ import ( "encoding/hex" "math" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) const ( diff --git a/internal/otel_collector/processor/processorhelper/logs.go b/internal/otel_collector/processor/processorhelper/logs.go index ad732f17eab..7a647e32f97 100644 --- a/internal/otel_collector/processor/processorhelper/logs.go +++ b/internal/otel_collector/processor/processorhelper/logs.go @@ -18,7 +18,7 @@ import ( "context" "errors" - "go.opencensus.io/trace" + "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenterror" @@ -26,15 +26,12 @@ import ( "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumerhelper" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) -// LProcessor is a helper interface that allows avoiding implementing all functions in LogsProcessor by using NewLogsProcessor. -type LProcessor interface { - // ProcessLogs is a helper function that processes the incoming data and returns the data to be sent to the next component. - // If error is returned then returned data are ignored. It MUST not call the next component. - ProcessLogs(context.Context, pdata.Logs) (pdata.Logs, error) -} +// ProcessLogsFunc is a helper function that processes the incoming data and returns the data to be sent to the next component. +// If error is returned then returned data are ignored. It MUST not call the next component. 
+type ProcessLogsFunc func(context.Context, pdata.Logs) (pdata.Logs, error) type logProcessor struct { component.Component @@ -46,25 +43,25 @@ type logProcessor struct { func NewLogsProcessor( cfg config.Processor, nextConsumer consumer.Logs, - processor LProcessor, + logsFunc ProcessLogsFunc, options ...Option, ) (component.LogsProcessor, error) { - if processor == nil { - return nil, errors.New("nil processor") + if logsFunc == nil { + return nil, errors.New("nil logsFunc") } if nextConsumer == nil { return nil, componenterror.ErrNilNextConsumer } - traceAttributes := spanAttributes(cfg.ID()) + eventOptions := spanAttributes(cfg.ID()) bs := fromOptions(options) logsConsumer, err := consumerhelper.NewLogs(func(ctx context.Context, ld pdata.Logs) error { - span := trace.FromContext(ctx) - span.Annotate(traceAttributes, "Start processing.") + span := trace.SpanFromContext(ctx) + span.AddEvent("Start processing.", eventOptions) var err error - ld, err = processor.ProcessLogs(ctx, ld) - span.Annotate(traceAttributes, "End processing.") + ld, err = logsFunc(ctx, ld) + span.AddEvent("End processing.", eventOptions) if err != nil { if errors.Is(err, ErrSkipProcessingData) { return nil diff --git a/internal/otel_collector/processor/processorhelper/metrics.go b/internal/otel_collector/processor/processorhelper/metrics.go index 84f186bb735..62879c61bae 100644 --- a/internal/otel_collector/processor/processorhelper/metrics.go +++ b/internal/otel_collector/processor/processorhelper/metrics.go @@ -18,7 +18,7 @@ import ( "context" "errors" - "go.opencensus.io/trace" + "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenterror" @@ -26,15 +26,12 @@ import ( "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumerhelper" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) -// MProcessor is a helper interface that allows avoiding implementing all functions in MetricsProcessor by using NewTracesProcessor. -type MProcessor interface { - // ProcessMetrics is a helper function that processes the incoming data and returns the data to be sent to the next component. - // If error is returned then returned data are ignored. It MUST not call the next component. - ProcessMetrics(context.Context, pdata.Metrics) (pdata.Metrics, error) -} +// ProcessMetricsFunc is a helper function that processes the incoming data and returns the data to be sent to the next component. +// If error is returned then returned data are ignored. It MUST not call the next component. 
+type ProcessMetricsFunc func(context.Context, pdata.Metrics) (pdata.Metrics, error) type metricsProcessor struct { component.Component @@ -46,25 +43,25 @@ type metricsProcessor struct { func NewMetricsProcessor( cfg config.Processor, nextConsumer consumer.Metrics, - processor MProcessor, + metricsFunc ProcessMetricsFunc, options ...Option, ) (component.MetricsProcessor, error) { - if processor == nil { - return nil, errors.New("nil processor") + if metricsFunc == nil { + return nil, errors.New("nil metricsFunc") } if nextConsumer == nil { return nil, componenterror.ErrNilNextConsumer } - traceAttributes := spanAttributes(cfg.ID()) + eventOptions := spanAttributes(cfg.ID()) bs := fromOptions(options) metricsConsumer, err := consumerhelper.NewMetrics(func(ctx context.Context, md pdata.Metrics) error { - span := trace.FromContext(ctx) - span.Annotate(traceAttributes, "Start processing.") + span := trace.SpanFromContext(ctx) + span.AddEvent("Start processing.", eventOptions) var err error - md, err = processor.ProcessMetrics(ctx, md) - span.Annotate(traceAttributes, "End processing.") + md, err = metricsFunc(ctx, md) + span.AddEvent("End processing.", eventOptions) if err != nil { if errors.Is(err, ErrSkipProcessingData) { return nil diff --git a/internal/otel_collector/processor/processorhelper/processor.go b/internal/otel_collector/processor/processorhelper/processor.go index 449e0645612..528560b206f 100644 --- a/internal/otel_collector/processor/processorhelper/processor.go +++ b/internal/otel_collector/processor/processorhelper/processor.go @@ -17,7 +17,8 @@ package processorhelper import ( "errors" - "go.opencensus.io/trace" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/collector/component/componenthelper" "go.opentelemetry.io/collector/config" @@ -77,8 +78,6 @@ func fromOptions(options []Option) *baseSettings { return opts } -func spanAttributes(id config.ComponentID) []trace.Attribute { - return []trace.Attribute{ - trace.StringAttribute(obsmetrics.ProcessorKey, id.String()), - } +func spanAttributes(id config.ComponentID) trace.EventOption { + return trace.WithAttributes(attribute.String(obsmetrics.ProcessorKey, id.String())) } diff --git a/internal/otel_collector/processor/processorhelper/traces.go b/internal/otel_collector/processor/processorhelper/traces.go index 24703143599..8e0bd889abc 100644 --- a/internal/otel_collector/processor/processorhelper/traces.go +++ b/internal/otel_collector/processor/processorhelper/traces.go @@ -18,7 +18,7 @@ import ( "context" "errors" - "go.opencensus.io/trace" + "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenterror" @@ -26,15 +26,12 @@ import ( "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumerhelper" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) -// TProcessor is a helper interface that allows avoiding implementing all functions in TracesProcessor by using NewTracesProcessor. -type TProcessor interface { - // ProcessTraces is a helper function that processes the incoming data and returns the data to be sent to the next component. - // If error is returned then returned data are ignored. It MUST not call the next component. 
- ProcessTraces(context.Context, pdata.Traces) (pdata.Traces, error) -} +// ProcessTracesFunc is a helper function that processes the incoming data and returns the data to be sent to the next component. +// If error is returned then returned data are ignored. It MUST not call the next component. +type ProcessTracesFunc func(context.Context, pdata.Traces) (pdata.Traces, error) type tracesProcessor struct { component.Component @@ -46,25 +43,25 @@ type tracesProcessor struct { func NewTracesProcessor( cfg config.Processor, nextConsumer consumer.Traces, - processor TProcessor, + tracesFunc ProcessTracesFunc, options ...Option, ) (component.TracesProcessor, error) { - if processor == nil { - return nil, errors.New("nil processor") + if tracesFunc == nil { + return nil, errors.New("nil tracesFunc") } if nextConsumer == nil { return nil, componenterror.ErrNilNextConsumer } - traceAttributes := spanAttributes(cfg.ID()) + eventOptions := spanAttributes(cfg.ID()) bs := fromOptions(options) traceConsumer, err := consumerhelper.NewTraces(func(ctx context.Context, td pdata.Traces) error { - span := trace.FromContext(ctx) - span.Annotate(traceAttributes, "Start processing.") + span := trace.SpanFromContext(ctx) + span.AddEvent("Start processing.", eventOptions) var err error - td, err = processor.ProcessTraces(ctx, td) - span.Annotate(traceAttributes, "End processing.") + td, err = tracesFunc(ctx, td) + span.AddEvent("End processing.", eventOptions) if err != nil { if errors.Is(err, ErrSkipProcessingData) { return nil diff --git a/internal/otel_collector/processor/resourceprocessor/factory.go b/internal/otel_collector/processor/resourceprocessor/factory.go index f3c8ff07e64..922c10b79db 100644 --- a/internal/otel_collector/processor/resourceprocessor/factory.go +++ b/internal/otel_collector/processor/resourceprocessor/factory.go @@ -57,10 +57,11 @@ func createTracesProcessor( if err != nil { return nil, err } + proc := &resourceProcessor{attrProc: attrProc} return processorhelper.NewTracesProcessor( cfg, nextConsumer, - &resourceProcessor{attrProc: attrProc}, + proc.processTraces, processorhelper.WithCapabilities(processorCapabilities)) } @@ -73,10 +74,11 @@ func createMetricsProcessor( if err != nil { return nil, err } + proc := &resourceProcessor{attrProc: attrProc} return processorhelper.NewMetricsProcessor( cfg, nextConsumer, - &resourceProcessor{attrProc: attrProc}, + proc.processMetrics, processorhelper.WithCapabilities(processorCapabilities)) } @@ -89,10 +91,11 @@ func createLogsProcessor( if err != nil { return nil, err } + proc := &resourceProcessor{attrProc: attrProc} return processorhelper.NewLogsProcessor( cfg, nextConsumer, - &resourceProcessor{attrProc: attrProc}, + proc.processLogs, processorhelper.WithCapabilities(processorCapabilities)) } diff --git a/internal/otel_collector/processor/resourceprocessor/resource_processor.go b/internal/otel_collector/processor/resourceprocessor/resource_processor.go index 7b591525074..a542df6b65f 100644 --- a/internal/otel_collector/processor/resourceprocessor/resource_processor.go +++ b/internal/otel_collector/processor/resourceprocessor/resource_processor.go @@ -17,7 +17,7 @@ package resourceprocessor import ( "context" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/processor/processorhelper" ) @@ -25,8 +25,7 @@ type resourceProcessor struct { attrProc *processorhelper.AttrProc } -// ProcessTraces implements the TProcessor interface -func (rp *resourceProcessor) 
ProcessTraces(_ context.Context, td pdata.Traces) (pdata.Traces, error) { +func (rp *resourceProcessor) processTraces(_ context.Context, td pdata.Traces) (pdata.Traces, error) { rss := td.ResourceSpans() for i := 0; i < rss.Len(); i++ { rp.attrProc.Process(rss.At(i).Resource().Attributes()) @@ -34,8 +33,7 @@ func (rp *resourceProcessor) ProcessTraces(_ context.Context, td pdata.Traces) ( return td, nil } -// ProcessMetrics implements the MProcessor interface -func (rp *resourceProcessor) ProcessMetrics(_ context.Context, md pdata.Metrics) (pdata.Metrics, error) { +func (rp *resourceProcessor) processMetrics(_ context.Context, md pdata.Metrics) (pdata.Metrics, error) { rms := md.ResourceMetrics() for i := 0; i < rms.Len(); i++ { rp.attrProc.Process(rms.At(i).Resource().Attributes()) @@ -43,8 +41,7 @@ func (rp *resourceProcessor) ProcessMetrics(_ context.Context, md pdata.Metrics) return md, nil } -// ProcessLogs implements the LProcessor interface -func (rp *resourceProcessor) ProcessLogs(_ context.Context, ld pdata.Logs) (pdata.Logs, error) { +func (rp *resourceProcessor) processLogs(_ context.Context, ld pdata.Logs) (pdata.Logs, error) { rls := ld.ResourceLogs() for i := 0; i < rls.Len(); i++ { rp.attrProc.Process(rls.At(i).Resource().Attributes()) diff --git a/internal/otel_collector/processor/spanprocessor/factory.go b/internal/otel_collector/processor/spanprocessor/factory.go index 0f6c016f8e9..f5b7aa7dcdd 100644 --- a/internal/otel_collector/processor/spanprocessor/factory.go +++ b/internal/otel_collector/processor/spanprocessor/factory.go @@ -73,6 +73,6 @@ func createTracesProcessor( return processorhelper.NewTracesProcessor( cfg, nextConsumer, - sp, + sp.processTraces, processorhelper.WithCapabilities(processorCapabilities)) } diff --git a/internal/otel_collector/processor/spanprocessor/span.go b/internal/otel_collector/processor/spanprocessor/span.go index a892d441775..9a9be8d8cfd 100644 --- a/internal/otel_collector/processor/spanprocessor/span.go +++ b/internal/otel_collector/processor/spanprocessor/span.go @@ -21,8 +21,8 @@ import ( "strconv" "strings" - "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/internal/processor/filterspan" + "go.opentelemetry.io/collector/model/pdata" ) type spanProcessor struct { @@ -79,7 +79,7 @@ func newSpanProcessor(config Config) (*spanProcessor, error) { return sp, nil } -func (sp *spanProcessor) ProcessTraces(_ context.Context, td pdata.Traces) (pdata.Traces, error) { +func (sp *spanProcessor) processTraces(_ context.Context, td pdata.Traces) (pdata.Traces, error) { rss := td.ResourceSpans() for i := 0; i < rss.Len(); i++ { rs := rss.At(i) diff --git a/internal/otel_collector/proto_patch.sed b/internal/otel_collector/proto_patch.sed index 16cbc61ee31..a31b593e50e 100644 --- a/internal/otel_collector/proto_patch.sed +++ b/internal/otel_collector/proto_patch.sed @@ -1,4 +1,4 @@ -s+github.com/open-telemetry/opentelemetry-proto/gen/go/+go.opentelemetry.io/collector/internal/data/protogen/+g +s+github.com/open-telemetry/opentelemetry-proto/gen/go/+go.opentelemetry.io/collector/model/internal/data/protogen/+g s+package opentelemetry.proto.\(.*\).v1;+package opentelemetry.proto.\1.v1;\ \ @@ -8,14 +8,14 @@ s+bytes trace_id = \(.*\);+bytes trace_id = \1\ [\ // Use custom TraceId data type for this field.\ (gogoproto.nullable) = false,\ - (gogoproto.customtype) = "go.opentelemetry.io/collector/internal/data.TraceID"\ + (gogoproto.customtype) = "go.opentelemetry.io/collector/model/internal/data.TraceID"\ ];+g s+bytes 
\(.*span_id\) = \(.*\);+bytes \1 = \2\ [\ // Use custom SpanId data type for this field.\ (gogoproto.nullable) = false,\ - (gogoproto.customtype) = "go.opentelemetry.io/collector/internal/data.SpanID"\ + (gogoproto.customtype) = "go.opentelemetry.io/collector/model/internal/data.SpanID"\ ];+g s+repeated opentelemetry.proto.common.v1.KeyValue \(.*\);+repeated opentelemetry.proto.common.v1.KeyValue \1\ diff --git a/internal/otel_collector/receiver/doc.go b/internal/otel_collector/receiver/doc.go index f9d6d670038..f2e788b1241 100644 --- a/internal/otel_collector/receiver/doc.go +++ b/internal/otel_collector/receiver/doc.go @@ -14,9 +14,5 @@ // Package receiver contains implementations of Receiver components. // -// To implement a custom receiver you will need to implement component.ReceiverFactory -// interface and component.Receiver interface. -// -// To make the custom receiver part of the Collector build the factory must be added -// to defaultcomponents.Components() function. +// A receiver must be added as a default component to be included in the collector. package receiver diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/doc.go b/internal/otel_collector/receiver/hostmetricsreceiver/doc.go new file mode 100644 index 00000000000..db945b425f7 --- /dev/null +++ b/internal/otel_collector/receiver/hostmetricsreceiver/doc.go @@ -0,0 +1,16 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package hostmetricsreceiver reads metrics like CPU usage, disk usage, and network usage from the host. 
+package hostmetricsreceiver diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/factory.go b/internal/otel_collector/receiver/hostmetricsreceiver/factory.go index e9721cd223e..f21028642aa 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/factory.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/factory.go @@ -124,7 +124,7 @@ func createAddScraperOptions( } if ok { - scraperControllerOptions = append(scraperControllerOptions, scraperhelper.AddMetricsScraper(hostMetricsScraper)) + scraperControllerOptions = append(scraperControllerOptions, scraperhelper.AddScraper(hostMetricsScraper)) continue } @@ -134,7 +134,7 @@ func createAddScraperOptions( } if ok { - scraperControllerOptions = append(scraperControllerOptions, scraperhelper.AddResourceMetricsScraper(resourceMetricsScraper)) + scraperControllerOptions = append(scraperControllerOptions, scraperhelper.AddScraper(resourceMetricsScraper)) continue } @@ -144,7 +144,7 @@ func createAddScraperOptions( return scraperControllerOptions, nil } -func createHostMetricsScraper(ctx context.Context, logger *zap.Logger, key string, cfg internal.Config, factories map[string]internal.ScraperFactory) (scraper scraperhelper.MetricsScraper, ok bool, err error) { +func createHostMetricsScraper(ctx context.Context, logger *zap.Logger, key string, cfg internal.Config, factories map[string]internal.ScraperFactory) (scraper scraperhelper.Scraper, ok bool, err error) { factory := factories[key] if factory == nil { ok = false @@ -156,7 +156,7 @@ func createHostMetricsScraper(ctx context.Context, logger *zap.Logger, key strin return } -func createResourceMetricsScraper(ctx context.Context, logger *zap.Logger, key string, cfg internal.Config, factories map[string]internal.ResourceScraperFactory) (scraper scraperhelper.ResourceMetricsScraper, ok bool, err error) { +func createResourceMetricsScraper(ctx context.Context, logger *zap.Logger, key string, cfg internal.Config, factories map[string]internal.ResourceScraperFactory) (scraper scraperhelper.Scraper, ok bool, err error) { factory := factories[key] if factory == nil { ok = false diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/metadata/generated_metrics.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/metadata/generated_metrics.go index 0d8298ee11f..988ace499eb 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/metadata/generated_metrics.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/metadata/generated_metrics.go @@ -18,7 +18,7 @@ package metadata import ( "go.opentelemetry.io/collector/config" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) // Type is the component type name. 
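The generated_metrics.go hunks below drop the Double-prefixed metric kinds: pdata.MetricDataTypeDoubleSum becomes pdata.MetricDataTypeSum and pdata.MetricDataTypeDoubleGauge becomes pdata.MetricDataTypeGauge, with the accessors renamed to Sum() and Gauge(). A minimal sketch of initializing a cumulative, monotonic sum under the renamed v0.30.0 pdata API; the package and function names are illustrative only, and every pdata call used here appears verbatim in the hunks below.

package metadatasketch

import (
	"go.opentelemetry.io/collector/model/pdata"
)

// initCPUTimeMetric fills in the descriptor of a cumulative, monotonic sum
// metric using the renamed pdata types (Sum instead of DoubleSum).
func initCPUTimeMetric(metric pdata.Metric) {
	metric.SetName("system.cpu.time")
	metric.SetDescription("Total CPU seconds broken down by different states.")
	metric.SetUnit("s")
	metric.SetDataType(pdata.MetricDataTypeSum)
	metric.Sum().SetIsMonotonic(true)
	metric.Sum().SetAggregationTemporality(pdata.AggregationTemporalityCumulative)
}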
@@ -196,9 +196,9 @@ var Metrics = &metricStruct{ metric.SetName("process.cpu.time") metric.SetDescription("Total CPU seconds broken down by different states.") metric.SetUnit("s") - metric.SetDataType(pdata.MetricDataTypeDoubleSum) - metric.DoubleSum().SetIsMonotonic(true) - metric.DoubleSum().SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + metric.SetDataType(pdata.MetricDataTypeSum) + metric.Sum().SetIsMonotonic(true) + metric.Sum().SetAggregationTemporality(pdata.AggregationTemporalityCumulative) }, }, &metricImpl{ @@ -240,7 +240,7 @@ var Metrics = &metricStruct{ metric.SetName("system.cpu.load_average.15m") metric.SetDescription("Average CPU Load over 15 minutes.") metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeDoubleGauge) + metric.SetDataType(pdata.MetricDataTypeGauge) }, }, &metricImpl{ @@ -249,7 +249,7 @@ var Metrics = &metricStruct{ metric.SetName("system.cpu.load_average.1m") metric.SetDescription("Average CPU Load over 1 minute.") metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeDoubleGauge) + metric.SetDataType(pdata.MetricDataTypeGauge) }, }, &metricImpl{ @@ -258,7 +258,7 @@ var Metrics = &metricStruct{ metric.SetName("system.cpu.load_average.5m") metric.SetDescription("Average CPU Load over 5 minutes.") metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeDoubleGauge) + metric.SetDataType(pdata.MetricDataTypeGauge) }, }, &metricImpl{ @@ -267,9 +267,9 @@ var Metrics = &metricStruct{ metric.SetName("system.cpu.time") metric.SetDescription("Total CPU seconds broken down by different states.") metric.SetUnit("s") - metric.SetDataType(pdata.MetricDataTypeDoubleSum) - metric.DoubleSum().SetIsMonotonic(true) - metric.DoubleSum().SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + metric.SetDataType(pdata.MetricDataTypeSum) + metric.Sum().SetIsMonotonic(true) + metric.Sum().SetAggregationTemporality(pdata.AggregationTemporalityCumulative) }, }, &metricImpl{ @@ -289,9 +289,9 @@ var Metrics = &metricStruct{ metric.SetName("system.disk.io_time") metric.SetDescription("Time disk spent activated. 
On Windows, this is calculated as the inverse of disk idle time.") metric.SetUnit("s") - metric.SetDataType(pdata.MetricDataTypeDoubleSum) - metric.DoubleSum().SetIsMonotonic(true) - metric.DoubleSum().SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + metric.SetDataType(pdata.MetricDataTypeSum) + metric.Sum().SetIsMonotonic(true) + metric.Sum().SetAggregationTemporality(pdata.AggregationTemporalityCumulative) }, }, &metricImpl{ @@ -311,9 +311,9 @@ var Metrics = &metricStruct{ metric.SetName("system.disk.operation_time") metric.SetDescription("Time spent in disk operations.") metric.SetUnit("s") - metric.SetDataType(pdata.MetricDataTypeDoubleSum) - metric.DoubleSum().SetIsMonotonic(true) - metric.DoubleSum().SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + metric.SetDataType(pdata.MetricDataTypeSum) + metric.Sum().SetIsMonotonic(true) + metric.Sum().SetAggregationTemporality(pdata.AggregationTemporalityCumulative) }, }, &metricImpl{ @@ -344,9 +344,9 @@ var Metrics = &metricStruct{ metric.SetName("system.disk.weighted_io_time") metric.SetDescription("Time disk spent activated multiplied by the queue length.") metric.SetUnit("s") - metric.SetDataType(pdata.MetricDataTypeDoubleSum) - metric.DoubleSum().SetIsMonotonic(true) - metric.DoubleSum().SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + metric.SetDataType(pdata.MetricDataTypeSum) + metric.Sum().SetIsMonotonic(true) + metric.Sum().SetAggregationTemporality(pdata.AggregationTemporalityCumulative) }, }, &metricImpl{ diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper.go index 45de375706a..b58e1210f7e 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper.go @@ -34,7 +34,7 @@ type ScraperFactory interface { // CreateMetricsScraper creates a scraper based on this config. // If the config is not valid, error will be returned instead. - CreateMetricsScraper(ctx context.Context, logger *zap.Logger, cfg Config) (scraperhelper.MetricsScraper, error) + CreateMetricsScraper(ctx context.Context, logger *zap.Logger, cfg Config) (scraperhelper.Scraper, error) } // ResourceScraperFactory can create a ResourceScraper. @@ -43,7 +43,7 @@ type ResourceScraperFactory interface { // CreateResourceMetricsScraper creates a resource scraper based on this // config. If the config is not valid, error will be returned instead. - CreateResourceMetricsScraper(ctx context.Context, logger *zap.Logger, cfg Config) (scraperhelper.ResourceMetricsScraper, error) + CreateResourceMetricsScraper(ctx context.Context, logger *zap.Logger, cfg Config) (scraperhelper.Scraper, error) } // Config is the configuration of a scraper. 
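The scraper hunks that follow replace the Resize/At indexing pattern with EnsureCapacity plus AppendEmpty when building data point slices. A minimal sketch of the append-style pattern under model/pdata, assuming the usual DoubleDataPoint setters (SetTimestamp, SetValue, LabelsMap); the helper name, map argument, and "device" label key are illustrative and not part of the patch.

package scrapersketch

import (
	"go.opentelemetry.io/collector/model/pdata"
)

// appendDiskIOTimePoints adds one data point per device. Instead of
// pre-sizing the slice with Resize and writing via At(i), the slice is
// grown with AppendEmpty after reserving capacity with EnsureCapacity.
// The metric is assumed to already be initialized as a Sum.
func appendDiskIOTimePoints(metric pdata.Metric, now pdata.Timestamp, ioTimeSecondsByDevice map[string]float64) {
	ddps := metric.Sum().DataPoints()
	ddps.EnsureCapacity(len(ioTimeSecondsByDevice))
	for device, seconds := range ioTimeSecondsByDevice {
		dp := ddps.AppendEmpty()
		dp.SetTimestamp(now)
		dp.SetValue(seconds)
		dp.LabelsMap().Insert("device", device)
	}
}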
diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper.go index 8efd0ab126a..a309c001abd 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper.go @@ -22,7 +22,7 @@ import ( "github.com/shirou/gopsutil/host" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/metadata" "go.opentelemetry.io/collector/receiver/scrapererror" ) @@ -63,18 +63,17 @@ func (s *scraper) scrape(_ context.Context) (pdata.MetricSlice, error) { return metrics, scrapererror.NewPartialScrapeError(err, metricsLen) } - metrics.Resize(metricsLen) - initializeCPUTimeMetric(metrics.At(0), s.startTime, now, cpuTimes) + initializeCPUTimeMetric(metrics.AppendEmpty(), s.startTime, now, cpuTimes) return metrics, nil } func initializeCPUTimeMetric(metric pdata.Metric, startTime, now pdata.Timestamp, cpuTimes []cpu.TimesStat) { metadata.Metrics.SystemCPUTime.Init(metric) - ddps := metric.DoubleSum().DataPoints() - ddps.Resize(len(cpuTimes) * cpuStatesLen) - for i, cpuTime := range cpuTimes { - appendCPUTimeStateDataPoints(ddps, i*cpuStatesLen, startTime, now, cpuTime) + ddps := metric.Sum().DataPoints() + ddps.EnsureCapacity(len(cpuTimes) * cpuStatesLen) + for _, cpuTime := range cpuTimes { + appendCPUTimeStateDataPoints(ddps, startTime, now, cpuTime) } } diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_linux.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_linux.go index e8636fe3d28..85cfe3063f5 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_linux.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_linux.go @@ -19,19 +19,19 @@ package cpuscraper import ( "github.com/shirou/gopsutil/cpu" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/metadata" ) const cpuStatesLen = 8 -func appendCPUTimeStateDataPoints(ddps pdata.DoubleDataPointSlice, startIdx int, startTime, now pdata.Timestamp, cpuTime cpu.TimesStat) { - initializeCPUTimeDataPoint(ddps.At(startIdx+0), startTime, now, cpuTime.CPU, metadata.LabelCPUState.User, cpuTime.User) - initializeCPUTimeDataPoint(ddps.At(startIdx+1), startTime, now, cpuTime.CPU, metadata.LabelCPUState.System, cpuTime.System) - initializeCPUTimeDataPoint(ddps.At(startIdx+2), startTime, now, cpuTime.CPU, metadata.LabelCPUState.Idle, cpuTime.Idle) - initializeCPUTimeDataPoint(ddps.At(startIdx+3), startTime, now, cpuTime.CPU, metadata.LabelCPUState.Interrupt, cpuTime.Irq) - initializeCPUTimeDataPoint(ddps.At(startIdx+4), startTime, now, cpuTime.CPU, metadata.LabelCPUState.Nice, cpuTime.Nice) - initializeCPUTimeDataPoint(ddps.At(startIdx+5), startTime, now, cpuTime.CPU, metadata.LabelCPUState.Softirq, cpuTime.Softirq) - initializeCPUTimeDataPoint(ddps.At(startIdx+6), startTime, now, cpuTime.CPU, metadata.LabelCPUState.Steal, cpuTime.Steal) - initializeCPUTimeDataPoint(ddps.At(startIdx+7), startTime, now, cpuTime.CPU, 
metadata.LabelCPUState.Wait, cpuTime.Iowait) +func appendCPUTimeStateDataPoints(ddps pdata.DoubleDataPointSlice, startTime, now pdata.Timestamp, cpuTime cpu.TimesStat) { + initializeCPUTimeDataPoint(ddps.AppendEmpty(), startTime, now, cpuTime.CPU, metadata.LabelCPUState.User, cpuTime.User) + initializeCPUTimeDataPoint(ddps.AppendEmpty(), startTime, now, cpuTime.CPU, metadata.LabelCPUState.System, cpuTime.System) + initializeCPUTimeDataPoint(ddps.AppendEmpty(), startTime, now, cpuTime.CPU, metadata.LabelCPUState.Idle, cpuTime.Idle) + initializeCPUTimeDataPoint(ddps.AppendEmpty(), startTime, now, cpuTime.CPU, metadata.LabelCPUState.Interrupt, cpuTime.Irq) + initializeCPUTimeDataPoint(ddps.AppendEmpty(), startTime, now, cpuTime.CPU, metadata.LabelCPUState.Nice, cpuTime.Nice) + initializeCPUTimeDataPoint(ddps.AppendEmpty(), startTime, now, cpuTime.CPU, metadata.LabelCPUState.Softirq, cpuTime.Softirq) + initializeCPUTimeDataPoint(ddps.AppendEmpty(), startTime, now, cpuTime.CPU, metadata.LabelCPUState.Steal, cpuTime.Steal) + initializeCPUTimeDataPoint(ddps.AppendEmpty(), startTime, now, cpuTime.CPU, metadata.LabelCPUState.Wait, cpuTime.Iowait) } diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_others.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_others.go index 58280779c8b..2dec5475af9 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_others.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_others.go @@ -19,15 +19,15 @@ package cpuscraper import ( "github.com/shirou/gopsutil/cpu" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/metadata" ) const cpuStatesLen = 4 -func appendCPUTimeStateDataPoints(ddps pdata.DoubleDataPointSlice, startIdx int, startTime, now pdata.Timestamp, cpuTime cpu.TimesStat) { - initializeCPUTimeDataPoint(ddps.At(startIdx+0), startTime, now, cpuTime.CPU, metadata.LabelCPUState.User, cpuTime.User) - initializeCPUTimeDataPoint(ddps.At(startIdx+1), startTime, now, cpuTime.CPU, metadata.LabelCPUState.System, cpuTime.System) - initializeCPUTimeDataPoint(ddps.At(startIdx+2), startTime, now, cpuTime.CPU, metadata.LabelCPUState.Idle, cpuTime.Idle) - initializeCPUTimeDataPoint(ddps.At(startIdx+3), startTime, now, cpuTime.CPU, metadata.LabelCPUState.Interrupt, cpuTime.Irq) +func appendCPUTimeStateDataPoints(ddps pdata.DoubleDataPointSlice, startTime, now pdata.Timestamp, cpuTime cpu.TimesStat) { + initializeCPUTimeDataPoint(ddps.AppendEmpty(), startTime, now, cpuTime.CPU, metadata.LabelCPUState.User, cpuTime.User) + initializeCPUTimeDataPoint(ddps.AppendEmpty(), startTime, now, cpuTime.CPU, metadata.LabelCPUState.System, cpuTime.System) + initializeCPUTimeDataPoint(ddps.AppendEmpty(), startTime, now, cpuTime.CPU, metadata.LabelCPUState.Idle, cpuTime.Idle) + initializeCPUTimeDataPoint(ddps.AppendEmpty(), startTime, now, cpuTime.CPU, metadata.LabelCPUState.Interrupt, cpuTime.Irq) } diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/factory.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/factory.go index 485e61d484a..277fdec5d83 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/factory.go +++ 
b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/factory.go @@ -44,7 +44,7 @@ func (f *Factory) CreateMetricsScraper( ctx context.Context, _ *zap.Logger, config internal.Config, -) (scraperhelper.MetricsScraper, error) { +) (scraperhelper.Scraper, error) { cfg := config.(*Config) s := newCPUScraper(ctx, cfg) diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others.go index 8a0e1fc1d6b..ac36ca56b62 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others.go @@ -25,8 +25,8 @@ import ( "github.com/shirou/gopsutil/host" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/internal/processor/filterset" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/metadata" "go.opentelemetry.io/collector/receiver/scrapererror" ) @@ -94,13 +94,13 @@ func (s *scraper) scrape(_ context.Context) (pdata.MetricSlice, error) { ioCounters = s.filterByDevice(ioCounters) if len(ioCounters) > 0 { - metrics.Resize(metricsLen) - initializeDiskIOMetric(metrics.At(0), s.startTime, now, ioCounters) - initializeDiskOperationsMetric(metrics.At(1), s.startTime, now, ioCounters) - initializeDiskIOTimeMetric(metrics.At(2), s.startTime, now, ioCounters) - initializeDiskOperationTimeMetric(metrics.At(3), s.startTime, now, ioCounters) - initializeDiskPendingOperationsMetric(metrics.At(4), now, ioCounters) - appendSystemSpecificMetrics(metrics, 5, s.startTime, now, ioCounters) + metrics.EnsureCapacity(metricsLen) + initializeDiskIOMetric(metrics.AppendEmpty(), s.startTime, now, ioCounters) + initializeDiskOperationsMetric(metrics.AppendEmpty(), s.startTime, now, ioCounters) + initializeDiskIOTimeMetric(metrics.AppendEmpty(), s.startTime, now, ioCounters) + initializeDiskOperationTimeMetric(metrics.AppendEmpty(), s.startTime, now, ioCounters) + initializeDiskPendingOperationsMetric(metrics.AppendEmpty(), now, ioCounters) + appendSystemSpecificMetrics(metrics, s.startTime, now, ioCounters) } return metrics, nil @@ -110,13 +110,11 @@ func initializeDiskIOMetric(metric pdata.Metric, startTime, now pdata.Timestamp, metadata.Metrics.SystemDiskIo.Init(metric) idps := metric.IntSum().DataPoints() - idps.Resize(2 * len(ioCounters)) + idps.EnsureCapacity(2 * len(ioCounters)) - idx := 0 for device, ioCounter := range ioCounters { - initializeInt64DataPoint(idps.At(idx+0), startTime, now, device, metadata.LabelDiskDirection.Read, int64(ioCounter.ReadBytes)) - initializeInt64DataPoint(idps.At(idx+1), startTime, now, device, metadata.LabelDiskDirection.Write, int64(ioCounter.WriteBytes)) - idx += 2 + initializeInt64DataPoint(idps.AppendEmpty(), startTime, now, device, metadata.LabelDiskDirection.Read, int64(ioCounter.ReadBytes)) + initializeInt64DataPoint(idps.AppendEmpty(), startTime, now, device, metadata.LabelDiskDirection.Write, int64(ioCounter.WriteBytes)) } } @@ -124,40 +122,34 @@ func initializeDiskOperationsMetric(metric pdata.Metric, startTime, now pdata.Ti metadata.Metrics.SystemDiskOperations.Init(metric) idps := metric.IntSum().DataPoints() - idps.Resize(2 * len(ioCounters)) + idps.EnsureCapacity(2 * len(ioCounters)) - idx := 0 for device, 
ioCounter := range ioCounters { - initializeInt64DataPoint(idps.At(idx+0), startTime, now, device, metadata.LabelDiskDirection.Read, int64(ioCounter.ReadCount)) - initializeInt64DataPoint(idps.At(idx+1), startTime, now, device, metadata.LabelDiskDirection.Write, int64(ioCounter.WriteCount)) - idx += 2 + initializeInt64DataPoint(idps.AppendEmpty(), startTime, now, device, metadata.LabelDiskDirection.Read, int64(ioCounter.ReadCount)) + initializeInt64DataPoint(idps.AppendEmpty(), startTime, now, device, metadata.LabelDiskDirection.Write, int64(ioCounter.WriteCount)) } } func initializeDiskIOTimeMetric(metric pdata.Metric, startTime, now pdata.Timestamp, ioCounters map[string]disk.IOCountersStat) { metadata.Metrics.SystemDiskIoTime.Init(metric) - ddps := metric.DoubleSum().DataPoints() - ddps.Resize(len(ioCounters)) + ddps := metric.Sum().DataPoints() + ddps.EnsureCapacity(len(ioCounters)) - idx := 0 for device, ioCounter := range ioCounters { - initializeDoubleDataPoint(ddps.At(idx+0), startTime, now, device, "", float64(ioCounter.IoTime)/1e3) - idx++ + initializeDoubleDataPoint(ddps.AppendEmpty(), startTime, now, device, "", float64(ioCounter.IoTime)/1e3) } } func initializeDiskOperationTimeMetric(metric pdata.Metric, startTime, now pdata.Timestamp, ioCounters map[string]disk.IOCountersStat) { metadata.Metrics.SystemDiskOperationTime.Init(metric) - ddps := metric.DoubleSum().DataPoints() - ddps.Resize(2 * len(ioCounters)) + ddps := metric.Sum().DataPoints() + ddps.EnsureCapacity(2 * len(ioCounters)) - idx := 0 for device, ioCounter := range ioCounters { - initializeDoubleDataPoint(ddps.At(idx+0), startTime, now, device, metadata.LabelDiskDirection.Read, float64(ioCounter.ReadTime)/1e3) - initializeDoubleDataPoint(ddps.At(idx+1), startTime, now, device, metadata.LabelDiskDirection.Write, float64(ioCounter.WriteTime)/1e3) - idx += 2 + initializeDoubleDataPoint(ddps.AppendEmpty(), startTime, now, device, metadata.LabelDiskDirection.Read, float64(ioCounter.ReadTime)/1e3) + initializeDoubleDataPoint(ddps.AppendEmpty(), startTime, now, device, metadata.LabelDiskDirection.Write, float64(ioCounter.WriteTime)/1e3) } } @@ -165,12 +157,10 @@ func initializeDiskPendingOperationsMetric(metric pdata.Metric, now pdata.Timest metadata.Metrics.SystemDiskPendingOperations.Init(metric) idps := metric.IntSum().DataPoints() - idps.Resize(len(ioCounters)) + idps.EnsureCapacity(len(ioCounters)) - idx := 0 for device, ioCounter := range ioCounters { - initializeDiskPendingDataPoint(idps.At(idx), now, device, int64(ioCounter.IopsInProgress)) - idx++ + initializeDiskPendingDataPoint(idps.AppendEmpty(), now, device, int64(ioCounter.IopsInProgress)) } } diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_fallback.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_fallback.go index 2e1ae08d5bc..d5509dfebd6 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_fallback.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_fallback.go @@ -19,10 +19,10 @@ package diskscraper import ( "github.com/shirou/gopsutil/disk" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) const systemSpecificMetricsLen = 0 -func appendSystemSpecificMetrics(metrics pdata.MetricSlice, startIdx int, startTime, now pdata.Timestamp, ioCounters 
map[string]disk.IOCountersStat) { +func appendSystemSpecificMetrics(metrics pdata.MetricSlice, startTime, now pdata.Timestamp, ioCounters map[string]disk.IOCountersStat) { } diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_linux.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_linux.go index 97dd5f0ae04..c7c001cdd83 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_linux.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_linux.go @@ -19,27 +19,25 @@ package diskscraper import ( "github.com/shirou/gopsutil/disk" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/metadata" ) const systemSpecificMetricsLen = 2 -func appendSystemSpecificMetrics(metrics pdata.MetricSlice, startIdx int, startTime, now pdata.Timestamp, ioCounters map[string]disk.IOCountersStat) { - initializeDiskWeightedIOTimeMetric(metrics.At(startIdx+0), startTime, now, ioCounters) - initializeDiskMergedMetric(metrics.At(startIdx+1), startTime, now, ioCounters) +func appendSystemSpecificMetrics(metrics pdata.MetricSlice, startTime, now pdata.Timestamp, ioCounters map[string]disk.IOCountersStat) { + initializeDiskWeightedIOTimeMetric(metrics.AppendEmpty(), startTime, now, ioCounters) + initializeDiskMergedMetric(metrics.AppendEmpty(), startTime, now, ioCounters) } func initializeDiskWeightedIOTimeMetric(metric pdata.Metric, startTime, now pdata.Timestamp, ioCounters map[string]disk.IOCountersStat) { metadata.Metrics.SystemDiskWeightedIoTime.Init(metric) - ddps := metric.DoubleSum().DataPoints() - ddps.Resize(len(ioCounters)) + ddps := metric.Sum().DataPoints() + ddps.EnsureCapacity(len(ioCounters)) - idx := 0 for device, ioCounter := range ioCounters { - initializeDoubleDataPoint(ddps.At(idx+0), startTime, now, device, "", float64(ioCounter.WeightedIO)/1e3) - idx++ + initializeDoubleDataPoint(ddps.AppendEmpty(), startTime, now, device, "", float64(ioCounter.WeightedIO)/1e3) } } @@ -47,12 +45,10 @@ func initializeDiskMergedMetric(metric pdata.Metric, startTime, now pdata.Timest metadata.Metrics.SystemDiskMerged.Init(metric) idps := metric.IntSum().DataPoints() - idps.Resize(2 * len(ioCounters)) + idps.EnsureCapacity(2 * len(ioCounters)) - idx := 0 for device, ioCounter := range ioCounters { - initializeInt64DataPoint(idps.At(idx+0), startTime, now, device, metadata.LabelDiskDirection.Read, int64(ioCounter.MergedReadCount)) - initializeInt64DataPoint(idps.At(idx+1), startTime, now, device, metadata.LabelDiskDirection.Write, int64(ioCounter.MergedWriteCount)) - idx += 2 + initializeInt64DataPoint(idps.AppendEmpty(), startTime, now, device, metadata.LabelDiskDirection.Read, int64(ioCounter.MergedReadCount)) + initializeInt64DataPoint(idps.AppendEmpty(), startTime, now, device, metadata.LabelDiskDirection.Write, int64(ioCounter.MergedWriteCount)) } } diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_windows.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_windows.go index 0f68b3d6a29..4f5046f81e5 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_windows.go +++ 
b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_windows.go @@ -22,8 +22,8 @@ import ( "github.com/shirou/gopsutil/host" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/internal/processor/filterset" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/metadata" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/perfcounters" "go.opentelemetry.io/collector/receiver/scrapererror" @@ -119,12 +119,12 @@ func (s *scraper) scrape(ctx context.Context) (pdata.MetricSlice, error) { } if len(logicalDiskCounterValues) > 0 { - metrics.Resize(metricsLen) - initializeDiskIOMetric(metrics.At(0), s.startTime, now, logicalDiskCounterValues) - initializeDiskOperationsMetric(metrics.At(1), s.startTime, now, logicalDiskCounterValues) - initializeDiskIOTimeMetric(metrics.At(2), s.startTime, now, logicalDiskCounterValues) - initializeDiskOperationTimeMetric(metrics.At(3), s.startTime, now, logicalDiskCounterValues) - initializeDiskPendingOperationsMetric(metrics.At(4), now, logicalDiskCounterValues) + metrics.EnsureCapacity(metricsLen) + initializeDiskIOMetric(metrics.AppendEmpty(), s.startTime, now, logicalDiskCounterValues) + initializeDiskOperationsMetric(metrics.AppendEmpty(), s.startTime, now, logicalDiskCounterValues) + initializeDiskIOTimeMetric(metrics.AppendEmpty(), s.startTime, now, logicalDiskCounterValues) + initializeDiskOperationTimeMetric(metrics.AppendEmpty(), s.startTime, now, logicalDiskCounterValues) + initializeDiskPendingOperationsMetric(metrics.AppendEmpty(), now, logicalDiskCounterValues) } return metrics, nil @@ -134,10 +134,10 @@ func initializeDiskIOMetric(metric pdata.Metric, startTime, now pdata.Timestamp, metadata.Metrics.SystemDiskIo.Init(metric) idps := metric.IntSum().DataPoints() - idps.Resize(2 * len(logicalDiskCounterValues)) - for idx, logicalDiskCounter := range logicalDiskCounterValues { - initializeInt64DataPoint(idps.At(2*idx+0), startTime, now, logicalDiskCounter.InstanceName, metadata.LabelDiskDirection.Read, logicalDiskCounter.Values[readBytesPerSec]) - initializeInt64DataPoint(idps.At(2*idx+1), startTime, now, logicalDiskCounter.InstanceName, metadata.LabelDiskDirection.Write, logicalDiskCounter.Values[writeBytesPerSec]) + idps.EnsureCapacity(2 * len(logicalDiskCounterValues)) + for _, logicalDiskCounter := range logicalDiskCounterValues { + initializeInt64DataPoint(idps.AppendEmpty(), startTime, now, logicalDiskCounter.InstanceName, metadata.LabelDiskDirection.Read, logicalDiskCounter.Values[readBytesPerSec]) + initializeInt64DataPoint(idps.AppendEmpty(), startTime, now, logicalDiskCounter.InstanceName, metadata.LabelDiskDirection.Write, logicalDiskCounter.Values[writeBytesPerSec]) } } @@ -145,32 +145,32 @@ func initializeDiskOperationsMetric(metric pdata.Metric, startTime, now pdata.Ti metadata.Metrics.SystemDiskOperations.Init(metric) idps := metric.IntSum().DataPoints() - idps.Resize(2 * len(logicalDiskCounterValues)) - for idx, logicalDiskCounter := range logicalDiskCounterValues { - initializeInt64DataPoint(idps.At(2*idx+0), startTime, now, logicalDiskCounter.InstanceName, metadata.LabelDiskDirection.Read, logicalDiskCounter.Values[readsPerSec]) - initializeInt64DataPoint(idps.At(2*idx+1), startTime, now, logicalDiskCounter.InstanceName, metadata.LabelDiskDirection.Write, logicalDiskCounter.Values[writesPerSec]) + idps.EnsureCapacity(2 * 
len(logicalDiskCounterValues)) + for _, logicalDiskCounter := range logicalDiskCounterValues { + initializeInt64DataPoint(idps.AppendEmpty(), startTime, now, logicalDiskCounter.InstanceName, metadata.LabelDiskDirection.Read, logicalDiskCounter.Values[readsPerSec]) + initializeInt64DataPoint(idps.AppendEmpty(), startTime, now, logicalDiskCounter.InstanceName, metadata.LabelDiskDirection.Write, logicalDiskCounter.Values[writesPerSec]) } } func initializeDiskIOTimeMetric(metric pdata.Metric, startTime, now pdata.Timestamp, logicalDiskCounterValues []*perfcounters.CounterValues) { metadata.Metrics.SystemDiskIoTime.Init(metric) - ddps := metric.DoubleSum().DataPoints() - ddps.Resize(len(logicalDiskCounterValues)) - for idx, logicalDiskCounter := range logicalDiskCounterValues { + ddps := metric.Sum().DataPoints() + ddps.EnsureCapacity(len(logicalDiskCounterValues)) + for _, logicalDiskCounter := range logicalDiskCounterValues { // disk active time = system boot time - disk idle time - initializeDoubleDataPoint(ddps.At(idx), startTime, now, logicalDiskCounter.InstanceName, "", float64(now-startTime)/1e9-float64(logicalDiskCounter.Values[idleTime])/1e7) + initializeDoubleDataPoint(ddps.AppendEmpty(), startTime, now, logicalDiskCounter.InstanceName, "", float64(now-startTime)/1e9-float64(logicalDiskCounter.Values[idleTime])/1e7) } } func initializeDiskOperationTimeMetric(metric pdata.Metric, startTime, now pdata.Timestamp, logicalDiskCounterValues []*perfcounters.CounterValues) { metadata.Metrics.SystemDiskOperationTime.Init(metric) - ddps := metric.DoubleSum().DataPoints() - ddps.Resize(2 * len(logicalDiskCounterValues)) - for idx, logicalDiskCounter := range logicalDiskCounterValues { - initializeDoubleDataPoint(ddps.At(2*idx+0), startTime, now, logicalDiskCounter.InstanceName, metadata.LabelDiskDirection.Read, float64(logicalDiskCounter.Values[avgDiskSecsPerRead])/1e7) - initializeDoubleDataPoint(ddps.At(2*idx+1), startTime, now, logicalDiskCounter.InstanceName, metadata.LabelDiskDirection.Write, float64(logicalDiskCounter.Values[avgDiskSecsPerWrite])/1e7) + ddps := metric.Sum().DataPoints() + ddps.EnsureCapacity(2 * len(logicalDiskCounterValues)) + for _, logicalDiskCounter := range logicalDiskCounterValues { + initializeDoubleDataPoint(ddps.AppendEmpty(), startTime, now, logicalDiskCounter.InstanceName, metadata.LabelDiskDirection.Read, float64(logicalDiskCounter.Values[avgDiskSecsPerRead])/1e7) + initializeDoubleDataPoint(ddps.AppendEmpty(), startTime, now, logicalDiskCounter.InstanceName, metadata.LabelDiskDirection.Write, float64(logicalDiskCounter.Values[avgDiskSecsPerWrite])/1e7) } } @@ -178,9 +178,9 @@ func initializeDiskPendingOperationsMetric(metric pdata.Metric, now pdata.Timest metadata.Metrics.SystemDiskPendingOperations.Init(metric) idps := metric.IntSum().DataPoints() - idps.Resize(len(logicalDiskCounterValues)) - for idx, logicalDiskCounter := range logicalDiskCounterValues { - initializeDiskPendingDataPoint(idps.At(idx), now, logicalDiskCounter.InstanceName, logicalDiskCounter.Values[queueLength]) + idps.EnsureCapacity(len(logicalDiskCounterValues)) + for _, logicalDiskCounter := range logicalDiskCounterValues { + initializeDiskPendingDataPoint(idps.AppendEmpty(), now, logicalDiskCounter.InstanceName, logicalDiskCounter.Values[queueLength]) } } diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/factory.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/factory.go index b5ac890dbe8..38013243fb1 
100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/factory.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper/factory.go @@ -44,7 +44,7 @@ func (f *Factory) CreateMetricsScraper( ctx context.Context, _ *zap.Logger, config internal.Config, -) (scraperhelper.MetricsScraper, error) { +) (scraperhelper.Scraper, error) { cfg := config.(*Config) s, err := newDiskScraper(ctx, cfg) if err != nil { diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/factory.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/factory.go index e2d7dd32414..8bed8103366 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/factory.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/factory.go @@ -49,7 +49,7 @@ func (f *Factory) CreateMetricsScraper( ctx context.Context, _ *zap.Logger, config internal.Config, -) (scraperhelper.MetricsScraper, error) { +) (scraperhelper.Scraper, error) { cfg := config.(*Config) s, err := newFileSystemScraper(ctx, cfg) if err != nil { diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go index c6aa8b385d0..b7aced5e29f 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go @@ -21,7 +21,7 @@ import ( "github.com/shirou/gopsutil/disk" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/metadata" "go.opentelemetry.io/collector/receiver/scrapererror" ) @@ -84,9 +84,9 @@ func (s *scraper) Scrape(_ context.Context) (pdata.MetricSlice, error) { } if len(usages) > 0 { - metrics.Resize(metricsLen) - initializeFileSystemUsageMetric(metrics.At(0), now, usages) - appendSystemSpecificMetrics(metrics, 1, now, usages) + metrics.EnsureCapacity(metricsLen) + initializeFileSystemUsageMetric(metrics.AppendEmpty(), now, usages) + appendSystemSpecificMetrics(metrics, now, usages) } err = errors.Combine() @@ -101,9 +101,9 @@ func initializeFileSystemUsageMetric(metric pdata.Metric, now pdata.Timestamp, d metadata.Metrics.SystemFilesystemUsage.Init(metric) idps := metric.IntSum().DataPoints() - idps.Resize(fileSystemStatesLen * len(deviceUsages)) - for i, deviceUsage := range deviceUsages { - appendFileSystemUsageStateDataPoints(idps, i*fileSystemStatesLen, now, deviceUsage) + idps.EnsureCapacity(fileSystemStatesLen * len(deviceUsages)) + for _, deviceUsage := range deviceUsages { + appendFileSystemUsageStateDataPoints(idps, now, deviceUsage) } } diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_others.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_others.go index 5d674434c22..0cfdc6d2e05 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_others.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_others.go @@ -17,18 
+17,18 @@ package filesystemscraper import ( - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/metadata" ) const fileSystemStatesLen = 2 -func appendFileSystemUsageStateDataPoints(idps pdata.IntDataPointSlice, startIdx int, now pdata.Timestamp, deviceUsage *deviceUsage) { - initializeFileSystemUsageDataPoint(idps.At(startIdx+0), now, deviceUsage.partition, metadata.LabelFilesystemState.Used, int64(deviceUsage.usage.Used)) - initializeFileSystemUsageDataPoint(idps.At(startIdx+1), now, deviceUsage.partition, metadata.LabelFilesystemState.Free, int64(deviceUsage.usage.Free)) +func appendFileSystemUsageStateDataPoints(idps pdata.IntDataPointSlice, now pdata.Timestamp, deviceUsage *deviceUsage) { + initializeFileSystemUsageDataPoint(idps.AppendEmpty(), now, deviceUsage.partition, metadata.LabelFilesystemState.Used, int64(deviceUsage.usage.Used)) + initializeFileSystemUsageDataPoint(idps.AppendEmpty(), now, deviceUsage.partition, metadata.LabelFilesystemState.Free, int64(deviceUsage.usage.Free)) } const systemSpecificMetricsLen = 0 -func appendSystemSpecificMetrics(metrics pdata.MetricSlice, startIdx int, now pdata.Timestamp, deviceUsages []*deviceUsage) { +func appendSystemSpecificMetrics(metrics pdata.MetricSlice, now pdata.Timestamp, deviceUsages []*deviceUsage) { } diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_unix.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_unix.go index 25cbd506e24..a7d0e35021e 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_unix.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_unix.go @@ -17,29 +17,28 @@ package filesystemscraper import ( - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/metadata" ) const fileSystemStatesLen = 3 -func appendFileSystemUsageStateDataPoints(idps pdata.IntDataPointSlice, startIdx int, now pdata.Timestamp, deviceUsage *deviceUsage) { - initializeFileSystemUsageDataPoint(idps.At(startIdx+0), now, deviceUsage.partition, metadata.LabelFilesystemState.Used, int64(deviceUsage.usage.Used)) - initializeFileSystemUsageDataPoint(idps.At(startIdx+1), now, deviceUsage.partition, metadata.LabelFilesystemState.Free, int64(deviceUsage.usage.Free)) - initializeFileSystemUsageDataPoint(idps.At(startIdx+2), now, deviceUsage.partition, metadata.LabelFilesystemState.Reserved, int64(deviceUsage.usage.Total-deviceUsage.usage.Used-deviceUsage.usage.Free)) +func appendFileSystemUsageStateDataPoints(idps pdata.IntDataPointSlice, now pdata.Timestamp, deviceUsage *deviceUsage) { + initializeFileSystemUsageDataPoint(idps.AppendEmpty(), now, deviceUsage.partition, metadata.LabelFilesystemState.Used, int64(deviceUsage.usage.Used)) + initializeFileSystemUsageDataPoint(idps.AppendEmpty(), now, deviceUsage.partition, metadata.LabelFilesystemState.Free, int64(deviceUsage.usage.Free)) + initializeFileSystemUsageDataPoint(idps.AppendEmpty(), now, deviceUsage.partition, metadata.LabelFilesystemState.Reserved, int64(deviceUsage.usage.Total-deviceUsage.usage.Used-deviceUsage.usage.Free)) } const systemSpecificMetricsLen = 1 -func appendSystemSpecificMetrics(metrics 
pdata.MetricSlice, startIdx int, now pdata.Timestamp, deviceUsages []*deviceUsage) { - metric := metrics.At(startIdx) +func appendSystemSpecificMetrics(metrics pdata.MetricSlice, now pdata.Timestamp, deviceUsages []*deviceUsage) { + metric := metrics.AppendEmpty() metadata.Metrics.SystemFilesystemInodesUsage.Init(metric) idps := metric.IntSum().DataPoints() - idps.Resize(2 * len(deviceUsages)) - for idx, deviceUsage := range deviceUsages { - startIndex := 2 * idx - initializeFileSystemUsageDataPoint(idps.At(startIndex+0), now, deviceUsage.partition, metadata.LabelFilesystemState.Used, int64(deviceUsage.usage.InodesUsed)) - initializeFileSystemUsageDataPoint(idps.At(startIndex+1), now, deviceUsage.partition, metadata.LabelFilesystemState.Free, int64(deviceUsage.usage.InodesFree)) + idps.EnsureCapacity(2 * len(deviceUsages)) + for _, deviceUsage := range deviceUsages { + initializeFileSystemUsageDataPoint(idps.AppendEmpty(), now, deviceUsage.partition, metadata.LabelFilesystemState.Used, int64(deviceUsage.usage.InodesUsed)) + initializeFileSystemUsageDataPoint(idps.AppendEmpty(), now, deviceUsage.partition, metadata.LabelFilesystemState.Free, int64(deviceUsage.usage.InodesFree)) } } diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper/factory.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper/factory.go index 6c37cb9ed28..74d55376b96 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper/factory.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper/factory.go @@ -44,7 +44,7 @@ func (f *Factory) CreateMetricsScraper( ctx context.Context, logger *zap.Logger, config internal.Config, -) (scraperhelper.MetricsScraper, error) { +) (scraperhelper.Scraper, error) { cfg := config.(*Config) s := newLoadScraper(ctx, logger, cfg) diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper.go index b86df6fa050..5abb81a4d58 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper.go @@ -22,7 +22,7 @@ import ( "go.uber.org/zap" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/metadata" "go.opentelemetry.io/collector/receiver/scrapererror" ) @@ -63,17 +63,17 @@ func (s *scraper) scrape(_ context.Context) (pdata.MetricSlice, error) { return metrics, scrapererror.NewPartialScrapeError(err, metricsLen) } - metrics.Resize(metricsLen) + metrics.EnsureCapacity(metricsLen) - initializeLoadMetric(metrics.At(0), metadata.Metrics.SystemCPULoadAverage1m, now, avgLoadValues.Load1) - initializeLoadMetric(metrics.At(1), metadata.Metrics.SystemCPULoadAverage5m, now, avgLoadValues.Load5) - initializeLoadMetric(metrics.At(2), metadata.Metrics.SystemCPULoadAverage15m, now, avgLoadValues.Load15) + initializeLoadMetric(metrics.AppendEmpty(), metadata.Metrics.SystemCPULoadAverage1m, now, avgLoadValues.Load1) + initializeLoadMetric(metrics.AppendEmpty(), metadata.Metrics.SystemCPULoadAverage5m, now, avgLoadValues.Load5) + initializeLoadMetric(metrics.AppendEmpty(), metadata.Metrics.SystemCPULoadAverage15m, now, 
avgLoadValues.Load15) return metrics, nil } func initializeLoadMetric(metric pdata.Metric, metricDescriptor metadata.MetricIntf, now pdata.Timestamp, value float64) { metricDescriptor.Init(metric) - dp := metric.DoubleGauge().DataPoints().AppendEmpty() + dp := metric.Gauge().DataPoints().AppendEmpty() dp.SetTimestamp(now) dp.SetValue(value) } diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/factory.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/factory.go index c46615a215c..99d68cc31bf 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/factory.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/factory.go @@ -44,7 +44,7 @@ func (f *Factory) CreateMetricsScraper( ctx context.Context, _ *zap.Logger, config internal.Config, -) (scraperhelper.MetricsScraper, error) { +) (scraperhelper.Scraper, error) { cfg := config.(*Config) s := newMemoryScraper(ctx, cfg) diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper.go index 515715ec1f5..8330409806c 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper.go @@ -20,7 +20,7 @@ import ( "github.com/shirou/gopsutil/mem" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/metadata" "go.opentelemetry.io/collector/receiver/scrapererror" ) @@ -49,8 +49,8 @@ func (s *scraper) Scrape(_ context.Context) (pdata.MetricSlice, error) { return metrics, scrapererror.NewPartialScrapeError(err, metricsLen) } - metrics.Resize(metricsLen) - initializeMemoryUsageMetric(metrics.At(0), now, memInfo) + metrics.EnsureCapacity(metricsLen) + initializeMemoryUsageMetric(metrics.AppendEmpty(), now, memInfo) return metrics, nil } @@ -58,7 +58,7 @@ func initializeMemoryUsageMetric(metric pdata.Metric, now pdata.Timestamp, memIn metadata.Metrics.SystemMemoryUsage.Init(metric) idps := metric.IntSum().DataPoints() - idps.Resize(memStatesLen) + idps.EnsureCapacity(memStatesLen) appendMemoryUsageStateDataPoints(idps, now, memInfo) } diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_linux.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_linux.go index ce76541aa98..5d6efb6378a 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_linux.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_linux.go @@ -19,17 +19,17 @@ package memoryscraper import ( "github.com/shirou/gopsutil/mem" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/metadata" ) const memStatesLen = 6 func appendMemoryUsageStateDataPoints(idps pdata.IntDataPointSlice, now pdata.Timestamp, memInfo *mem.VirtualMemoryStat) { - initializeMemoryUsageDataPoint(idps.At(0), now, metadata.LabelMemState.Used, int64(memInfo.Used)) - initializeMemoryUsageDataPoint(idps.At(1), now, 
metadata.LabelMemState.Free, int64(memInfo.Free)) - initializeMemoryUsageDataPoint(idps.At(2), now, metadata.LabelMemState.Buffered, int64(memInfo.Buffers)) - initializeMemoryUsageDataPoint(idps.At(3), now, metadata.LabelMemState.Cached, int64(memInfo.Cached)) - initializeMemoryUsageDataPoint(idps.At(4), now, metadata.LabelMemState.SlabReclaimable, int64(memInfo.SReclaimable)) - initializeMemoryUsageDataPoint(idps.At(5), now, metadata.LabelMemState.SlabUnreclaimable, int64(memInfo.SUnreclaim)) + initializeMemoryUsageDataPoint(idps.AppendEmpty(), now, metadata.LabelMemState.Used, int64(memInfo.Used)) + initializeMemoryUsageDataPoint(idps.AppendEmpty(), now, metadata.LabelMemState.Free, int64(memInfo.Free)) + initializeMemoryUsageDataPoint(idps.AppendEmpty(), now, metadata.LabelMemState.Buffered, int64(memInfo.Buffers)) + initializeMemoryUsageDataPoint(idps.AppendEmpty(), now, metadata.LabelMemState.Cached, int64(memInfo.Cached)) + initializeMemoryUsageDataPoint(idps.AppendEmpty(), now, metadata.LabelMemState.SlabReclaimable, int64(memInfo.SReclaimable)) + initializeMemoryUsageDataPoint(idps.AppendEmpty(), now, metadata.LabelMemState.SlabUnreclaimable, int64(memInfo.SUnreclaim)) } diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_others.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_others.go index 1899721e6d8..d1c38a95bd8 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_others.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_others.go @@ -19,14 +19,14 @@ package memoryscraper import ( "github.com/shirou/gopsutil/mem" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/metadata" ) const memStatesLen = 3 func appendMemoryUsageStateDataPoints(idps pdata.IntDataPointSlice, now pdata.Timestamp, memInfo *mem.VirtualMemoryStat) { - initializeMemoryUsageDataPoint(idps.At(0), now, metadata.LabelMemState.Used, int64(memInfo.Used)) - initializeMemoryUsageDataPoint(idps.At(1), now, metadata.LabelMemState.Free, int64(memInfo.Free)) - initializeMemoryUsageDataPoint(idps.At(2), now, metadata.LabelMemState.Inactive, int64(memInfo.Inactive)) + initializeMemoryUsageDataPoint(idps.AppendEmpty(), now, metadata.LabelMemState.Used, int64(memInfo.Used)) + initializeMemoryUsageDataPoint(idps.AppendEmpty(), now, metadata.LabelMemState.Free, int64(memInfo.Free)) + initializeMemoryUsageDataPoint(idps.AppendEmpty(), now, metadata.LabelMemState.Inactive, int64(memInfo.Inactive)) } diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_windows.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_windows.go index 87e9ec32c5e..a9b0b426891 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_windows.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_windows.go @@ -19,13 +19,13 @@ package memoryscraper import ( "github.com/shirou/gopsutil/mem" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/metadata" ) const memStatesLen = 2 func 
appendMemoryUsageStateDataPoints(idps pdata.IntDataPointSlice, now pdata.Timestamp, memInfo *mem.VirtualMemoryStat) { - initializeMemoryUsageDataPoint(idps.At(0), now, metadata.LabelMemState.Used, int64(memInfo.Used)) - initializeMemoryUsageDataPoint(idps.At(1), now, metadata.LabelMemState.Free, int64(memInfo.Available)) + initializeMemoryUsageDataPoint(idps.AppendEmpty(), now, metadata.LabelMemState.Used, int64(memInfo.Used)) + initializeMemoryUsageDataPoint(idps.AppendEmpty(), now, metadata.LabelMemState.Free, int64(memInfo.Available)) } diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/networkscraper/factory.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/networkscraper/factory.go index 0be40b5229a..475775d8f41 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/networkscraper/factory.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/networkscraper/factory.go @@ -44,7 +44,7 @@ func (f *Factory) CreateMetricsScraper( ctx context.Context, _ *zap.Logger, config internal.Config, -) (scraperhelper.MetricsScraper, error) { +) (scraperhelper.Scraper, error) { cfg := config.(*Config) s, err := newNetworkScraper(ctx, cfg) if err != nil { diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go index 8ee6cbca901..3c9dc6562da 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go @@ -23,8 +23,8 @@ import ( "github.com/shirou/gopsutil/net" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/internal/processor/filterset" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/metadata" "go.opentelemetry.io/collector/receiver/scrapererror" ) @@ -112,11 +112,11 @@ func (s *scraper) scrapeAndAppendNetworkCounterMetrics(metrics pdata.MetricSlice if len(ioCounters) > 0 { startIdx := metrics.Len() - metrics.Resize(startIdx + networkMetricsLen) - initializeNetworkPacketsMetric(metrics.At(startIdx+0), metadata.Metrics.SystemNetworkPackets, startTime, now, ioCounters) - initializeNetworkDroppedPacketsMetric(metrics.At(startIdx+1), metadata.Metrics.SystemNetworkDropped, startTime, now, ioCounters) - initializeNetworkErrorsMetric(metrics.At(startIdx+2), metadata.Metrics.SystemNetworkErrors, startTime, now, ioCounters) - initializeNetworkIOMetric(metrics.At(startIdx+3), metadata.Metrics.SystemNetworkIo, startTime, now, ioCounters) + metrics.EnsureCapacity(startIdx + networkMetricsLen) + initializeNetworkPacketsMetric(metrics.AppendEmpty(), metadata.Metrics.SystemNetworkPackets, startTime, now, ioCounters) + initializeNetworkDroppedPacketsMetric(metrics.AppendEmpty(), metadata.Metrics.SystemNetworkDropped, startTime, now, ioCounters) + initializeNetworkErrorsMetric(metrics.AppendEmpty(), metadata.Metrics.SystemNetworkErrors, startTime, now, ioCounters) + initializeNetworkIOMetric(metrics.AppendEmpty(), metadata.Metrics.SystemNetworkIo, startTime, now, ioCounters) } return nil @@ -126,10 +126,10 @@ func initializeNetworkPacketsMetric(metric pdata.Metric, metricIntf metadata.Met metricIntf.Init(metric) idps := metric.IntSum().DataPoints() 
- idps.Resize(2 * len(ioCountersSlice)) - for idx, ioCounters := range ioCountersSlice { - initializeNetworkDataPoint(idps.At(2*idx+0), startTime, now, ioCounters.Name, metadata.LabelNetworkDirection.Transmit, int64(ioCounters.PacketsSent)) - initializeNetworkDataPoint(idps.At(2*idx+1), startTime, now, ioCounters.Name, metadata.LabelNetworkDirection.Receive, int64(ioCounters.PacketsRecv)) + idps.EnsureCapacity(2 * len(ioCountersSlice)) + for _, ioCounters := range ioCountersSlice { + initializeNetworkDataPoint(idps.AppendEmpty(), startTime, now, ioCounters.Name, metadata.LabelNetworkDirection.Transmit, int64(ioCounters.PacketsSent)) + initializeNetworkDataPoint(idps.AppendEmpty(), startTime, now, ioCounters.Name, metadata.LabelNetworkDirection.Receive, int64(ioCounters.PacketsRecv)) } } @@ -137,10 +137,10 @@ func initializeNetworkDroppedPacketsMetric(metric pdata.Metric, metricIntf metad metricIntf.Init(metric) idps := metric.IntSum().DataPoints() - idps.Resize(2 * len(ioCountersSlice)) - for idx, ioCounters := range ioCountersSlice { - initializeNetworkDataPoint(idps.At(2*idx+0), startTime, now, ioCounters.Name, metadata.LabelNetworkDirection.Transmit, int64(ioCounters.Dropout)) - initializeNetworkDataPoint(idps.At(2*idx+1), startTime, now, ioCounters.Name, metadata.LabelNetworkDirection.Receive, int64(ioCounters.Dropin)) + idps.EnsureCapacity(2 * len(ioCountersSlice)) + for _, ioCounters := range ioCountersSlice { + initializeNetworkDataPoint(idps.AppendEmpty(), startTime, now, ioCounters.Name, metadata.LabelNetworkDirection.Transmit, int64(ioCounters.Dropout)) + initializeNetworkDataPoint(idps.AppendEmpty(), startTime, now, ioCounters.Name, metadata.LabelNetworkDirection.Receive, int64(ioCounters.Dropin)) } } @@ -148,10 +148,10 @@ func initializeNetworkErrorsMetric(metric pdata.Metric, metricIntf metadata.Metr metricIntf.Init(metric) idps := metric.IntSum().DataPoints() - idps.Resize(2 * len(ioCountersSlice)) - for idx, ioCounters := range ioCountersSlice { - initializeNetworkDataPoint(idps.At(2*idx+0), startTime, now, ioCounters.Name, metadata.LabelNetworkDirection.Transmit, int64(ioCounters.Errout)) - initializeNetworkDataPoint(idps.At(2*idx+1), startTime, now, ioCounters.Name, metadata.LabelNetworkDirection.Receive, int64(ioCounters.Errin)) + idps.EnsureCapacity(2 * len(ioCountersSlice)) + for _, ioCounters := range ioCountersSlice { + initializeNetworkDataPoint(idps.AppendEmpty(), startTime, now, ioCounters.Name, metadata.LabelNetworkDirection.Transmit, int64(ioCounters.Errout)) + initializeNetworkDataPoint(idps.AppendEmpty(), startTime, now, ioCounters.Name, metadata.LabelNetworkDirection.Receive, int64(ioCounters.Errin)) } } @@ -159,10 +159,10 @@ func initializeNetworkIOMetric(metric pdata.Metric, metricIntf metadata.MetricIn metricIntf.Init(metric) idps := metric.IntSum().DataPoints() - idps.Resize(2 * len(ioCountersSlice)) - for idx, ioCounters := range ioCountersSlice { - initializeNetworkDataPoint(idps.At(2*idx+0), startTime, now, ioCounters.Name, metadata.LabelNetworkDirection.Transmit, int64(ioCounters.BytesSent)) - initializeNetworkDataPoint(idps.At(2*idx+1), startTime, now, ioCounters.Name, metadata.LabelNetworkDirection.Receive, int64(ioCounters.BytesRecv)) + idps.EnsureCapacity(2 * len(ioCountersSlice)) + for _, ioCounters := range ioCountersSlice { + initializeNetworkDataPoint(idps.AppendEmpty(), startTime, now, ioCounters.Name, metadata.LabelNetworkDirection.Transmit, int64(ioCounters.BytesSent)) + initializeNetworkDataPoint(idps.AppendEmpty(), startTime, now, 
ioCounters.Name, metadata.LabelNetworkDirection.Receive, int64(ioCounters.BytesRecv)) } } @@ -186,8 +186,8 @@ func (s *scraper) scrapeAndAppendNetworkConnectionsMetric(metrics pdata.MetricSl tcpConnectionStatusCounts := getTCPConnectionStatusCounts(connections) startIdx := metrics.Len() - metrics.Resize(startIdx + connectionsMetricsLen) - initializeNetworkConnectionsMetric(metrics.At(startIdx), now, tcpConnectionStatusCounts) + metrics.EnsureCapacity(startIdx + connectionsMetricsLen) + initializeNetworkConnectionsMetric(metrics.AppendEmpty(), now, tcpConnectionStatusCounts) return nil } @@ -207,12 +207,10 @@ func initializeNetworkConnectionsMetric(metric pdata.Metric, now pdata.Timestamp metadata.Metrics.SystemNetworkConnections.Init(metric) idps := metric.IntSum().DataPoints() - idps.Resize(len(connectionStateCounts)) + idps.EnsureCapacity(len(connectionStateCounts)) - i := 0 for connectionState, count := range connectionStateCounts { - initializeNetworkConnectionsDataPoint(idps.At(i), now, metadata.LabelNetworkProtocol.Tcp, connectionState, count) - i++ + initializeNetworkConnectionsDataPoint(idps.AppendEmpty(), now, metadata.LabelNetworkProtocol.Tcp, connectionState, count) } } diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/factory.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/factory.go index c2ede5eb1e1..36660504ad4 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/factory.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/factory.go @@ -44,7 +44,7 @@ func (f *Factory) CreateMetricsScraper( ctx context.Context, _ *zap.Logger, config internal.Config, -) (scraperhelper.MetricsScraper, error) { +) (scraperhelper.Scraper, error) { cfg := config.(*Config) s := newPagingScraper(ctx, cfg) diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_others.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_others.go index b121b535b36..2f066d06c95 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_others.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_others.go @@ -24,7 +24,7 @@ import ( "github.com/shirou/gopsutil/mem" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/metadata" "go.opentelemetry.io/collector/receiver/scrapererror" ) @@ -86,8 +86,8 @@ func (s *scraper) scrapeAndAppendPagingUsageMetric(metrics pdata.MetricSlice) er } idx := metrics.Len() - metrics.Resize(idx + pagingUsageMetricsLen) - initializePagingUsageMetric(metrics.At(idx), now, vmem) + metrics.EnsureCapacity(idx + pagingUsageMetricsLen) + initializePagingUsageMetric(metrics.AppendEmpty(), now, vmem) return nil } @@ -95,10 +95,10 @@ func initializePagingUsageMetric(metric pdata.Metric, now pdata.Timestamp, vmem metadata.Metrics.SystemPagingUsage.Init(metric) idps := metric.IntSum().DataPoints() - idps.Resize(3) - initializePagingUsageDataPoint(idps.At(0), now, metadata.LabelPagingState.Used, int64(vmem.SwapTotal-vmem.SwapFree-vmem.SwapCached)) - initializePagingUsageDataPoint(idps.At(1), now, metadata.LabelPagingState.Free, int64(vmem.SwapFree)) - 
initializePagingUsageDataPoint(idps.At(2), now, metadata.LabelPagingState.Cached, int64(vmem.SwapCached)) + idps.EnsureCapacity(3) + initializePagingUsageDataPoint(idps.AppendEmpty(), now, metadata.LabelPagingState.Used, int64(vmem.SwapTotal-vmem.SwapFree-vmem.SwapCached)) + initializePagingUsageDataPoint(idps.AppendEmpty(), now, metadata.LabelPagingState.Free, int64(vmem.SwapFree)) + initializePagingUsageDataPoint(idps.AppendEmpty(), now, metadata.LabelPagingState.Cached, int64(vmem.SwapCached)) } func initializePagingUsageDataPoint(dataPoint pdata.IntDataPoint, now pdata.Timestamp, stateLabel string, value int64) { @@ -116,9 +116,9 @@ func (s *scraper) scrapeAndAppendPagingMetrics(metrics pdata.MetricSlice) error } idx := metrics.Len() - metrics.Resize(idx + pagingMetricsLen) - initializePagingOperationsMetric(metrics.At(idx+0), s.startTime, now, swap) - initializePageFaultsMetric(metrics.At(idx+1), s.startTime, now, swap) + metrics.EnsureCapacity(idx + pagingMetricsLen) + initializePagingOperationsMetric(metrics.AppendEmpty(), s.startTime, now, swap) + initializePageFaultsMetric(metrics.AppendEmpty(), s.startTime, now, swap) return nil } @@ -126,11 +126,11 @@ func initializePagingOperationsMetric(metric pdata.Metric, startTime, now pdata. metadata.Metrics.SystemPagingOperations.Init(metric) idps := metric.IntSum().DataPoints() - idps.Resize(4) - initializePagingOperationsDataPoint(idps.At(0), startTime, now, metadata.LabelPagingType.Major, metadata.LabelPagingDirection.PageIn, int64(swap.Sin)) - initializePagingOperationsDataPoint(idps.At(1), startTime, now, metadata.LabelPagingType.Major, metadata.LabelPagingDirection.PageOut, int64(swap.Sout)) - initializePagingOperationsDataPoint(idps.At(2), startTime, now, metadata.LabelPagingType.Minor, metadata.LabelPagingDirection.PageIn, int64(swap.PgIn)) - initializePagingOperationsDataPoint(idps.At(3), startTime, now, metadata.LabelPagingType.Minor, metadata.LabelPagingDirection.PageOut, int64(swap.PgOut)) + idps.EnsureCapacity(4) + initializePagingOperationsDataPoint(idps.AppendEmpty(), startTime, now, metadata.LabelPagingType.Major, metadata.LabelPagingDirection.PageIn, int64(swap.Sin)) + initializePagingOperationsDataPoint(idps.AppendEmpty(), startTime, now, metadata.LabelPagingType.Major, metadata.LabelPagingDirection.PageOut, int64(swap.Sout)) + initializePagingOperationsDataPoint(idps.AppendEmpty(), startTime, now, metadata.LabelPagingType.Minor, metadata.LabelPagingDirection.PageIn, int64(swap.PgIn)) + initializePagingOperationsDataPoint(idps.AppendEmpty(), startTime, now, metadata.LabelPagingType.Minor, metadata.LabelPagingDirection.PageOut, int64(swap.PgOut)) } func initializePagingOperationsDataPoint(dataPoint pdata.IntDataPoint, startTime, now pdata.Timestamp, typeLabel string, directionLabel string, value int64) { @@ -146,9 +146,9 @@ func initializePageFaultsMetric(metric pdata.Metric, startTime, now pdata.Timest metadata.Metrics.SystemPagingFaults.Init(metric) idps := metric.IntSum().DataPoints() - idps.Resize(2) - initializePageFaultDataPoint(idps.At(0), startTime, now, metadata.LabelPagingType.Major, int64(swap.PgMajFault)) - initializePageFaultDataPoint(idps.At(1), startTime, now, metadata.LabelPagingType.Minor, int64(swap.PgFault-swap.PgMajFault)) + idps.EnsureCapacity(2) + initializePageFaultDataPoint(idps.AppendEmpty(), startTime, now, metadata.LabelPagingType.Major, int64(swap.PgMajFault)) + initializePageFaultDataPoint(idps.AppendEmpty(), startTime, now, metadata.LabelPagingType.Minor, int64(swap.PgFault-swap.PgMajFault)) 
} func initializePageFaultDataPoint(dataPoint pdata.IntDataPoint, startTime, now pdata.Timestamp, typeLabel string, value int64) { diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_windows.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_windows.go index 545149a1506..828ac7d3090 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_windows.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_windows.go @@ -24,7 +24,7 @@ import ( "github.com/shirou/gopsutil/host" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/metadata" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/perfcounters" "go.opentelemetry.io/collector/receiver/scrapererror" @@ -103,8 +103,8 @@ func (s *scraper) scrapeAndAppendPagingUsageMetric(metrics pdata.MetricSlice) er } idx := metrics.Len() - metrics.Resize(idx + pagingUsageMetricsLen) - s.initializePagingUsageMetric(metrics.At(idx), now, pageFiles) + metrics.EnsureCapacity(idx + pagingUsageMetricsLen) + s.initializePagingUsageMetric(metrics.AppendEmpty(), now, pageFiles) return nil } @@ -112,13 +112,11 @@ func (s *scraper) initializePagingUsageMetric(metric pdata.Metric, now pdata.Tim metadata.Metrics.SystemPagingUsage.Init(metric) idps := metric.IntSum().DataPoints() - idps.Resize(2 * len(pageFiles)) + idps.EnsureCapacity(2 * len(pageFiles)) - idx := 0 for _, pageFile := range pageFiles { - initializePagingUsageDataPoint(idps.At(idx+0), now, pageFile.name, metadata.LabelPagingState.Used, int64(pageFile.usedPages*s.pageSize)) - initializePagingUsageDataPoint(idps.At(idx+1), now, pageFile.name, metadata.LabelPagingState.Free, int64((pageFile.totalPages-pageFile.usedPages)*s.pageSize)) - idx += 2 + initializePagingUsageDataPoint(idps.AppendEmpty(), now, pageFile.name, metadata.LabelPagingState.Used, int64(pageFile.usedPages*s.pageSize)) + initializePagingUsageDataPoint(idps.AppendEmpty(), now, pageFile.name, metadata.LabelPagingState.Free, int64((pageFile.totalPages-pageFile.usedPages)*s.pageSize)) } } @@ -150,8 +148,8 @@ func (s *scraper) scrapeAndAppendPagingOperationsMetric(metrics pdata.MetricSlic if len(memoryCounterValues) > 0 { idx := metrics.Len() - metrics.Resize(idx + pagingMetricsLen) - initializePagingOperationsMetric(metrics.At(idx), s.startTime, now, memoryCounterValues[0]) + metrics.EnsureCapacity(idx + pagingMetricsLen) + initializePagingOperationsMetric(metrics.AppendEmpty(), s.startTime, now, memoryCounterValues[0]) } return nil @@ -161,9 +159,9 @@ func initializePagingOperationsMetric(metric pdata.Metric, startTime, now pdata. 
metadata.Metrics.SystemPagingOperations.Init(metric) idps := metric.IntSum().DataPoints() - idps.Resize(2) - initializePagingOperationsDataPoint(idps.At(0), startTime, now, metadata.LabelPagingDirection.PageIn, memoryCounterValues.Values[pageReadsPerSec]) - initializePagingOperationsDataPoint(idps.At(1), startTime, now, metadata.LabelPagingDirection.PageOut, memoryCounterValues.Values[pageWritesPerSec]) + idps.EnsureCapacity(2) + initializePagingOperationsDataPoint(idps.AppendEmpty(), startTime, now, metadata.LabelPagingDirection.PageIn, memoryCounterValues.Values[pageReadsPerSec]) + initializePagingOperationsDataPoint(idps.AppendEmpty(), startTime, now, metadata.LabelPagingDirection.PageOut, memoryCounterValues.Values[pageWritesPerSec]) } func initializePagingOperationsDataPoint(dataPoint pdata.IntDataPoint, startTime, now pdata.Timestamp, directionLabel string, value int64) { diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/factory.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/factory.go index 0b881bc87bb..df696a5c680 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/factory.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/factory.go @@ -44,7 +44,7 @@ func (f *Factory) CreateMetricsScraper( ctx context.Context, _ *zap.Logger, config internal.Config, -) (scraperhelper.MetricsScraper, error) { +) (scraperhelper.Scraper, error) { cfg := config.(*Config) s := newProcessesScraper(ctx, cfg) diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper.go index 224238e2a67..7147ade28da 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper.go @@ -21,7 +21,7 @@ import ( "github.com/shirou/gopsutil/load" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) // scraper for Processes Metrics @@ -45,13 +45,13 @@ func (s *scraper) start(context.Context, component.Host) error { if err != nil { return err } - - s.startTime = pdata.Timestamp(bootTime) + // bootTime is seconds since 1970, timestamps are in nanoseconds. 
+ s.startTime = pdata.Timestamp(bootTime * 1e9) return nil } func (s *scraper) scrape(_ context.Context) (pdata.MetricSlice, error) { metrics := pdata.NewMetricSlice() - err := appendSystemSpecificProcessesMetrics(metrics, 0, s.misc) + err := appendSystemSpecificProcessesMetrics(metrics, s.startTime, s.misc) return metrics, err } diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_darwin.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_darwin.go index 7a4297443f4..9be55ba4ac9 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_darwin.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_darwin.go @@ -19,11 +19,11 @@ package processesscraper import ( "github.com/shirou/gopsutil/load" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) const unixSystemSpecificMetricsLen = 0 -func appendUnixSystemSpecificProcessesMetrics(metrics pdata.MetricSlice, startIndex int, now pdata.Timestamp, misc *load.MiscStat) error { +func appendUnixSystemSpecificProcessesMetrics(metrics pdata.MetricSlice, startTime pdata.Timestamp, now pdata.Timestamp, misc *load.MiscStat) error { return nil } diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_fallback.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_fallback.go index 265aa89fa86..bddab0a0537 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_fallback.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_fallback.go @@ -16,8 +16,8 @@ package processesscraper -import "go.opentelemetry.io/collector/consumer/pdata" +import "go.opentelemetry.io/collector/model/pdata" -func appendSystemSpecificProcessesMetrics(metrics pdata.MetricSlice, startIndex int, miscFunc getMiscStats) error { +func appendSystemSpecificProcessesMetrics(metrics pdata.MetricSlice, startTime pdata.Timestamp, miscFunc getMiscStats) error { return nil } diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_linux.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_linux.go index 26dec4e2331..7953eb06d9d 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_linux.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_linux.go @@ -19,20 +19,21 @@ package processesscraper import ( "github.com/shirou/gopsutil/load" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/metadata" ) const unixSystemSpecificMetricsLen = 1 -func appendUnixSystemSpecificProcessesMetrics(metrics pdata.MetricSlice, startIndex int, now pdata.Timestamp, misc *load.MiscStat) error { - initializeProcessesCreatedMetric(metrics.At(startIndex), now, misc) +func appendUnixSystemSpecificProcessesMetrics(metrics pdata.MetricSlice, startTime pdata.Timestamp, now pdata.Timestamp, misc *load.MiscStat) error { + 
initializeProcessesCreatedMetric(metrics.AppendEmpty(), startTime, now, misc) return nil } -func initializeProcessesCreatedMetric(metric pdata.Metric, now pdata.Timestamp, misc *load.MiscStat) { +func initializeProcessesCreatedMetric(metric pdata.Metric, startTime, now pdata.Timestamp, misc *load.MiscStat) { metadata.Metrics.SystemProcessesCreated.Init(metric) ddp := metric.IntSum().DataPoints().AppendEmpty() + ddp.SetStartTimestamp(startTime) ddp.SetTimestamp(now) ddp.SetValue(int64(misc.ProcsCreated)) } diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_unix.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_unix.go index b55494aefff..0703c608dd8 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_unix.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_unix.go @@ -21,7 +21,7 @@ import ( "github.com/shirou/gopsutil/load" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/metadata" "go.opentelemetry.io/collector/receiver/scrapererror" ) @@ -31,30 +31,31 @@ const ( unixMetricsLen = standardUnixMetricsLen + unixSystemSpecificMetricsLen ) -func appendSystemSpecificProcessesMetrics(metrics pdata.MetricSlice, startIndex int, miscFunc getMiscStats) error { +func appendSystemSpecificProcessesMetrics(metrics pdata.MetricSlice, startTime pdata.Timestamp, miscFunc getMiscStats) error { now := pdata.TimestampFromTime(time.Now()) misc, err := miscFunc() if err != nil { return scrapererror.NewPartialScrapeError(err, unixMetricsLen) } - metrics.Resize(startIndex + unixMetricsLen) - initializeProcessesCountMetric(metrics.At(startIndex+0), now, misc) - return appendUnixSystemSpecificProcessesMetrics(metrics, startIndex+1, now, misc) + metrics.EnsureCapacity(unixMetricsLen) + initializeProcessesCountMetric(metrics.AppendEmpty(), startTime, now, misc) + return appendUnixSystemSpecificProcessesMetrics(metrics, startTime, now, misc) } -func initializeProcessesCountMetric(metric pdata.Metric, now pdata.Timestamp, misc *load.MiscStat) { +func initializeProcessesCountMetric(metric pdata.Metric, startTime pdata.Timestamp, now pdata.Timestamp, misc *load.MiscStat) { metadata.Metrics.SystemProcessesCount.Init(metric) ddps := metric.IntSum().DataPoints() - ddps.Resize(2) - initializeProcessesCountDataPoint(ddps.At(0), now, metadata.LabelProcessesStatus.Running, int64(misc.ProcsRunning)) - initializeProcessesCountDataPoint(ddps.At(1), now, metadata.LabelProcessesStatus.Blocked, int64(misc.ProcsBlocked)) + ddps.EnsureCapacity(2) + initializeProcessesCountDataPoint(ddps.AppendEmpty(), startTime, now, metadata.LabelProcessesStatus.Running, int64(misc.ProcsRunning)) + initializeProcessesCountDataPoint(ddps.AppendEmpty(), startTime, now, metadata.LabelProcessesStatus.Blocked, int64(misc.ProcsBlocked)) } -func initializeProcessesCountDataPoint(dataPoint pdata.IntDataPoint, now pdata.Timestamp, statusLabel string, value int64) { +func initializeProcessesCountDataPoint(dataPoint pdata.IntDataPoint, startTime pdata.Timestamp, now pdata.Timestamp, statusLabel string, value int64) { labelsMap := dataPoint.LabelsMap() labelsMap.Insert(metadata.Labels.ProcessesStatus, statusLabel) + dataPoint.SetStartTimestamp(startTime) dataPoint.SetTimestamp(now) 
dataPoint.SetValue(value) } diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/factory.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/factory.go index 07e4addbb68..073a563df51 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/factory.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/factory.go @@ -47,7 +47,7 @@ func (f *Factory) CreateResourceMetricsScraper( _ context.Context, _ *zap.Logger, cfg internal.Config, -) (scraperhelper.ResourceMetricsScraper, error) { +) (scraperhelper.Scraper, error) { if runtime.GOOS != "linux" && runtime.GOOS != "windows" { return nil, errors.New("process scraper only available on Linux or Windows") } diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process.go index c49e0b00d97..0bbbae1d4cd 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process.go @@ -20,7 +20,7 @@ import ( "github.com/shirou/gopsutil/cpu" "github.com/shirou/gopsutil/process" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/translator/conventions" ) diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go index 36742fcac4e..a2ab8286d41 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go @@ -24,8 +24,8 @@ import ( "github.com/shirou/gopsutil/process" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/internal/processor/filterset" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/metadata" "go.opentelemetry.io/collector/receiver/scrapererror" ) @@ -98,9 +98,9 @@ func (s *scraper) scrape(_ context.Context) (pdata.ResourceMetricsSlice, error) errs.AddPartial(partialErr.Failed, partialErr) } - rms.Resize(len(metadata)) - for i, md := range metadata { - rm := rms.At(i) + rms.EnsureCapacity(len(metadata)) + for _, md := range metadata { + rm := rms.AppendEmpty() md.initializeResource(rm.Resource()) metrics := rm.InstrumentationLibraryMetrics().AppendEmpty().Metrics() @@ -181,17 +181,15 @@ func scrapeAndAppendCPUTimeMetric(metrics pdata.MetricSlice, startTime, now pdat return err } - startIdx := metrics.Len() - metrics.Resize(startIdx + cpuMetricsLen) - initializeCPUTimeMetric(metrics.At(startIdx), startTime, now, times) + initializeCPUTimeMetric(metrics.AppendEmpty(), startTime, now, times) return nil } func initializeCPUTimeMetric(metric pdata.Metric, startTime, now pdata.Timestamp, times *cpu.TimesStat) { metadata.Metrics.ProcessCPUTime.Init(metric) - ddps := metric.DoubleSum().DataPoints() - ddps.Resize(cpuStatesLen) + ddps := metric.Sum().DataPoints() + ddps.EnsureCapacity(cpuStatesLen) appendCPUTimeStateDataPoints(ddps, startTime, now, times) } @@ -201,10 +199,8 @@ func 
scrapeAndAppendMemoryUsageMetrics(metrics pdata.MetricSlice, now pdata.Time return err } - startIdx := metrics.Len() - metrics.Resize(startIdx + memoryMetricsLen) - initializeMemoryUsageMetric(metrics.At(startIdx+0), metadata.Metrics.ProcessMemoryPhysicalUsage, now, int64(mem.RSS)) - initializeMemoryUsageMetric(metrics.At(startIdx+1), metadata.Metrics.ProcessMemoryVirtualUsage, now, int64(mem.VMS)) + initializeMemoryUsageMetric(metrics.AppendEmpty(), metadata.Metrics.ProcessMemoryPhysicalUsage, now, int64(mem.RSS)) + initializeMemoryUsageMetric(metrics.AppendEmpty(), metadata.Metrics.ProcessMemoryVirtualUsage, now, int64(mem.VMS)) return nil } @@ -224,9 +220,7 @@ func scrapeAndAppendDiskIOMetric(metrics pdata.MetricSlice, startTime, now pdata return err } - startIdx := metrics.Len() - metrics.Resize(startIdx + diskMetricsLen) - initializeDiskIOMetric(metrics.At(startIdx), startTime, now, io) + initializeDiskIOMetric(metrics.AppendEmpty(), startTime, now, io) return nil } @@ -234,9 +228,8 @@ func initializeDiskIOMetric(metric pdata.Metric, startTime, now pdata.Timestamp, metadata.Metrics.ProcessDiskIo.Init(metric) idps := metric.IntSum().DataPoints() - idps.Resize(2) - initializeDiskIODataPoint(idps.At(0), startTime, now, int64(io.ReadBytes), metadata.LabelProcessDirection.Read) - initializeDiskIODataPoint(idps.At(1), startTime, now, int64(io.WriteBytes), metadata.LabelProcessDirection.Write) + initializeDiskIODataPoint(idps.AppendEmpty(), startTime, now, int64(io.ReadBytes), metadata.LabelProcessDirection.Read) + initializeDiskIODataPoint(idps.AppendEmpty(), startTime, now, int64(io.WriteBytes), metadata.LabelProcessDirection.Write) } func initializeDiskIODataPoint(dataPoint pdata.IntDataPoint, startTime, now pdata.Timestamp, value int64, directionLabel string) { diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_linux.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_linux.go index 487721ef3a9..2db78072a62 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_linux.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_linux.go @@ -19,16 +19,16 @@ package processscraper import ( "github.com/shirou/gopsutil/cpu" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/metadata" ) const cpuStatesLen = 3 func appendCPUTimeStateDataPoints(ddps pdata.DoubleDataPointSlice, startTime, now pdata.Timestamp, cpuTime *cpu.TimesStat) { - initializeCPUTimeDataPoint(ddps.At(0), startTime, now, cpuTime.User, metadata.LabelProcessState.User) - initializeCPUTimeDataPoint(ddps.At(1), startTime, now, cpuTime.System, metadata.LabelProcessState.System) - initializeCPUTimeDataPoint(ddps.At(2), startTime, now, cpuTime.Iowait, metadata.LabelProcessState.Wait) + initializeCPUTimeDataPoint(ddps.AppendEmpty(), startTime, now, cpuTime.User, metadata.LabelProcessState.User) + initializeCPUTimeDataPoint(ddps.AppendEmpty(), startTime, now, cpuTime.System, metadata.LabelProcessState.System) + initializeCPUTimeDataPoint(ddps.AppendEmpty(), startTime, now, cpuTime.Iowait, metadata.LabelProcessState.Wait) } func initializeCPUTimeDataPoint(dataPoint pdata.DoubleDataPoint, startTime, now pdata.Timestamp, value float64, stateLabel string) { diff --git 
a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_others.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_others.go index 6318faf257e..868db703274 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_others.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_others.go @@ -19,7 +19,7 @@ package processscraper import ( "github.com/shirou/gopsutil/cpu" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) const cpuStatesLen = 0 diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_windows.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_windows.go index 08e31bf85c3..9685c5aa6a9 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_windows.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_windows.go @@ -22,15 +22,15 @@ import ( "github.com/shirou/gopsutil/cpu" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/metadata" ) const cpuStatesLen = 2 func appendCPUTimeStateDataPoints(ddps pdata.DoubleDataPointSlice, startTime, now pdata.Timestamp, cpuTime *cpu.TimesStat) { - initializeCPUTimeDataPoint(ddps.At(0), startTime, now, cpuTime.User, metadata.LabelProcessState.User) - initializeCPUTimeDataPoint(ddps.At(1), startTime, now, cpuTime.System, metadata.LabelProcessState.System) + initializeCPUTimeDataPoint(ddps.AppendEmpty(), startTime, now, cpuTime.User, metadata.LabelProcessState.User) + initializeCPUTimeDataPoint(ddps.AppendEmpty(), startTime, now, cpuTime.System, metadata.LabelProcessState.System) } func initializeCPUTimeDataPoint(dataPoint pdata.DoubleDataPoint, startTime, now pdata.Timestamp, value float64, stateLabel string) { diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/internal/testutils.go b/internal/otel_collector/receiver/hostmetricsreceiver/internal/testutils.go index 236af01f1fc..bba364982a2 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/internal/testutils.go +++ b/internal/otel_collector/receiver/hostmetricsreceiver/internal/testutils.go @@ -20,7 +20,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) func AssertContainsAttribute(t *testing.T, attr pdata.AttributeMap, key string) { @@ -47,8 +47,8 @@ func AssertIntGaugeMetricLabelHasValue(t *testing.T, metric pdata.Metric, index assert.Equal(t, expectedVal, val) } -func AssertDoubleSumMetricLabelHasValue(t *testing.T, metric pdata.Metric, index int, labelName string, expectedVal string) { - val, ok := metric.DoubleSum().DataPoints().At(index).LabelsMap().Get(labelName) +func AssertSumMetricLabelHasValue(t *testing.T, metric pdata.Metric, index int, labelName string, expectedVal string) { + val, ok := metric.Sum().DataPoints().At(index).LabelsMap().Get(labelName) assert.Truef(t, ok, "Missing label %q in metric %q", labelName, metric.Name()) assert.Equal(t, expectedVal, val) } @@ -58,20 +58,20 @@ func AssertIntSumMetricLabelExists(t *testing.T, metric 
pdata.Metric, index int, assert.Truef(t, ok, "Missing label %q in metric %q", labelName, metric.Name()) } -func AssertDoubleSumMetricLabelExists(t *testing.T, metric pdata.Metric, index int, labelName string) { - _, ok := metric.DoubleSum().DataPoints().At(index).LabelsMap().Get(labelName) +func AssertSumMetricLabelExists(t *testing.T, metric pdata.Metric, index int, labelName string) { + _, ok := metric.Sum().DataPoints().At(index).LabelsMap().Get(labelName) assert.Truef(t, ok, "Missing label %q in metric %q", labelName, metric.Name()) } func AssertIntSumMetricStartTimeEquals(t *testing.T, metric pdata.Metric, startTime pdata.Timestamp) { idps := metric.IntSum().DataPoints() for i := 0; i < idps.Len(); i++ { - require.Equal(t, startTime, idps.At(i).StartTimestamp()) + require.Equal(t, startTime, idps.At(i).StartTimestamp(), "Start time %d not found in metric point: %q", startTime, idps.At(i)) } } -func AssertDoubleSumMetricStartTimeEquals(t *testing.T, metric pdata.Metric, startTime pdata.Timestamp) { - ddps := metric.DoubleSum().DataPoints() +func AssertSumMetricStartTimeEquals(t *testing.T, metric pdata.Metric, startTime pdata.Timestamp) { + ddps := metric.Sum().DataPoints() for i := 0; i < ddps.Len(); i++ { require.Equal(t, startTime, ddps.At(i).StartTimestamp()) } @@ -97,8 +97,8 @@ func AssertSameTimeStampForMetrics(t *testing.T, metrics pdata.MetricSlice, star } } - if dt == pdata.MetricDataTypeDoubleSum { - ddps := metric.DoubleSum().DataPoints() + if dt == pdata.MetricDataTypeSum { + ddps := metric.Sum().DataPoints() for j := 0; j < ddps.Len(); j++ { if ts == 0 { ts = ddps.At(j).Timestamp() diff --git a/internal/otel_collector/receiver/hostmetricsreceiver/metadata.yaml b/internal/otel_collector/receiver/hostmetricsreceiver/metadata.yaml index b7a0bd641d9..3b1b60ea51b 100644 --- a/internal/otel_collector/receiver/hostmetricsreceiver/metadata.yaml +++ b/internal/otel_collector/receiver/hostmetricsreceiver/metadata.yaml @@ -101,7 +101,7 @@ metrics: description: Total CPU seconds broken down by different states. unit: s data: - type: double sum + type: sum aggregation: cumulative monotonic: true labels: [process.state] @@ -135,7 +135,7 @@ metrics: description: Total CPU seconds broken down by different states. unit: s data: - type: double sum + type: sum aggregation: cumulative monotonic: true labels: [cpu.state] @@ -153,19 +153,19 @@ metrics: description: Average CPU Load over 1 minute. unit: 1 data: - type: double gauge + type: gauge system.cpu.load_average.5m: description: Average CPU Load over 5 minutes. unit: 1 data: - type: double gauge + type: gauge system.cpu.load_average.15m: description: Average CPU Load over 15 minutes. unit: 1 data: - type: double gauge + type: gauge system.disk.io: description: Disk bytes transferred. @@ -189,7 +189,7 @@ metrics: description: Time disk spent activated. On Windows, this is calculated as the inverse of disk idle time. unit: s data: - type: double sum + type: sum aggregation: cumulative monotonic: true labels: [disk.device] @@ -198,7 +198,7 @@ metrics: description: Time spent in disk operations. unit: s data: - type: double sum + type: sum aggregation: cumulative monotonic: true labels: [disk.device, disk.direction] @@ -207,7 +207,7 @@ metrics: description: Time disk spent activated multiplied by the queue length. 
unit: s data: - type: double sum + type: sum aggregation: cumulative monotonic: true labels: [disk.device] diff --git a/internal/otel_collector/receiver/jaegerreceiver/config.go b/internal/otel_collector/receiver/jaegerreceiver/config.go index f08387dd262..7ed8432eeaa 100644 --- a/internal/otel_collector/receiver/jaegerreceiver/config.go +++ b/internal/otel_collector/receiver/jaegerreceiver/config.go @@ -17,8 +17,6 @@ package jaegerreceiver import ( "fmt" - "github.com/spf13/cast" - "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/config/configgrpc" "go.opentelemetry.io/collector/config/confighttp" @@ -109,33 +107,23 @@ func (cfg *Config) Unmarshal(componentParser *configparser.Parser) error { return err } - protocols := cast.ToStringMap(componentParser.Get(protocolsFieldName)) - knownProtocols := 0 - if _, ok := protocols[protoGRPC]; !ok { + protocols, err := componentParser.Sub(protocolsFieldName) + if err != nil { + return err + } + + if !protocols.IsSet(protoGRPC) { cfg.GRPC = nil - } else { - knownProtocols++ } - if _, ok := protocols[protoThriftHTTP]; !ok { + if !protocols.IsSet(protoThriftHTTP) { cfg.ThriftHTTP = nil - } else { - knownProtocols++ } - if _, ok := protocols[protoThriftBinary]; !ok { + if !protocols.IsSet(protoThriftBinary) { cfg.ThriftBinary = nil - } else { - knownProtocols++ } - if _, ok := protocols[protoThriftCompact]; !ok { + if !protocols.IsSet(protoThriftCompact) { cfg.ThriftCompact = nil - } else { - knownProtocols++ - } - // UnmarshalExact will ignore empty entries like a protocol with no values, so if a typo happened - // in the protocol that is intended to be enabled will not be enabled. So check if the protocols - // include only known protocols. - if len(protocols) != knownProtocols { - return fmt.Errorf("unknown protocols in the Jaeger receiver") } + return nil } diff --git a/internal/otel_collector/receiver/jaegerreceiver/doc.go b/internal/otel_collector/receiver/jaegerreceiver/doc.go new file mode 100644 index 00000000000..e037dfb2ce6 --- /dev/null +++ b/internal/otel_collector/receiver/jaegerreceiver/doc.go @@ -0,0 +1,16 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package jaegerreceiver receives Jaeger traces. +package jaegerreceiver diff --git a/internal/otel_collector/receiver/jaegerreceiver/trace_receiver.go b/internal/otel_collector/receiver/jaegerreceiver/trace_receiver.go index 4c86356eac4..63f138d2351 100644 --- a/internal/otel_collector/receiver/jaegerreceiver/trace_receiver.go +++ b/internal/otel_collector/receiver/jaegerreceiver/trace_receiver.go @@ -244,9 +244,7 @@ func (h *agentHandler) EmitZipkinBatch(context.Context, []*zipkincore.Span) (err // EmitBatch implements thrift-gen/agent/Agent and it forwards // Jaeger spans received by the Jaeger agent processor. 
func (h *agentHandler) EmitBatch(ctx context.Context, batch *jaeger.Batch) error { - ctx = obsreport.ReceiverContext(ctx, h.id, h.transport) ctx = h.obsrecv.StartTracesOp(ctx) - numSpans, err := consumeTraces(ctx, batch, h.nextConsumer) h.obsrecv.EndTracesOp(ctx, thriftFormat, numSpans, err) return err @@ -272,7 +270,6 @@ func (jr *jReceiver) PostSpans(ctx context.Context, r *api_v2.PostSpansRequest) ctx = client.NewContext(ctx, c) } - ctx = obsreport.ReceiverContext(ctx, jr.id, grpcTransport) ctx = jr.grpcObsrecv.StartTracesOp(ctx) td := jaegertranslator.ProtoBatchToInternalTraces(r.GetBatch()) @@ -422,7 +419,6 @@ func (jr *jReceiver) HandleThriftHTTPBatch(w http.ResponseWriter, r *http.Reques ctx = client.NewContext(ctx, c) } - ctx = obsreport.ReceiverContext(ctx, jr.id, collectorHTTPTransport) ctx = jr.httpObsrecv.StartTracesOp(ctx) batch, hErr := jr.decodeThriftHTTPBody(r) @@ -455,7 +451,7 @@ func (jr *jReceiver) startCollector(host component.Host) error { nr := mux.NewRouter() nr.HandleFunc("/api/traces", jr.HandleThriftHTTPBatch).Methods(http.MethodPost) - jr.collectorServer = &http.Server{Handler: nr} + jr.collectorServer = jr.config.CollectorHTTPSettings.ToServer(nr) jr.goroutines.Add(1) go func() { defer jr.goroutines.Done() diff --git a/internal/otel_collector/receiver/kafkareceiver/doc.go b/internal/otel_collector/receiver/kafkareceiver/doc.go new file mode 100644 index 00000000000..c675e583a54 --- /dev/null +++ b/internal/otel_collector/receiver/kafkareceiver/doc.go @@ -0,0 +1,16 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package kafkareceiver receives traces from Kafka. +package kafkareceiver diff --git a/internal/otel_collector/receiver/kafkareceiver/factory.go b/internal/otel_collector/receiver/kafkareceiver/factory.go index 8e54ed3bb3d..7dd50be43e5 100644 --- a/internal/otel_collector/receiver/kafkareceiver/factory.go +++ b/internal/otel_collector/receiver/kafkareceiver/factory.go @@ -54,6 +54,15 @@ func WithTracesUnmarshalers(tracesUnmarshalers ...TracesUnmarshaler) FactoryOpti } } +// WithMetricsUnmarshalers adds MetricsUnmarshalers. +func WithMetricsUnmarshalers(metricsUnmarshalers ...MetricsUnmarshaler) FactoryOption { + return func(factory *kafkaReceiverFactory) { + for _, unmarshaler := range metricsUnmarshalers { + factory.metricsUnmarshalers[unmarshaler.Encoding()] = unmarshaler + } + } +} + // WithLogsUnmarshalers adds LogsUnmarshalers. func WithLogsUnmarshalers(logsUnmarshalers ...LogsUnmarshaler) FactoryOption { return func(factory *kafkaReceiverFactory) { @@ -66,8 +75,9 @@ func WithLogsUnmarshalers(logsUnmarshalers ...LogsUnmarshaler) FactoryOption { // NewFactory creates Kafka receiver factory. 
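// Editor's note: an illustrative sketch, not part of the patch. It shows how a
// collector distribution might consume the factory options from this file once
// the metrics path added in this hunk is in place. The "custom" unmarshaler is
// a hypothetical user-supplied encoding; NewFactory and WithMetricsUnmarshalers
// are the functions defined in this file.
package example

import (
	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/receiver/kafkareceiver"
)

func newKafkaFactory(custom kafkareceiver.MetricsUnmarshaler) component.ReceiverFactory {
	// The defaults already include the OTLP protobuf metrics unmarshaler; the
	// option below only registers extra encodings, keyed by custom.Encoding().
	return kafkareceiver.NewFactory(
		kafkareceiver.WithMetricsUnmarshalers(custom),
	)
}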
func NewFactory(options ...FactoryOption) component.ReceiverFactory { f := &kafkaReceiverFactory{ - tracesUnmarshalers: defaultTracesUnmarshalers(), - logsUnmarshalers: defaultLogsUnmarshalers(), + tracesUnmarshalers: defaultTracesUnmarshalers(), + metricsUnmarshalers: defaultMetricsUnmarshalers(), + logsUnmarshalers: defaultLogsUnmarshalers(), } for _, o := range options { o(f) @@ -76,6 +86,7 @@ func NewFactory(options ...FactoryOption) component.ReceiverFactory { typeStr, createDefaultConfig, receiverhelper.WithTraces(f.createTracesReceiver), + receiverhelper.WithMetrics(f.createMetricsReceiver), receiverhelper.WithLogs(f.createLogsReceiver), ) } @@ -99,8 +110,9 @@ func createDefaultConfig() config.Receiver { } type kafkaReceiverFactory struct { - tracesUnmarshalers map[string]TracesUnmarshaler - logsUnmarshalers map[string]LogsUnmarshaler + tracesUnmarshalers map[string]TracesUnmarshaler + metricsUnmarshalers map[string]MetricsUnmarshaler + logsUnmarshalers map[string]LogsUnmarshaler } func (f *kafkaReceiverFactory) createTracesReceiver( @@ -117,6 +129,20 @@ func (f *kafkaReceiverFactory) createTracesReceiver( return r, nil } +func (f *kafkaReceiverFactory) createMetricsReceiver( + _ context.Context, + set component.ReceiverCreateSettings, + cfg config.Receiver, + nextConsumer consumer.Metrics, +) (component.MetricsReceiver, error) { + c := cfg.(*Config) + r, err := newMetricsReceiver(*c, set, f.metricsUnmarshalers, nextConsumer) + if err != nil { + return nil, err + } + return r, nil +} + func (f *kafkaReceiverFactory) createLogsReceiver( _ context.Context, set component.ReceiverCreateSettings, diff --git a/internal/otel_collector/receiver/kafkareceiver/jaeger_unmarshaler.go b/internal/otel_collector/receiver/kafkareceiver/jaeger_unmarshaler.go index 1a9c566a6a6..b593e560453 100644 --- a/internal/otel_collector/receiver/kafkareceiver/jaeger_unmarshaler.go +++ b/internal/otel_collector/receiver/kafkareceiver/jaeger_unmarshaler.go @@ -20,7 +20,7 @@ import ( "github.com/gogo/protobuf/jsonpb" jaegerproto "github.com/jaegertracing/jaeger/model" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" jaegertranslator "go.opentelemetry.io/collector/translator/trace/jaeger" ) diff --git a/internal/otel_collector/receiver/kafkareceiver/kafka_receiver.go b/internal/otel_collector/receiver/kafkareceiver/kafka_receiver.go index 2d2d912e060..f864603164e 100644 --- a/internal/otel_collector/receiver/kafkareceiver/kafka_receiver.go +++ b/internal/otel_collector/receiver/kafkareceiver/kafka_receiver.go @@ -49,6 +49,18 @@ type kafkaTracesConsumer struct { logger *zap.Logger } +// kafkaMetricsConsumer uses sarama to consume and handle messages from kafka. +type kafkaMetricsConsumer struct { + id config.ComponentID + consumerGroup sarama.ConsumerGroup + nextConsumer consumer.Metrics + topics []string + cancelConsumeLoop context.CancelFunc + unmarshaler MetricsUnmarshaler + + logger *zap.Logger +} + // kafkaLogsConsumer uses sarama to consume and handle messages from kafka. 
type kafkaLogsConsumer struct { id config.ComponentID @@ -62,6 +74,7 @@ type kafkaLogsConsumer struct { } var _ component.Receiver = (*kafkaTracesConsumer)(nil) +var _ component.Receiver = (*kafkaMetricsConsumer)(nil) var _ component.Receiver = (*kafkaLogsConsumer)(nil) func newTracesReceiver(config Config, set component.ReceiverCreateSettings, unmarshalers map[string]TracesUnmarshaler, nextConsumer consumer.Traces) (*kafkaTracesConsumer, error) { @@ -136,6 +149,77 @@ func (c *kafkaTracesConsumer) Shutdown(context.Context) error { return c.consumerGroup.Close() } +func newMetricsReceiver(config Config, set component.ReceiverCreateSettings, unmarshalers map[string]MetricsUnmarshaler, nextConsumer consumer.Metrics) (*kafkaMetricsConsumer, error) { + unmarshaler := unmarshalers[config.Encoding] + if unmarshaler == nil { + return nil, errUnrecognizedEncoding + } + + c := sarama.NewConfig() + c.ClientID = config.ClientID + c.Metadata.Full = config.Metadata.Full + c.Metadata.Retry.Max = config.Metadata.Retry.Max + c.Metadata.Retry.Backoff = config.Metadata.Retry.Backoff + if config.ProtocolVersion != "" { + version, err := sarama.ParseKafkaVersion(config.ProtocolVersion) + if err != nil { + return nil, err + } + c.Version = version + } + if err := kafkaexporter.ConfigureAuthentication(config.Authentication, c); err != nil { + return nil, err + } + client, err := sarama.NewConsumerGroup(config.Brokers, config.GroupID, c) + if err != nil { + return nil, err + } + return &kafkaMetricsConsumer{ + id: config.ID(), + consumerGroup: client, + topics: []string{config.Topic}, + nextConsumer: nextConsumer, + unmarshaler: unmarshaler, + logger: set.Logger, + }, nil +} + +func (c *kafkaMetricsConsumer) Start(context.Context, component.Host) error { + ctx, cancel := context.WithCancel(context.Background()) + c.cancelConsumeLoop = cancel + metricsConsumerGroup := &metricsConsumerGroupHandler{ + id: c.id, + logger: c.logger, + unmarshaler: c.unmarshaler, + nextConsumer: c.nextConsumer, + ready: make(chan bool), + obsrecv: obsreport.NewReceiver(obsreport.ReceiverSettings{ReceiverID: c.id, Transport: transport}), + } + go c.consumeLoop(ctx, metricsConsumerGroup) + <-metricsConsumerGroup.ready + return nil +} + +func (c *kafkaMetricsConsumer) consumeLoop(ctx context.Context, handler sarama.ConsumerGroupHandler) error { + for { + // `Consume` should be called inside an infinite loop, when a + // server-side rebalance happens, the consumer session will need to be + // recreated to get the new claims + if err := c.consumerGroup.Consume(ctx, c.topics, handler); err != nil { + c.logger.Error("Error from consumer", zap.Error(err)) + } + // check if context was cancelled, signaling that the consumer should stop + if ctx.Err() != nil { + c.logger.Info("Consumer stopped", zap.Error(ctx.Err())) + return ctx.Err() + } + } +} +func (c *kafkaMetricsConsumer) Shutdown(context.Context) error { + c.cancelConsumeLoop() + return c.consumerGroup.Close() +} + func newLogsReceiver(config Config, set component.ReceiverCreateSettings, unmarshalers map[string]LogsUnmarshaler, nextConsumer consumer.Logs) (*kafkaLogsConsumer, error) { unmarshaler := unmarshalers[config.Encoding] if unmarshaler == nil { @@ -220,6 +304,18 @@ type tracesConsumerGroupHandler struct { obsrecv *obsreport.Receiver } +type metricsConsumerGroupHandler struct { + id config.ComponentID + unmarshaler MetricsUnmarshaler + nextConsumer consumer.Metrics + ready chan bool + readyCloser sync.Once + + logger *zap.Logger + + obsrecv *obsreport.Receiver +} + type 
logsConsumerGroupHandler struct { id config.ComponentID unmarshaler LogsUnmarshaler @@ -233,6 +329,7 @@ type logsConsumerGroupHandler struct { } var _ sarama.ConsumerGroupHandler = (*tracesConsumerGroupHandler)(nil) +var _ sarama.ConsumerGroupHandler = (*metricsConsumerGroupHandler)(nil) var _ sarama.ConsumerGroupHandler = (*logsConsumerGroupHandler)(nil) func (c *tracesConsumerGroupHandler) Setup(session sarama.ConsumerGroupSession) error { @@ -259,8 +356,7 @@ func (c *tracesConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSe zap.String("topic", message.Topic)) session.MarkMessage(message, "") - ctx := obsreport.ReceiverContext(session.Context(), c.id, transport) - ctx = c.obsrecv.StartTracesOp(ctx) + ctx := c.obsrecv.StartTracesOp(session.Context()) statsTags := []tag.Mutator{tag.Insert(tagInstanceName, c.id.String())} _ = stats.RecordWithTags(ctx, statsTags, statMessageCount.M(1), @@ -283,6 +379,53 @@ func (c *tracesConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSe return nil } +func (c *metricsConsumerGroupHandler) Setup(session sarama.ConsumerGroupSession) error { + c.readyCloser.Do(func() { + close(c.ready) + }) + statsTags := []tag.Mutator{tag.Insert(tagInstanceName, c.id.Name())} + _ = stats.RecordWithTags(session.Context(), statsTags, statPartitionStart.M(1)) + return nil +} + +func (c *metricsConsumerGroupHandler) Cleanup(session sarama.ConsumerGroupSession) error { + statsTags := []tag.Mutator{tag.Insert(tagInstanceName, c.id.Name())} + _ = stats.RecordWithTags(session.Context(), statsTags, statPartitionClose.M(1)) + return nil +} + +func (c *metricsConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { + c.logger.Info("Starting consumer group", zap.Int32("partition", claim.Partition())) + for message := range claim.Messages() { + c.logger.Debug("Kafka message claimed", + zap.String("value", string(message.Value)), + zap.Time("timestamp", message.Timestamp), + zap.String("topic", message.Topic)) + session.MarkMessage(message, "") + + ctx := c.obsrecv.StartMetricsOp(session.Context()) + statsTags := []tag.Mutator{tag.Insert(tagInstanceName, c.id.String())} + _ = stats.RecordWithTags(ctx, statsTags, + statMessageCount.M(1), + statMessageOffset.M(message.Offset), + statMessageOffsetLag.M(claim.HighWaterMarkOffset()-message.Offset-1)) + + metrics, err := c.unmarshaler.Unmarshal(message.Value) + if err != nil { + c.logger.Error("failed to unmarshal message", zap.Error(err)) + return err + } + + dataPointCount := metrics.DataPointCount() + err = c.nextConsumer.ConsumeMetrics(session.Context(), metrics) + c.obsrecv.EndMetricsOp(ctx, c.unmarshaler.Encoding(), dataPointCount, err) + if err != nil { + return err + } + } + return nil +} + func (c *logsConsumerGroupHandler) Setup(session sarama.ConsumerGroupSession) error { c.readyCloser.Do(func() { close(c.ready) @@ -311,8 +454,7 @@ func (c *logsConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSess zap.String("topic", message.Topic)) session.MarkMessage(message, "") - ctx := obsreport.ReceiverContext(session.Context(), c.id, transport) - ctx = c.obsrecv.StartTracesOp(ctx) + ctx := c.obsrecv.StartTracesOp(session.Context()) _ = stats.RecordWithTags( ctx, []tag.Mutator{tag.Insert(tagInstanceName, c.id.String())}, diff --git a/internal/otel_collector/receiver/kafkareceiver/otlp_unmarshaler.go b/internal/otel_collector/receiver/kafkareceiver/otlp_unmarshaler.go deleted file mode 100644 index cca9208a8cf..00000000000 --- 
a/internal/otel_collector/receiver/kafkareceiver/otlp_unmarshaler.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2020 The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package kafkareceiver - -import ( - "go.opentelemetry.io/collector/consumer/pdata" -) - -type otlpTracesPbUnmarshaler struct { -} - -var _ TracesUnmarshaler = (*otlpTracesPbUnmarshaler)(nil) - -func (p *otlpTracesPbUnmarshaler) Unmarshal(bytes []byte) (pdata.Traces, error) { - return pdata.TracesFromOtlpProtoBytes(bytes) -} - -func (*otlpTracesPbUnmarshaler) Encoding() string { - return defaultEncoding -} - -type otlpLogsPbUnmarshaler struct { -} - -var _ LogsUnmarshaler = (*otlpLogsPbUnmarshaler)(nil) - -func (p *otlpLogsPbUnmarshaler) Unmarshal(bytes []byte) (pdata.Logs, error) { - return pdata.LogsFromOtlpProtoBytes(bytes) -} - -func (*otlpLogsPbUnmarshaler) Encoding() string { - return defaultEncoding -} diff --git a/internal/otel_collector/receiver/kafkareceiver/pdata_unmarshaler.go b/internal/otel_collector/receiver/kafkareceiver/pdata_unmarshaler.go new file mode 100644 index 00000000000..6907c5d3e4f --- /dev/null +++ b/internal/otel_collector/receiver/kafkareceiver/pdata_unmarshaler.go @@ -0,0 +1,79 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kafkareceiver + +import ( + "go.opentelemetry.io/collector/model/pdata" +) + +type pdataLogsUnmarshaler struct { + pdata.LogsUnmarshaler + encoding string +} + +func (p pdataLogsUnmarshaler) Unmarshal(buf []byte) (pdata.Logs, error) { + return p.LogsUnmarshaler.UnmarshalLogs(buf) +} + +func (p pdataLogsUnmarshaler) Encoding() string { + return p.encoding +} + +func newPdataLogsUnmarshaler(unmarshaler pdata.LogsUnmarshaler, encoding string) LogsUnmarshaler { + return pdataLogsUnmarshaler{ + LogsUnmarshaler: unmarshaler, + encoding: encoding, + } +} + +type pdataTracesUnmarshaler struct { + pdata.TracesUnmarshaler + encoding string +} + +func (p pdataTracesUnmarshaler) Unmarshal(buf []byte) (pdata.Traces, error) { + return p.TracesUnmarshaler.UnmarshalTraces(buf) +} + +func (p pdataTracesUnmarshaler) Encoding() string { + return p.encoding +} + +func newPdataTracesUnmarshaler(unmarshaler pdata.TracesUnmarshaler, encoding string) TracesUnmarshaler { + return pdataTracesUnmarshaler{ + TracesUnmarshaler: unmarshaler, + encoding: encoding, + } +} + +type pdataMetricsUnmarshaler struct { + pdata.MetricsUnmarshaler + encoding string +} + +func (p pdataMetricsUnmarshaler) Unmarshal(buf []byte) (pdata.Metrics, error) { + return p.MetricsUnmarshaler.UnmarshalMetrics(buf) +} + +func (p pdataMetricsUnmarshaler) Encoding() string { + return p.encoding +} + +func newPdataMetricsUnmarshaler(unmarshaler pdata.MetricsUnmarshaler, encoding string) MetricsUnmarshaler { + return pdataMetricsUnmarshaler{ + MetricsUnmarshaler: unmarshaler, + encoding: encoding, + } +} diff --git a/internal/otel_collector/receiver/kafkareceiver/unmarshaler.go b/internal/otel_collector/receiver/kafkareceiver/unmarshaler.go index f15e6b90074..5084a3822a7 100644 --- a/internal/otel_collector/receiver/kafkareceiver/unmarshaler.go +++ b/internal/otel_collector/receiver/kafkareceiver/unmarshaler.go @@ -15,7 +15,10 @@ package kafkareceiver import ( - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/otlp" + "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/translator/trace/zipkinv1" + "go.opentelemetry.io/collector/translator/trace/zipkinv2" ) // TracesUnmarshaler deserializes the message body. @@ -27,6 +30,15 @@ type TracesUnmarshaler interface { Encoding() string } +// MetricsUnmarshaler deserializes the message body +type MetricsUnmarshaler interface { + // Unmarshal deserializes the message body into traces + Unmarshal([]byte) (pdata.Metrics, error) + + // Encoding of the serialized messages + Encoding() string +} + // LogsUnmarshaler deserializes the message body. type LogsUnmarshaler interface { // Unmarshal deserializes the message body into traces. @@ -38,14 +50,14 @@ type LogsUnmarshaler interface { // defaultTracesUnmarshalers returns map of supported encodings with TracesUnmarshaler. 
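// Editor's note: an illustrative sketch, not part of the patch. It shows the
// model/otlp unmarshalers that replace the old pdata.*FromOtlpProtoBytes
// helpers, as wrapped by the pdata*Unmarshaler types above. "payload" is
// assumed to hold an OTLP protobuf-encoded metrics request, e.g. a Kafka
// message value.
package example

import (
	"go.opentelemetry.io/collector/model/otlp"
	"go.opentelemetry.io/collector/model/pdata"
)

func decodeOTLPMetrics(payload []byte) (pdata.Metrics, int, error) {
	unmarshaler := otlp.NewProtobufMetricsUnmarshaler()
	md, err := unmarshaler.UnmarshalMetrics(payload)
	if err != nil {
		return pdata.NewMetrics(), 0, err
	}
	// DataPointCount replaces the old MetricAndDataPointCount pair and is what
	// the receiver reports to obsreport in v0.30.0.
	return md, md.DataPointCount(), nil
}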
func defaultTracesUnmarshalers() map[string]TracesUnmarshaler { - otlp := &otlpTracesPbUnmarshaler{} + otlpPb := newPdataTracesUnmarshaler(otlp.NewProtobufTracesUnmarshaler(), defaultEncoding) jaegerProto := jaegerProtoSpanUnmarshaler{} jaegerJSON := jaegerJSONSpanUnmarshaler{} - zipkinProto := zipkinProtoSpanUnmarshaler{} - zipkinJSON := zipkinJSONSpanUnmarshaler{} - zipkinThrift := zipkinThriftSpanUnmarshaler{} + zipkinProto := newPdataTracesUnmarshaler(zipkinv2.NewProtobufTracesUnmarshaler(false, false), "zipkin_proto") + zipkinJSON := newPdataTracesUnmarshaler(zipkinv2.NewJSONTracesUnmarshaler(false), "zipkin_json") + zipkinThrift := newPdataTracesUnmarshaler(zipkinv1.NewThriftTracesUnmarshaler(), "zipkin_thrift") return map[string]TracesUnmarshaler{ - otlp.Encoding(): otlp, + otlpPb.Encoding(): otlpPb, jaegerProto.Encoding(): jaegerProto, jaegerJSON.Encoding(): jaegerJSON, zipkinProto.Encoding(): zipkinProto, @@ -54,9 +66,16 @@ func defaultTracesUnmarshalers() map[string]TracesUnmarshaler { } } +func defaultMetricsUnmarshalers() map[string]MetricsUnmarshaler { + otlpPb := newPdataMetricsUnmarshaler(otlp.NewProtobufMetricsUnmarshaler(), defaultEncoding) + return map[string]MetricsUnmarshaler{ + otlpPb.Encoding(): otlpPb, + } +} + func defaultLogsUnmarshalers() map[string]LogsUnmarshaler { - otlp := &otlpLogsPbUnmarshaler{} + otlpPb := newPdataLogsUnmarshaler(otlp.NewProtobufLogsUnmarshaler(), defaultEncoding) return map[string]LogsUnmarshaler{ - otlp.Encoding(): otlp, + otlpPb.Encoding(): otlpPb, } } diff --git a/internal/otel_collector/receiver/kafkareceiver/zipkin_unmarshaler.go b/internal/otel_collector/receiver/kafkareceiver/zipkin_unmarshaler.go index f8d7f7d87fd..ca6a78391ac 100644 --- a/internal/otel_collector/receiver/kafkareceiver/zipkin_unmarshaler.go +++ b/internal/otel_collector/receiver/kafkareceiver/zipkin_unmarshaler.go @@ -1,10 +1,10 @@ -// Copyright 2020 The OpenTelemetry Authors +// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -15,93 +15,24 @@ package kafkareceiver import ( - "context" - "encoding/json" - - "github.com/apache/thrift/lib/go/thrift" - "github.com/jaegertracing/jaeger/thrift-gen/zipkincore" - zipkinmodel "github.com/openzipkin/zipkin-go/model" - "github.com/openzipkin/zipkin-go/proto/zipkin_proto3" - - "go.opentelemetry.io/collector/consumer/pdata" - zipkintranslator "go.opentelemetry.io/collector/translator/trace/zipkin" + "go.opentelemetry.io/collector/translator/trace/zipkinv1" + "go.opentelemetry.io/collector/translator/trace/zipkinv2" ) -type zipkinProtoSpanUnmarshaler struct { -} - -var _ TracesUnmarshaler = (*zipkinProtoSpanUnmarshaler)(nil) - -func (z zipkinProtoSpanUnmarshaler) Unmarshal(bytes []byte) (pdata.Traces, error) { - parseSpans, err := zipkin_proto3.ParseSpans(bytes, false) - if err != nil { - return pdata.NewTraces(), err - } - return zipkintranslator.V2SpansToInternalTraces(parseSpans, false) -} - -func (z zipkinProtoSpanUnmarshaler) Encoding() string { - return "zipkin_proto" -} - -type zipkinJSONSpanUnmarshaler struct { -} - -var _ TracesUnmarshaler = (*zipkinJSONSpanUnmarshaler)(nil) - -func (z zipkinJSONSpanUnmarshaler) Unmarshal(bytes []byte) (pdata.Traces, error) { - var spans []*zipkinmodel.SpanModel - if err := json.Unmarshal(bytes, &spans); err != nil { - return pdata.NewTraces(), err - } - return zipkintranslator.V2SpansToInternalTraces(spans, false) -} - -func (z zipkinJSONSpanUnmarshaler) Encoding() string { - return "zipkin_json" -} - -type zipkinThriftSpanUnmarshaler struct { -} - -var _ TracesUnmarshaler = (*zipkinThriftSpanUnmarshaler)(nil) - -func (z zipkinThriftSpanUnmarshaler) Unmarshal(bytes []byte) (pdata.Traces, error) { - spans, err := deserializeZipkinThrift(bytes) - if err != nil { - return pdata.NewTraces(), err - } - return zipkintranslator.V1ThriftBatchToInternalTraces(spans) +const ( + zipkinProtobufEncoding = "zipkin_proto" + zipkinJSONEncoding = "zipkin_json" + zipkinThriftEncoding = "zipkin_thrift" +) +func newZipkinProtobufUnmarshaler() TracesUnmarshaler { + return newPdataTracesUnmarshaler(zipkinv2.NewProtobufTracesUnmarshaler(false, false), zipkinProtobufEncoding) } -func (z zipkinThriftSpanUnmarshaler) Encoding() string { - return "zipkin_thrift" +func newZipkinJSONUnmarshaler() TracesUnmarshaler { + return newPdataTracesUnmarshaler(zipkinv2.NewJSONTracesUnmarshaler(false), zipkinJSONEncoding) } -// deserializeThrift decodes Thrift bytes to a list of spans. -// This code comes from jaegertracing/jaeger, ideally we should have imported -// it but this was creating many conflicts so brought the code to here. 
-// https://github.com/jaegertracing/jaeger/blob/6bc0c122bfca8e737a747826ae60a22a306d7019/model/converter/thrift/zipkin/deserialize.go#L36 -func deserializeZipkinThrift(b []byte) ([]*zipkincore.Span, error) { - buffer := thrift.NewTMemoryBuffer() - buffer.Write(b) - - transport := thrift.NewTBinaryProtocolConf(buffer, nil) - _, size, err := transport.ReadListBegin(context.Background()) // Ignore the returned element type - if err != nil { - return nil, err - } - - // We don't depend on the size returned by ReadListBegin to preallocate the array because it - // sometimes returns a nil error on bad input and provides an unreasonably large int for size - var spans []*zipkincore.Span - for i := 0; i < size; i++ { - zs := &zipkincore.Span{} - if err = zs.Read(context.Background(), transport); err != nil { - return nil, err - } - spans = append(spans, zs) - } - return spans, nil +func newZipkinThriftUnmarshaler() TracesUnmarshaler { + return newPdataTracesUnmarshaler(zipkinv1.NewThriftTracesUnmarshaler(), zipkinThriftEncoding) } diff --git a/internal/otel_collector/receiver/opencensusreceiver/doc.go b/internal/otel_collector/receiver/opencensusreceiver/doc.go new file mode 100644 index 00000000000..a2919660ed3 --- /dev/null +++ b/internal/otel_collector/receiver/opencensusreceiver/doc.go @@ -0,0 +1,16 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package opencensusreceiver receives OpenCensus traces. +package opencensusreceiver diff --git a/internal/otel_collector/receiver/opencensusreceiver/internal/ocmetrics/opencensus.go b/internal/otel_collector/receiver/opencensusreceiver/internal/ocmetrics/opencensus.go index 8479d33dfab..6879df198b6 100644 --- a/internal/otel_collector/receiver/opencensusreceiver/internal/ocmetrics/opencensus.go +++ b/internal/otel_collector/receiver/opencensusreceiver/internal/ocmetrics/opencensus.go @@ -47,7 +47,7 @@ func New(id config.ComponentID, nextConsumer consumer.Metrics) (*Receiver, error ocr := &Receiver{ id: id, nextConsumer: nextConsumer, - obsrecv: obsreport.NewReceiver(obsreport.ReceiverSettings{ReceiverID: id, Transport: receiverTransport}), + obsrecv: obsreport.NewReceiver(obsreport.ReceiverSettings{ReceiverID: id, Transport: receiverTransport, LongLivedCtx: true}), } return ocr, nil } @@ -64,8 +64,6 @@ const ( // Export is the gRPC method that receives streamed metrics from // OpenCensus-metricproto compatible libraries/applications. func (ocr *Receiver) Export(mes agentmetricspb.MetricsService_ExportServer) error { - longLivedRPCCtx := obsreport.ReceiverContext(mes.Context(), ocr.id, receiverTransport) - // Retrieve the first message. It MUST have a non-nil Node. recv, err := mes.Recv() if err != nil { @@ -82,7 +80,7 @@ func (ocr *Receiver) Export(mes agentmetricspb.MetricsService_ExportServer) erro // Now that we've got the first message with a Node, we can start to receive streamed up metrics. 
for { lastNonNilNode, resource, err = ocr.processReceivedMsg( - longLivedRPCCtx, + mes.Context(), lastNonNilNode, resource, recv) @@ -124,9 +122,7 @@ func (ocr *Receiver) processReceivedMsg( } func (ocr *Receiver) sendToNextConsumer(longLivedRPCCtx context.Context, node *commonpb.Node, resource *resourcepb.Resource, metrics []*ocmetrics.Metric) error { - ctx := ocr.obsrecv.StartMetricsOp( - longLivedRPCCtx, - obsreport.WithLongLivedCtx()) + ctx := ocr.obsrecv.StartMetricsOp(longLivedRPCCtx) numPoints := 0 // Count number of time series and data points. diff --git a/internal/otel_collector/receiver/opencensusreceiver/internal/octrace/opencensus.go b/internal/otel_collector/receiver/opencensusreceiver/internal/octrace/opencensus.go index 214235aff65..35fd0a4e6f4 100644 --- a/internal/otel_collector/receiver/opencensusreceiver/internal/octrace/opencensus.go +++ b/internal/otel_collector/receiver/opencensusreceiver/internal/octrace/opencensus.go @@ -27,7 +27,7 @@ import ( "go.opentelemetry.io/collector/component/componenterror" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/obsreport" "go.opentelemetry.io/collector/translator/internaldata" ) @@ -54,7 +54,7 @@ func New(id config.ComponentID, nextConsumer consumer.Traces) (*Receiver, error) return &Receiver{ nextConsumer: nextConsumer, id: id, - obsrecv: obsreport.NewReceiver(obsreport.ReceiverSettings{ReceiverID: id, Transport: receiverTransport}), + obsrecv: obsreport.NewReceiver(obsreport.ReceiverSettings{ReceiverID: id, Transport: receiverTransport, LongLivedCtx: true}), }, nil } @@ -78,8 +78,6 @@ func (ocr *Receiver) Export(tes agenttracepb.TraceService_ExportServer) error { ctx = client.NewContext(ctx, c) } - longLivedRPCCtx := obsreport.ReceiverContext(ctx, ocr.id, receiverTransport) - // The first message MUST have a non-nil Node. recv, err := tes.Recv() if err != nil { @@ -96,7 +94,7 @@ func (ocr *Receiver) Export(tes agenttracepb.TraceService_ExportServer) error { // Now that we've got the first message with a Node, we can start to receive streamed up spans. for { lastNonNilNode, resource, err = ocr.processReceivedMsg( - longLivedRPCCtx, + ctx, lastNonNilNode, resource, recv) @@ -139,9 +137,7 @@ func (ocr *Receiver) processReceivedMsg( } func (ocr *Receiver) sendToNextConsumer(longLivedRPCCtx context.Context, td pdata.Traces) error { - ctx := ocr.obsrecv.StartTracesOp( - longLivedRPCCtx, - obsreport.WithLongLivedCtx()) + ctx := ocr.obsrecv.StartTracesOp(longLivedRPCCtx) err := ocr.nextConsumer.ConsumeTraces(ctx, td) ocr.obsrecv.EndTracesOp(ctx, receiverDataFormat, td.SpanCount(), err) diff --git a/internal/otel_collector/receiver/otlpreceiver/config.go b/internal/otel_collector/receiver/otlpreceiver/config.go index e22a0169327..c431baf7f7b 100644 --- a/internal/otel_collector/receiver/otlpreceiver/config.go +++ b/internal/otel_collector/receiver/otlpreceiver/config.go @@ -17,8 +17,6 @@ package otlpreceiver import ( "fmt" - "github.com/spf13/cast" - "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/config/configgrpc" "go.opentelemetry.io/collector/config/confighttp" @@ -68,27 +66,19 @@ func (cfg *Config) Unmarshal(componentParser *configparser.Parser) error { return err } - // next manually search for protocols in viper, if a protocol is not present it means it is disable. 
- protocols := cast.ToStringMap(componentParser.Get(protocolsFieldName)) + // next manually search for protocols in the configparser.Parser, if a protocol is not present it means it is disable. + protocols, err := componentParser.Sub(protocolsFieldName) + if err != nil { + return err + } - // UnmarshalExact will ignore empty entries like a protocol with no values, so if a typo happened - // in the protocol that is intended to be enabled will not be enabled. So check if the protocols - // include only known protocols. - knownProtocols := 0 - if _, ok := protocols[protoGRPC]; !ok { + if !protocols.IsSet(protoGRPC) { cfg.GRPC = nil - } else { - knownProtocols++ } - if _, ok := protocols[protoHTTP]; !ok { + if !protocols.IsSet(protoHTTP) { cfg.HTTP = nil - } else { - knownProtocols++ } - if len(protocols) != knownProtocols { - return fmt.Errorf("unknown protocols in the OTLP receiver") - } return nil } diff --git a/internal/otel_collector/receiver/otlpreceiver/doc.go b/internal/otel_collector/receiver/otlpreceiver/doc.go new file mode 100644 index 00000000000..08660f683e6 --- /dev/null +++ b/internal/otel_collector/receiver/otlpreceiver/doc.go @@ -0,0 +1,16 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package otlpreceiver receives data in OTLP format. +package otlpreceiver diff --git a/internal/otel_collector/receiver/otlpreceiver/factory.go b/internal/otel_collector/receiver/otlpreceiver/factory.go index 16445572236..24cb581e6e1 100644 --- a/internal/otel_collector/receiver/otlpreceiver/factory.go +++ b/internal/otel_collector/receiver/otlpreceiver/factory.go @@ -67,7 +67,7 @@ func createDefaultConfig() config.Receiver { // CreateTracesReceiver creates a trace receiver based on provided config. func createTracesReceiver( - ctx context.Context, + _ context.Context, set component.ReceiverCreateSettings, cfg config.Receiver, nextConsumer consumer.Traces, @@ -76,7 +76,7 @@ func createTracesReceiver( return newOtlpReceiver(cfg.(*Config), set.Logger) }) - if err := r.Unwrap().(*otlpReceiver).registerTraceConsumer(ctx, nextConsumer); err != nil { + if err := r.Unwrap().(*otlpReceiver).registerTraceConsumer(nextConsumer); err != nil { return nil, err } return r, nil @@ -84,7 +84,7 @@ func createTracesReceiver( // CreateMetricsReceiver creates a metrics receiver based on provided config. func createMetricsReceiver( - ctx context.Context, + _ context.Context, set component.ReceiverCreateSettings, cfg config.Receiver, consumer consumer.Metrics, @@ -93,7 +93,7 @@ func createMetricsReceiver( return newOtlpReceiver(cfg.(*Config), set.Logger) }) - if err := r.Unwrap().(*otlpReceiver).registerMetricsConsumer(ctx, consumer); err != nil { + if err := r.Unwrap().(*otlpReceiver).registerMetricsConsumer(consumer); err != nil { return nil, err } return r, nil @@ -101,7 +101,7 @@ func createMetricsReceiver( // CreateLogReceiver creates a log receiver based on provided config. 
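// Editor's note: an illustrative sketch, not part of the patch. It isolates the
// configparser pattern used by the Jaeger and OTLP receiver config hunks above:
// Sub() scopes the parser to the "protocols" map and IsSet() reports whether a
// protocol key appears in the user's YAML, so absent protocols are disabled by
// nil-ing their settings. The receiverProtocols struct and the literal "grpc"
// and "http" keys are stand-ins for the real Config types and their constants.
package example

import (
	"go.opentelemetry.io/collector/config/configgrpc"
	"go.opentelemetry.io/collector/config/confighttp"
	"go.opentelemetry.io/collector/config/configparser"
)

type receiverProtocols struct {
	GRPC *configgrpc.GRPCServerSettings
	HTTP *confighttp.HTTPServerSettings
}

func disableMissingProtocols(componentParser *configparser.Parser, cfg *receiverProtocols) error {
	protocols, err := componentParser.Sub("protocols")
	if err != nil {
		return err
	}
	if !protocols.IsSet("grpc") {
		cfg.GRPC = nil // not mentioned in the config, so the gRPC endpoint stays disabled
	}
	if !protocols.IsSet("http") {
		cfg.HTTP = nil // same for the HTTP endpoint
	}
	return nil
}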
func createLogReceiver( - ctx context.Context, + _ context.Context, set component.ReceiverCreateSettings, cfg config.Receiver, consumer consumer.Logs, @@ -110,7 +110,7 @@ func createLogReceiver( return newOtlpReceiver(cfg.(*Config), set.Logger) }) - if err := r.Unwrap().(*otlpReceiver).registerLogsConsumer(ctx, consumer); err != nil { + if err := r.Unwrap().(*otlpReceiver).registerLogsConsumer(consumer); err != nil { return nil, err } return r, nil diff --git a/internal/otel_collector/receiver/otlpreceiver/internal/logs/otlp.go b/internal/otel_collector/receiver/otlpreceiver/internal/logs/otlp.go index e677afa1f89..e2bd11f058f 100644 --- a/internal/otel_collector/receiver/otlpreceiver/internal/logs/otlp.go +++ b/internal/otel_collector/receiver/otlpreceiver/internal/logs/otlp.go @@ -20,14 +20,14 @@ import ( "go.opentelemetry.io/collector/client" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal" - collectorlog "go.opentelemetry.io/collector/internal/data/protogen/collector/logs/v1" + "go.opentelemetry.io/collector/model/otlpgrpc" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/obsreport" ) const ( dataFormatProtobuf = "protobuf" + receiverTransport = "grpc" ) // Receiver is the type used to handle spans from OpenTelemetry exporters. @@ -39,39 +39,18 @@ type Receiver struct { // New creates a new Receiver reference. func New(id config.ComponentID, nextConsumer consumer.Logs) *Receiver { - r := &Receiver{ + return &Receiver{ id: id, nextConsumer: nextConsumer, obsrecv: obsreport.NewReceiver(obsreport.ReceiverSettings{ReceiverID: id, Transport: receiverTransport}), } - - return r } -const ( - receiverTransport = "grpc" -) - -var receiverID = config.NewIDWithName("otlp", "log") - // Export implements the service Export logs func. -func (r *Receiver) Export(ctx context.Context, req *collectorlog.ExportLogsServiceRequest) (*collectorlog.ExportLogsServiceResponse, error) { - // We need to ensure that it propagates the receiver name as a tag - ctxWithReceiverName := obsreport.ReceiverContext(ctx, r.id, receiverTransport) - - ld := pdata.LogsFromInternalRep(internal.LogsFromOtlp(req)) - err := r.sendToNextConsumer(ctxWithReceiverName, ld) - if err != nil { - return nil, err - } - - return &collectorlog.ExportLogsServiceResponse{}, nil -} - -func (r *Receiver) sendToNextConsumer(ctx context.Context, ld pdata.Logs) error { +func (r *Receiver) Export(ctx context.Context, ld pdata.Logs) (otlpgrpc.LogsResponse, error) { numSpans := ld.LogRecordCount() if numSpans == 0 { - return nil + return otlpgrpc.NewLogsResponse(), nil } if c, ok := client.FromGRPC(ctx); ok { @@ -82,5 +61,5 @@ func (r *Receiver) sendToNextConsumer(ctx context.Context, ld pdata.Logs) error err := r.nextConsumer.ConsumeLogs(ctx, ld) r.obsrecv.EndLogsOp(ctx, dataFormatProtobuf, numSpans, err) - return err + return otlpgrpc.NewLogsResponse(), err } diff --git a/internal/otel_collector/receiver/otlpreceiver/internal/marshal_jsonpb.go b/internal/otel_collector/receiver/otlpreceiver/internal/marshal_jsonpb.go deleted file mode 100644 index e0ab286fbf6..00000000000 --- a/internal/otel_collector/receiver/otlpreceiver/internal/marshal_jsonpb.go +++ /dev/null @@ -1,301 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "reflect" - - "github.com/gogo/protobuf/jsonpb" - "github.com/gogo/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" -) - -// JSONPb is a copy of https://github.com/grpc-ecosystem/grpc-gateway/blob/master/runtime/marshal_jsonpb.go -// with one difference: github.com/golang/protobuf imports are replaced by github.com/gogo/protobuf -// to make it work with Gogoproto messages that we use. There are no other changes to -// JSONPb done. It should be safe to update (copy again) it to latest version of -// https://github.com/grpc-ecosystem/grpc-gateway/blob/master/runtime/marshal_jsonpb.go -// when the github.com/grpc-ecosystem/grpc-gateway dependency is updated. - -//lint:file-ignore S1034 Ignore lint errors, this is a copied file and we don't want to modify it. - -// JSONPb is a Marshaler which marshals/unmarshals into/from JSON -// with the "github.com/golang/protobuf/jsonpb". -// It supports fully functionality of protobuf unlike JSONBuiltin. -// -// The NewDecoder method returns a DecoderWrapper, so the underlying -// *json.Decoder methods can be used. -type JSONPb jsonpb.Marshaler - -// ContentType always returns "application/json". -func (*JSONPb) ContentType() string { - return "application/json" -} - -// Marshal marshals "v" into JSON. -func (j *JSONPb) Marshal(v interface{}) ([]byte, error) { - if _, ok := v.(proto.Message); !ok { - return j.marshalNonProtoField(v) - } - - var buf bytes.Buffer - if err := j.marshalTo(&buf, v); err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error { - p, ok := v.(proto.Message) - if !ok { - buf, err := j.marshalNonProtoField(v) - if err != nil { - return err - } - _, err = w.Write(buf) - return err - } - return (*jsonpb.Marshaler)(j).Marshal(w, p) -} - -var ( - // protoMessageType is stored to prevent constant lookup of the same type at runtime. - protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem() -) - -// marshalNonProto marshals a non-message field of a protobuf message. -// This function does not correctly marshals arbitrary data structure into JSON, -// but it is only capable of marshaling non-message field values of protobuf, -// i.e. primitive types, enums; pointers to primitives or enums; maps from -// integer/string types to primitives/enums/pointers to messages. 
-func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { - if v == nil { - return []byte("null"), nil - } - rv := reflect.ValueOf(v) - for rv.Kind() == reflect.Ptr { - if rv.IsNil() { - return []byte("null"), nil - } - rv = rv.Elem() - } - - if rv.Kind() == reflect.Slice { - if rv.IsNil() { - if j.EmitDefaults { - return []byte("[]"), nil - } - return []byte("null"), nil - } - - if rv.Type().Elem().Implements(protoMessageType) { - var buf bytes.Buffer - err := buf.WriteByte('[') - if err != nil { - return nil, err - } - for i := 0; i < rv.Len(); i++ { - if i != 0 { - err = buf.WriteByte(',') - if err != nil { - return nil, err - } - } - if err = (*jsonpb.Marshaler)(j).Marshal(&buf, rv.Index(i).Interface().(proto.Message)); err != nil { - return nil, err - } - } - err = buf.WriteByte(']') - if err != nil { - return nil, err - } - - return buf.Bytes(), nil - } - } - - if rv.Kind() == reflect.Map { - m := make(map[string]*json.RawMessage) - for _, k := range rv.MapKeys() { - buf, err := j.Marshal(rv.MapIndex(k).Interface()) - if err != nil { - return nil, err - } - m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf) - } - if j.Indent != "" { - return json.MarshalIndent(m, "", j.Indent) - } - return json.Marshal(m) - } - if enum, ok := rv.Interface().(protoEnum); ok && !j.EnumsAsInts { - return json.Marshal(enum.String()) - } - return json.Marshal(rv.Interface()) -} - -// Unmarshal unmarshals JSON "data" into "v" -func (j *JSONPb) Unmarshal(data []byte, v interface{}) error { - return unmarshalJSONPb(data, v) -} - -// NewDecoder returns a Decoder which reads JSON stream from "r". -func (j *JSONPb) NewDecoder(r io.Reader) runtime.Decoder { - d := json.NewDecoder(r) - return DecoderWrapper{Decoder: d} -} - -// DecoderWrapper is a wrapper around a *json.Decoder that adds -// support for protos to the Decode method. -type DecoderWrapper struct { - *json.Decoder -} - -// Decode wraps the embedded decoder's Decode method to support -// protos using a jsonpb.Unmarshaler. -func (d DecoderWrapper) Decode(v interface{}) error { - return decodeJSONPb(d.Decoder, v) -} - -// NewEncoder returns an Encoder which writes JSON stream into "w". 
-func (j *JSONPb) NewEncoder(w io.Writer) runtime.Encoder { - return runtime.EncoderFunc(func(v interface{}) error { - if err := j.marshalTo(w, v); err != nil { - return err - } - // mimic json.Encoder by adding a newline (makes output - // easier to read when it contains multiple encoded items) - _, err := w.Write(j.Delimiter()) - return err - }) -} - -func unmarshalJSONPb(data []byte, v interface{}) error { - d := json.NewDecoder(bytes.NewReader(data)) - return decodeJSONPb(d, v) -} - -func decodeJSONPb(d *json.Decoder, v interface{}) error { - p, ok := v.(proto.Message) - if !ok { - return decodeNonProtoField(d, v) - } - unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields} - return unmarshaler.UnmarshalNext(d, p) -} - -func decodeNonProtoField(d *json.Decoder, v interface{}) error { - rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Ptr { - return fmt.Errorf("%T is not a pointer", v) - } - for rv.Kind() == reflect.Ptr { - if rv.IsNil() { - rv.Set(reflect.New(rv.Type().Elem())) - } - if rv.Type().ConvertibleTo(typeProtoMessage) { - unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields} - return unmarshaler.UnmarshalNext(d, rv.Interface().(proto.Message)) - } - rv = rv.Elem() - } - if rv.Kind() == reflect.Map { - if rv.IsNil() { - rv.Set(reflect.MakeMap(rv.Type())) - } - conv, ok := convFromType[rv.Type().Key().Kind()] - if !ok { - return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key()) - } - - m := make(map[string]*json.RawMessage) - if err := d.Decode(&m); err != nil { - return err - } - for k, v := range m { - result := conv.Call([]reflect.Value{reflect.ValueOf(k)}) - if err := result[1].Interface(); err != nil { - return err.(error) - } - bk := result[0] - bv := reflect.New(rv.Type().Elem()) - if err := unmarshalJSONPb([]byte(*v), bv.Interface()); err != nil { - return err - } - rv.SetMapIndex(bk, bv.Elem()) - } - return nil - } - if _, ok := rv.Interface().(protoEnum); ok { - var repr interface{} - if err := d.Decode(&repr); err != nil { - return err - } - switch repr.(type) { - case string: - // TODO(yugui) Should use proto.StructProperties? - return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface()) - case float64: - rv.Set(reflect.ValueOf(int32(repr.(float64))).Convert(rv.Type())) - return nil - default: - return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface()) - } - } - return d.Decode(v) -} - -type protoEnum interface { - fmt.Stringer - EnumDescriptor() ([]byte, []int) -} - -var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem() - -// Delimiter for newline encoded JSON streams. -func (j *JSONPb) Delimiter() []byte { - return []byte("\n") -} - -// allowUnknownFields helps not to return an error when the destination -// is a struct and the input contains object keys which do not match any -// non-ignored, exported fields in the destination. -var allowUnknownFields = true - -// DisallowUnknownFields enables option in decoder (unmarshaler) to -// return an error when it finds an unknown field. This function must be -// called before using the JSON marshaler. 
-func DisallowUnknownFields() { - allowUnknownFields = false -} - -// convFromType is an exact copy from https://github.com/grpc-ecosystem/grpc-gateway/blob/master/runtime/query.go -var ( - convFromType = map[reflect.Kind]reflect.Value{ - reflect.String: reflect.ValueOf(runtime.String), - reflect.Bool: reflect.ValueOf(runtime.Bool), - reflect.Float64: reflect.ValueOf(runtime.Float64), - reflect.Float32: reflect.ValueOf(runtime.Float32), - reflect.Int64: reflect.ValueOf(runtime.Int64), - reflect.Int32: reflect.ValueOf(runtime.Int32), - reflect.Uint64: reflect.ValueOf(runtime.Uint64), - reflect.Uint32: reflect.ValueOf(runtime.Uint32), - reflect.Slice: reflect.ValueOf(runtime.Bytes), - } -) diff --git a/internal/otel_collector/receiver/otlpreceiver/internal/metrics/otlp.go b/internal/otel_collector/receiver/otlpreceiver/internal/metrics/otlp.go index 23f5aa95b71..c0215359742 100644 --- a/internal/otel_collector/receiver/otlpreceiver/internal/metrics/otlp.go +++ b/internal/otel_collector/receiver/otlpreceiver/internal/metrics/otlp.go @@ -20,14 +20,14 @@ import ( "go.opentelemetry.io/collector/client" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal" - collectormetrics "go.opentelemetry.io/collector/internal/data/protogen/collector/metrics/v1" + "go.opentelemetry.io/collector/model/otlpgrpc" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/obsreport" ) const ( dataFormatProtobuf = "protobuf" + receiverTransport = "grpc" ) // Receiver is the type used to handle metrics from OpenTelemetry exporters. @@ -39,38 +39,18 @@ type Receiver struct { // New creates a new Receiver reference. func New(id config.ComponentID, nextConsumer consumer.Metrics) *Receiver { - r := &Receiver{ + return &Receiver{ id: id, nextConsumer: nextConsumer, obsrecv: obsreport.NewReceiver(obsreport.ReceiverSettings{ReceiverID: id, Transport: receiverTransport}), } - return r } -const ( - receiverTransport = "grpc" -) - -var receiverID = config.NewIDWithName("otlp", "metrics") - // Export implements the service Export metrics func. 
-func (r *Receiver) Export(ctx context.Context, req *collectormetrics.ExportMetricsServiceRequest) (*collectormetrics.ExportMetricsServiceResponse, error) { - receiverCtx := obsreport.ReceiverContext(ctx, r.id, receiverTransport) - - md := pdata.MetricsFromInternalRep(internal.MetricsFromOtlp(req)) - - err := r.sendToNextConsumer(receiverCtx, md) - if err != nil { - return nil, err - } - - return &collectormetrics.ExportMetricsServiceResponse{}, nil -} - -func (r *Receiver) sendToNextConsumer(ctx context.Context, md pdata.Metrics) error { - metricCount, dataPointCount := md.MetricAndDataPointCount() - if metricCount == 0 { - return nil +func (r *Receiver) Export(ctx context.Context, md pdata.Metrics) (otlpgrpc.MetricsResponse, error) { + dataPointCount := md.DataPointCount() + if dataPointCount == 0 { + return otlpgrpc.NewMetricsResponse(), nil } if c, ok := client.FromGRPC(ctx); ok { @@ -81,5 +61,5 @@ func (r *Receiver) sendToNextConsumer(ctx context.Context, md pdata.Metrics) err err := r.nextConsumer.ConsumeMetrics(ctx, md) r.obsrecv.EndMetricsOp(ctx, dataFormatProtobuf, dataPointCount, err) - return err + return otlpgrpc.NewMetricsResponse(), err } diff --git a/internal/otel_collector/receiver/otlpreceiver/internal/trace/otlp.go b/internal/otel_collector/receiver/otlpreceiver/internal/trace/otlp.go index f5ec0455acd..f20e911df13 100644 --- a/internal/otel_collector/receiver/otlpreceiver/internal/trace/otlp.go +++ b/internal/otel_collector/receiver/otlpreceiver/internal/trace/otlp.go @@ -20,14 +20,14 @@ import ( "go.opentelemetry.io/collector/client" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal" - collectortrace "go.opentelemetry.io/collector/internal/data/protogen/collector/trace/v1" + "go.opentelemetry.io/collector/model/otlpgrpc" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/obsreport" ) const ( dataFormatProtobuf = "protobuf" + receiverTransport = "grpc" ) // Receiver is the type used to handle spans from OpenTelemetry exporters. @@ -39,39 +39,19 @@ type Receiver struct { // New creates a new Receiver reference. func New(id config.ComponentID, nextConsumer consumer.Traces) *Receiver { - r := &Receiver{ + return &Receiver{ id: id, nextConsumer: nextConsumer, obsrecv: obsreport.NewReceiver(obsreport.ReceiverSettings{ReceiverID: id, Transport: receiverTransport}), } - - return r } -const ( - receiverTransport = "grpc" -) - -var receiverID = config.NewIDWithName("otlp", "trace") - // Export implements the service Export traces func. 
-func (r *Receiver) Export(ctx context.Context, req *collectortrace.ExportTraceServiceRequest) (*collectortrace.ExportTraceServiceResponse, error) { +func (r *Receiver) Export(ctx context.Context, td pdata.Traces) (otlpgrpc.TracesResponse, error) { // We need to ensure that it propagates the receiver name as a tag - ctxWithReceiverName := obsreport.ReceiverContext(ctx, r.id, receiverTransport) - internal.TracesCompatibilityChanges(req) - td := pdata.TracesFromInternalRep(internal.TracesFromOtlp(req)) - err := r.sendToNextConsumer(ctxWithReceiverName, td) - if err != nil { - return nil, err - } - - return &collectortrace.ExportTraceServiceResponse{}, nil -} - -func (r *Receiver) sendToNextConsumer(ctx context.Context, td pdata.Traces) error { numSpans := td.SpanCount() if numSpans == 0 { - return nil + return otlpgrpc.NewTracesResponse(), nil } if c, ok := client.FromGRPC(ctx); ok { @@ -82,5 +62,5 @@ func (r *Receiver) sendToNextConsumer(ctx context.Context, td pdata.Traces) erro err := r.nextConsumer.ConsumeTraces(ctx, td) r.obsrecv.EndTracesOp(ctx, dataFormatProtobuf, numSpans, err) - return err + return otlpgrpc.NewTracesResponse(), err } diff --git a/internal/otel_collector/receiver/otlpreceiver/mixin.go b/internal/otel_collector/receiver/otlpreceiver/mixin.go index a5c2a4e2a90..e8602b95b1b 100644 --- a/internal/otel_collector/receiver/otlpreceiver/mixin.go +++ b/internal/otel_collector/receiver/otlpreceiver/mixin.go @@ -17,56 +17,33 @@ package otlpreceiver import ( "context" - gatewayruntime "github.com/grpc-ecosystem/grpc-gateway/runtime" "google.golang.org/grpc" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" - collectorlog "go.opentelemetry.io/collector/internal/data/protogen/collector/logs/v1" - collectormetrics "go.opentelemetry.io/collector/internal/data/protogen/collector/metrics/v1" - collectortrace "go.opentelemetry.io/collector/internal/data/protogen/collector/trace/v1" + "go.opentelemetry.io/collector/model/otlpgrpc" "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/logs" "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metrics" "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/trace" ) -// RegisterTraceReceiver registers the trace receiver with a gRPC server and/or grpc-gateway mux, if non-nil. -func RegisterTraceReceiver(ctx context.Context, consumer consumer.Traces, serverGRPC *grpc.Server, gatewayMux *gatewayruntime.ServeMux) error { +// RegisterTraceReceiver registers the trace receiver with a gRPC server. +func RegisterTraceReceiver(ctx context.Context, consumer consumer.Traces, serverGRPC *grpc.Server) error { receiver := trace.New(config.NewID("otlp"), consumer) - if serverGRPC != nil { - collectortrace.RegisterTraceServiceServer(serverGRPC, receiver) - } - if gatewayMux != nil { - err := collectortrace.RegisterTraceServiceHandlerServer(ctx, gatewayMux, receiver) - if err != nil { - return err - } - // Also register an alias handler. This fixes bug https://github.com/open-telemetry/opentelemetry-collector/issues/1968 - return collectortrace.RegisterTraceServiceHandlerServerAlias(ctx, gatewayMux, receiver) - } + otlpgrpc.RegisterTracesServer(serverGRPC, receiver) return nil } -// RegisterMetricsReceiver registers the metrics receiver with a gRPC server and/or grpc-gateway mux, if non-nil. 
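// Editor's note: an illustrative sketch, not part of the patch. With the
// grpc-gateway mux removed from this file, all three OTLP signals are
// registered directly onto a *grpc.Server using the simplified signatures in
// this hunk; the consumers are stand-ins for whatever pipeline the caller owns.
package example

import (
	"context"

	"google.golang.org/grpc"

	"go.opentelemetry.io/collector/consumer"
	"go.opentelemetry.io/collector/receiver/otlpreceiver"
)

func registerOTLP(ctx context.Context, srv *grpc.Server, tc consumer.Traces, mc consumer.Metrics, lc consumer.Logs) error {
	if err := otlpreceiver.RegisterTraceReceiver(ctx, tc, srv); err != nil {
		return err
	}
	if err := otlpreceiver.RegisterMetricsReceiver(ctx, mc, srv); err != nil {
		return err
	}
	return otlpreceiver.RegisterLogsReceiver(ctx, lc, srv)
}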
-func RegisterMetricsReceiver(ctx context.Context, consumer consumer.Metrics, serverGRPC *grpc.Server, gatewayMux *gatewayruntime.ServeMux) error { +// RegisterMetricsReceiver registers the metrics receiver with a gRPC server. +func RegisterMetricsReceiver(ctx context.Context, consumer consumer.Metrics, serverGRPC *grpc.Server) error { receiver := metrics.New(config.NewID("otlp"), consumer) - if serverGRPC != nil { - collectormetrics.RegisterMetricsServiceServer(serverGRPC, receiver) - } - if gatewayMux != nil { - return collectormetrics.RegisterMetricsServiceHandlerServer(ctx, gatewayMux, receiver) - } + otlpgrpc.RegisterMetricsServer(serverGRPC, receiver) return nil } -// RegisterLogsReceiver registers the logs receiver with a gRPC server and/or grpc-gateway mux, if non-nil. -func RegisterLogsReceiver(ctx context.Context, consumer consumer.Logs, serverGRPC *grpc.Server, gatewayMux *gatewayruntime.ServeMux) error { +// RegisterLogsReceiver registers the logs receiver with a gRPC server. +func RegisterLogsReceiver(ctx context.Context, consumer consumer.Logs, serverGRPC *grpc.Server) error { receiver := logs.New(config.NewID("otlp"), consumer) - if serverGRPC != nil { - collectorlog.RegisterLogsServiceServer(serverGRPC, receiver) - } - if gatewayMux != nil { - return collectorlog.RegisterLogsServiceHandlerServer(ctx, gatewayMux, receiver) - } + otlpgrpc.RegisterLogsServer(serverGRPC, receiver) return nil } diff --git a/internal/otel_collector/receiver/otlpreceiver/otlp.go b/internal/otel_collector/receiver/otlpreceiver/otlp.go index 51b851a70eb..f4646fc8027 100644 --- a/internal/otel_collector/receiver/otlpreceiver/otlp.go +++ b/internal/otel_collector/receiver/otlpreceiver/otlp.go @@ -20,7 +20,7 @@ import ( "net/http" "sync" - gatewayruntime "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/gorilla/mux" "go.uber.org/zap" "google.golang.org/grpc" @@ -29,20 +29,23 @@ import ( "go.opentelemetry.io/collector/config/configgrpc" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/consumer" - collectorlog "go.opentelemetry.io/collector/internal/data/protogen/collector/logs/v1" - collectormetrics "go.opentelemetry.io/collector/internal/data/protogen/collector/metrics/v1" - collectortrace "go.opentelemetry.io/collector/internal/data/protogen/collector/trace/v1" - "go.opentelemetry.io/collector/receiver/otlpreceiver/internal" + "go.opentelemetry.io/collector/model/otlp" + "go.opentelemetry.io/collector/model/otlpgrpc" "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/logs" "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metrics" "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/trace" ) +const ( + pbContentType = "application/x-protobuf" + jsonContentType = "application/json" +) + // otlpReceiver is the type that exposes Trace and Metrics reception. type otlpReceiver struct { cfg *Config serverGRPC *grpc.Server - gatewayMux *gatewayruntime.ServeMux + httpMux *mux.Router serverHTTP *http.Server traceReceiver *trace.Receiver @@ -62,19 +65,7 @@ func newOtlpReceiver(cfg *Config, logger *zap.Logger) *otlpReceiver { logger: logger, } if cfg.HTTP != nil { - // Use our custom JSON marshaler instead of default Protobuf JSON marshaler. - // This is needed because OTLP spec defines encoding for trace and span id - // and it is only possible to do using Gogoproto-compatible JSONPb marshaler. 
- jsonpb := &internal.JSONPb{ - EmitDefaults: true, - Indent: " ", - OrigName: true, - } - r.gatewayMux = gatewayruntime.NewServeMux( - gatewayruntime.WithProtoErrorHandler(gatewayruntime.DefaultHTTPProtoErrorHandler), - gatewayruntime.WithMarshalerOption("application/x-protobuf", &xProtobufMarshaler{}), - gatewayruntime.WithMarshalerOption(gatewayruntime.MIMEWildcard, jsonpb), - ) + r.httpMux = mux.NewRouter() } return r @@ -127,15 +118,15 @@ func (r *otlpReceiver) startProtocolServers(host component.Host) error { r.serverGRPC = grpc.NewServer(opts...) if r.traceReceiver != nil { - collectortrace.RegisterTraceServiceServer(r.serverGRPC, r.traceReceiver) + otlpgrpc.RegisterTracesServer(r.serverGRPC, r.traceReceiver) } if r.metricsReceiver != nil { - collectormetrics.RegisterMetricsServiceServer(r.serverGRPC, r.metricsReceiver) + otlpgrpc.RegisterMetricsServer(r.serverGRPC, r.metricsReceiver) } if r.logReceiver != nil { - collectorlog.RegisterLogsServiceServer(r.serverGRPC, r.logReceiver) + otlpgrpc.RegisterLogsServer(r.serverGRPC, r.logReceiver) } err = r.startGRPCServer(r.cfg.GRPC, host) @@ -157,7 +148,7 @@ func (r *otlpReceiver) startProtocolServers(host component.Host) error { } if r.cfg.HTTP != nil { r.serverHTTP = r.cfg.HTTP.ToServer( - r.gatewayMux, + r.httpMux, confighttp.WithErrorHandler(errorHandler), ) err = r.startHTTPServer(r.cfg.HTTP, host) @@ -191,40 +182,67 @@ func (r *otlpReceiver) Shutdown(ctx context.Context) error { return err } -func (r *otlpReceiver) registerTraceConsumer(ctx context.Context, tc consumer.Traces) error { +var tracesPbUnmarshaler = otlp.NewProtobufTracesUnmarshaler() +var tracesJSONUnmarshaler = otlp.NewJSONTracesUnmarshaler() + +func (r *otlpReceiver) registerTraceConsumer(tc consumer.Traces) error { if tc == nil { return componenterror.ErrNilNextConsumer } r.traceReceiver = trace.New(r.cfg.ID(), tc) - if r.gatewayMux != nil { - err := collectortrace.RegisterTraceServiceHandlerServer(ctx, r.gatewayMux, r.traceReceiver) - if err != nil { - return err - } - // Also register an alias handler. 
This fixes bug https://github.com/open-telemetry/opentelemetry-collector/issues/1968 - return collectortrace.RegisterTraceServiceHandlerServerAlias(ctx, r.gatewayMux, r.traceReceiver) + if r.httpMux != nil { + r.httpMux.HandleFunc("/v1/traces", func(resp http.ResponseWriter, req *http.Request) { + handleTraces(resp, req, pbContentType, r.traceReceiver, tracesPbUnmarshaler) + }).Methods(http.MethodPost).Headers("Content-Type", pbContentType) + // For backwards compatibility see https://github.com/open-telemetry/opentelemetry-collector/issues/1968 + r.httpMux.HandleFunc("/v1/trace", func(resp http.ResponseWriter, req *http.Request) { + handleTraces(resp, req, pbContentType, r.traceReceiver, tracesPbUnmarshaler) + }).Methods(http.MethodPost).Headers("Content-Type", pbContentType) + r.httpMux.HandleFunc("/v1/traces", func(resp http.ResponseWriter, req *http.Request) { + handleTraces(resp, req, jsonContentType, r.traceReceiver, tracesJSONUnmarshaler) + }).Methods(http.MethodPost).Headers("Content-Type", jsonContentType) + // For backwards compatibility see https://github.com/open-telemetry/opentelemetry-collector/issues/1968 + r.httpMux.HandleFunc("/v1/trace", func(resp http.ResponseWriter, req *http.Request) { + handleTraces(resp, req, jsonContentType, r.traceReceiver, tracesJSONUnmarshaler) + }).Methods(http.MethodPost).Headers("Content-Type", jsonContentType) } return nil } -func (r *otlpReceiver) registerMetricsConsumer(ctx context.Context, mc consumer.Metrics) error { +var metricsPbUnmarshaler = otlp.NewProtobufMetricsUnmarshaler() +var metricsJSONUnmarshaler = otlp.NewJSONMetricsUnmarshaler() + +func (r *otlpReceiver) registerMetricsConsumer(mc consumer.Metrics) error { if mc == nil { return componenterror.ErrNilNextConsumer } r.metricsReceiver = metrics.New(r.cfg.ID(), mc) - if r.gatewayMux != nil { - return collectormetrics.RegisterMetricsServiceHandlerServer(ctx, r.gatewayMux, r.metricsReceiver) + if r.httpMux != nil { + r.httpMux.HandleFunc("/v1/metrics", func(resp http.ResponseWriter, req *http.Request) { + handleMetrics(resp, req, pbContentType, r.metricsReceiver, metricsPbUnmarshaler) + }).Methods(http.MethodPost).Headers("Content-Type", pbContentType) + r.httpMux.HandleFunc("/v1/metrics", func(resp http.ResponseWriter, req *http.Request) { + handleMetrics(resp, req, jsonContentType, r.metricsReceiver, metricsJSONUnmarshaler) + }).Methods(http.MethodPost).Headers("Content-Type", jsonContentType) } return nil } -func (r *otlpReceiver) registerLogsConsumer(ctx context.Context, tc consumer.Logs) error { - if tc == nil { +var logsPbUnmarshaler = otlp.NewProtobufLogsUnmarshaler() +var logsJSONUnmarshaler = otlp.NewJSONLogsUnmarshaler() + +func (r *otlpReceiver) registerLogsConsumer(lc consumer.Logs) error { + if lc == nil { return componenterror.ErrNilNextConsumer } - r.logReceiver = logs.New(r.cfg.ID(), tc) - if r.gatewayMux != nil { - return collectorlog.RegisterLogsServiceHandlerServer(ctx, r.gatewayMux, r.logReceiver) + r.logReceiver = logs.New(r.cfg.ID(), lc) + if r.httpMux != nil { + r.httpMux.HandleFunc("/v1/logs", func(w http.ResponseWriter, req *http.Request) { + handleLogs(w, req, pbContentType, r.logReceiver, logsPbUnmarshaler) + }).Methods(http.MethodPost).Headers("Content-Type", pbContentType) + r.httpMux.HandleFunc("/v1/logs", func(w http.ResponseWriter, req *http.Request) { + handleLogs(w, req, jsonContentType, r.logReceiver, logsJSONUnmarshaler) + }).Methods(http.MethodPost).Headers("Content-Type", jsonContentType) } return nil } diff --git 
a/internal/otel_collector/receiver/otlpreceiver/otlphttp.go b/internal/otel_collector/receiver/otlpreceiver/otlphttp.go index 9da35519f64..16ad9b25d26 100644 --- a/internal/otel_collector/receiver/otlpreceiver/otlphttp.go +++ b/internal/otel_collector/receiver/otlpreceiver/otlphttp.go @@ -16,61 +16,169 @@ package otlpreceiver import ( "bytes" + "io/ioutil" "net/http" "github.com/gogo/protobuf/jsonpb" - "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/types" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "google.golang.org/protobuf/proto" + + "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/logs" + "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metrics" + "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/trace" ) -// xProtobufMarshaler is a Marshaler which wraps runtime.ProtoMarshaller -// and sets ContentType to application/x-protobuf -type xProtobufMarshaler struct { - *runtime.ProtoMarshaller +var jsonMarshaler = &jsonpb.Marshaler{} + +func handleTraces( + resp http.ResponseWriter, + req *http.Request, + contentType string, + tracesReceiver *trace.Receiver, + tracesUnmarshaler pdata.TracesUnmarshaler) { + body, ok := readAndCloseBody(resp, req, contentType) + if !ok { + return + } + + td, err := tracesUnmarshaler.UnmarshalTraces(body) + if err != nil { + writeError(resp, contentType, err, http.StatusBadRequest) + return + } + + _, err = tracesReceiver.Export(req.Context(), td) + if err != nil { + writeError(resp, contentType, err, http.StatusInternalServerError) + return + } + + // TODO: Pass response from grpc handler when otlpgrpc returns concrete type. + writeResponse(resp, contentType, http.StatusOK, &types.Empty{}) } -// ContentType always returns "application/x-protobuf". -func (*xProtobufMarshaler) ContentType() string { - return "application/x-protobuf" +func handleMetrics( + resp http.ResponseWriter, + req *http.Request, + contentType string, + metricsReceiver *metrics.Receiver, + metricsUnmarshaler pdata.MetricsUnmarshaler) { + body, ok := readAndCloseBody(resp, req, contentType) + if !ok { + return + } + + md, err := metricsUnmarshaler.UnmarshalMetrics(body) + if err != nil { + writeError(resp, contentType, err, http.StatusBadRequest) + return + } + + _, err = metricsReceiver.Export(req.Context(), md) + if err != nil { + writeError(resp, contentType, err, http.StatusInternalServerError) + return + } + + // TODO: Pass response from grpc handler when otlpgrpc returns concrete type. + writeResponse(resp, contentType, http.StatusOK, &types.Empty{}) } -var jsonMarshaler = &jsonpb.Marshaler{} +func handleLogs( + resp http.ResponseWriter, + req *http.Request, + contentType string, + logsReceiver *logs.Receiver, + logsUnmarshaler pdata.LogsUnmarshaler) { + body, ok := readAndCloseBody(resp, req, contentType) + if !ok { + return + } -// errorHandler encodes the HTTP error message inside a rpc.Status message as required -// by the OTLP protocol. -func errorHandler(w http.ResponseWriter, r *http.Request, errMsg string, statusCode int) { - var ( - msg []byte - s *status.Status - err error - ) - // Pre-computed status with code=Internal to be used in case of a marshaling error. 
- fallbackMsg := []byte(`{"code": 13, "message": "failed to marshal error message"}`) - fallbackContentType := "application/json" + ld, err := logsUnmarshaler.UnmarshalLogs(body) + if err != nil { + writeError(resp, contentType, err, http.StatusBadRequest) + return + } + _, err = logsReceiver.Export(req.Context(), ld) + if err != nil { + writeError(resp, contentType, err, http.StatusInternalServerError) + return + } + + // TODO: Pass response from grpc handler when otlpgrpc returns concrete type. + writeResponse(resp, contentType, http.StatusOK, &types.Empty{}) +} + +func readAndCloseBody(resp http.ResponseWriter, req *http.Request, contentType string) ([]byte, bool) { + body, err := ioutil.ReadAll(req.Body) + if err != nil { + writeError(resp, contentType, err, http.StatusBadRequest) + return nil, false + } + if err = req.Body.Close(); err != nil { + writeError(resp, contentType, err, http.StatusBadRequest) + return nil, false + } + return body, true +} + +// writeError encodes the HTTP error inside a rpc.Status message as required by the OTLP protocol. +func writeError(w http.ResponseWriter, contentType string, err error, statusCode int) { + s, ok := status.FromError(err) + if ok { + writeResponse(w, contentType, statusCode, s.Proto()) + } else { + writeErrorMsg(w, contentType, err.Error(), statusCode) + } +} + +// writeErrorMsg encodes the HTTP error message inside a rpc.Status message as required +// by the OTLP protocol. +func writeErrorMsg(w http.ResponseWriter, contentType string, errMsg string, statusCode int) { + var s *status.Status if statusCode == http.StatusBadRequest { s = status.New(codes.InvalidArgument, errMsg) } else { - s = status.New(codes.Internal, errMsg) + s = status.New(codes.Unknown, errMsg) } - contentType := r.Header.Get("Content-Type") + writeResponse(w, contentType, statusCode, s.Proto()) +} + +// errorHandler encodes the HTTP error message inside a rpc.Status message as required +// by the OTLP protocol. +func errorHandler(w http.ResponseWriter, r *http.Request, errMsg string, statusCode int) { + writeErrorMsg(w, r.Header.Get("Content-Type"), errMsg, statusCode) +} + +// Pre-computed status with code=Internal to be used in case of a marshaling error. +var fallbackMsg = []byte(`{"code": 13, "message": "failed to marshal error message"}`) + +const fallbackContentType = "application/json" + +func writeResponse(w http.ResponseWriter, contentType string, statusCode int, rsp proto.Message) { + var err error + var msg []byte if contentType == "application/json" { buf := new(bytes.Buffer) - err = jsonMarshaler.Marshal(buf, s.Proto()) + err = jsonMarshaler.Marshal(buf, rsp) msg = buf.Bytes() } else { - msg, err = proto.Marshal(s.Proto()) + msg, err = proto.Marshal(rsp) } + if err != nil { msg = fallbackMsg contentType = fallbackContentType statusCode = http.StatusInternalServerError } - w.Header().Set("Content-Type", contentType) w.WriteHeader(statusCode) - w.Write(msg) // nolint:errcheck + // Nothing we can do with the error if we cannot write to the response. 
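To show what the handlers above imply for clients, here is a hedged sketch of posting a protobuf-encoded OTLP trace request and decoding an error response as a google.rpc.Status. The `otlp.NewProtobufTracesMarshaler` constructor and the `localhost:55681` endpoint are assumptions made for the example; only the unmarshalers appear in this diff.

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"

	spb "google.golang.org/genproto/googleapis/rpc/status"
	"google.golang.org/protobuf/proto"

	"go.opentelemetry.io/collector/model/otlp"
	"go.opentelemetry.io/collector/model/pdata"
)

func main() {
	// Marshal an (empty) trace payload; a real client would fill in spans first.
	body, err := otlp.NewProtobufTracesMarshaler().MarshalTraces(pdata.NewTraces())
	if err != nil {
		log.Fatal(err)
	}

	resp, err := http.Post("http://localhost:55681/v1/traces", "application/x-protobuf", bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		// Error responses carry a serialized google.rpc.Status, as written by writeError above.
		raw, _ := ioutil.ReadAll(resp.Body)
		var st spb.Status
		if err := proto.Unmarshal(raw, &st); err == nil {
			fmt.Printf("server rejected request: code=%d message=%q\n", st.Code, st.Message)
		}
		return
	}
	fmt.Println("accepted")
}
```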
+ _, _ = w.Write(msg) } diff --git a/internal/otel_collector/receiver/prometheusreceiver/README.md b/internal/otel_collector/receiver/prometheusreceiver/README.md index 028269e2b38..e0085ada2c7 100644 --- a/internal/otel_collector/receiver/prometheusreceiver/README.md +++ b/internal/otel_collector/receiver/prometheusreceiver/README.md @@ -21,9 +21,9 @@ and please don't use it if the following limitations is a concern: ## Getting Started This receiver is a drop-in replacement for getting Prometheus to scrape your -services. It supports the full set of Prometheus configuration, including -service discovery. Just like you would write in a YAML configuration file -before starting Prometheus, such as with: +services. It supports [the full set of Prometheus configuration in `scrape_config`][sc], +including service discovery. Just like you would write in a YAML configuration +file before starting Prometheus, such as with: **Note**: Since the collector configuration supports env variable substitution `$` characters in your prometheus configuration are interpreted as environment @@ -65,3 +65,5 @@ receivers: regex: "(request_duration_seconds.*|response_duration_seconds.*)" action: keep ``` + +[sc]: https://github.com/prometheus/prometheus/blob/v2.28.1/docs/configuration/configuration.md#scrape_config \ No newline at end of file diff --git a/internal/otel_collector/receiver/prometheusreceiver/config.go b/internal/otel_collector/receiver/prometheusreceiver/config.go index d7080de38f4..af310545393 100644 --- a/internal/otel_collector/receiver/prometheusreceiver/config.go +++ b/internal/otel_collector/receiver/prometheusreceiver/config.go @@ -15,6 +15,7 @@ package prometheusreceiver import ( + "errors" "fmt" "time" @@ -49,10 +50,22 @@ type Config struct { var _ config.Receiver = (*Config)(nil) var _ config.CustomUnmarshable = (*Config)(nil) -// Validate checks the receiver configuration is valid +// Validate checks the receiver configuration is valid. func (cfg *Config) Validate() error { - if cfg.PrometheusConfig != nil && len(cfg.PrometheusConfig.ScrapeConfigs) == 0 { - return errNilScrapeConfig + if cfg.PrometheusConfig == nil { + return nil // noop receiver + } + if len(cfg.PrometheusConfig.ScrapeConfigs) == 0 { + return errors.New("no Prometheus scrape_configs") + } + + for _, sc := range cfg.PrometheusConfig.ScrapeConfigs { + for _, rc := range sc.MetricRelabelConfigs { + if rc.TargetLabel == "__name__" { + // TODO(#2297): Remove validation after renaming is fixed + return fmt.Errorf("error validating scrapeconfig for job %v: %w", sc.JobName, errRenamingDisallowed) + } + } } return nil } @@ -84,5 +97,6 @@ func (cfg *Config) Unmarshal(componentParser *configparser.Parser) error { if err != nil { return fmt.Errorf("prometheus receiver failed to unmarshal yaml to prometheus config: %s", err) } + return nil } diff --git a/internal/otel_collector/receiver/prometheusreceiver/doc.go b/internal/otel_collector/receiver/prometheusreceiver/doc.go index cc269dc5421..f90c4691881 100644 --- a/internal/otel_collector/receiver/prometheusreceiver/doc.go +++ b/internal/otel_collector/receiver/prometheusreceiver/doc.go @@ -12,6 +12,5 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package prometheusreceiver has the logic for scraping Prometheus metrics from -// already instrumented applications and then passing them onto a metricsink instance. +// Package prometheusreceiver autodiscovers and scrapes Prometheus metrics handlers, often served at /metrics. 
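Relatedly, the README note above about environment-variable substitution is easy to trip over. A hedged configuration sketch (job name and target are made up) using the collector's `$$` escape so the relabel replacement keeps a literal `$1` for Prometheus:

```yaml
receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: "my-service"            # hypothetical job name
          static_configs:
            - targets: ["localhost:8080"]   # hypothetical target
          relabel_configs:
            - source_labels: [__address__]
              regex: "(.*):.*"
              # "$$1" survives the collector's env-var substitution as a literal "$1"
              # so Prometheus still sees the capture-group reference.
              replacement: "$$1"
              target_label: host
```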
package prometheusreceiver diff --git a/internal/otel_collector/receiver/prometheusreceiver/factory.go b/internal/otel_collector/receiver/prometheusreceiver/factory.go index b2d0f40e893..74e57098294 100644 --- a/internal/otel_collector/receiver/prometheusreceiver/factory.go +++ b/internal/otel_collector/receiver/prometheusreceiver/factory.go @@ -32,9 +32,7 @@ const ( typeStr = "prometheus" ) -var ( - errNilScrapeConfig = errors.New("expecting a non-nil ScrapeConfig") -) +var errRenamingDisallowed = errors.New("metric renaming using metric_relabel_configs is disallowed") // NewFactory creates a new Prometheus receiver factory. func NewFactory() component.ReceiverFactory { diff --git a/internal/otel_collector/receiver/prometheusreceiver/internal/metadata.go b/internal/otel_collector/receiver/prometheusreceiver/internal/metadata.go index e5b4ccd20de..0898f252a76 100644 --- a/internal/otel_collector/receiver/prometheusreceiver/internal/metadata.go +++ b/internal/otel_collector/receiver/prometheusreceiver/internal/metadata.go @@ -16,6 +16,7 @@ package internal import ( "errors" + "sync" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" @@ -33,10 +34,27 @@ type ScrapeManager interface { } type metadataService struct { - sm ScrapeManager + sync.Mutex + stopped bool + sm ScrapeManager +} + +func (s *metadataService) Close() { + s.Lock() + s.stopped = true + s.Unlock() } func (s *metadataService) Get(job, instance string) (MetadataCache, error) { + s.Lock() + defer s.Unlock() + + // If we're already stopped return early so that we don't call scrapeManager.TargetsAll() + // which will result in deadlock if scrapeManager is being stopped. + if s.stopped { + return nil, errAlreadyStopped + } + targetGroup, ok := s.sm.TargetsAll()[job] if !ok { return nil, errors.New("unable to find a target group with job=" + job) diff --git a/internal/otel_collector/receiver/prometheusreceiver/internal/metricfamily.go b/internal/otel_collector/receiver/prometheusreceiver/internal/metricfamily.go index eca80c3fdec..e8f200a46f9 100644 --- a/internal/otel_collector/receiver/prometheusreceiver/internal/metricfamily.go +++ b/internal/otel_collector/receiver/prometheusreceiver/internal/metricfamily.go @@ -15,6 +15,7 @@ package internal import ( + "fmt" "sort" "strings" @@ -22,6 +23,7 @@ import ( "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/pkg/textparse" "github.com/prometheus/prometheus/scrape" + "go.uber.org/zap" "google.golang.org/protobuf/types/known/timestamppb" "google.golang.org/protobuf/types/known/wrapperspb" ) @@ -46,7 +48,7 @@ type metricFamily struct { groups map[string]*metricGroup } -func newMetricFamily(metricName string, mc MetadataCache) MetricFamily { +func newMetricFamily(metricName string, mc MetadataCache, logger *zap.Logger) MetricFamily { familyName := normalizeMetricName(metricName) // lookup metadata based on familyName @@ -62,11 +64,17 @@ func newMetricFamily(metricName string, mc MetadataCache) MetricFamily { metadata.Metric = familyName metadata.Type = textparse.MetricTypeUnknown } + } else if !ok && isInternalMetric(metricName) { + metadata = defineInternalMetric(metricName, metadata, logger) + } + ocaMetricType := convToOCAMetricType(metadata.Type) + if ocaMetricType == metricspb.MetricDescriptor_UNSPECIFIED { + logger.Debug(fmt.Sprintf("Invalid metric : %s %+v", metricName, metadata)) } return &metricFamily{ name: familyName, - mtype: convToOCAMetricType(metadata.Type), + mtype: ocaMetricType, mc: mc, droppedTimeseries: 0, 
 		labelKeys:          make(map[string]bool),
@@ -77,6 +85,35 @@ func newMetricFamily(metricName string, mc MetadataCache) MetricFamily {
 	}
 }
 
+// defineInternalMetric manually defines the metadata of Prometheus scraper internal metrics.
+func defineInternalMetric(metricName string, metadata scrape.MetricMetadata, logger *zap.Logger) scrape.MetricMetadata {
+	if metadata.Metric != "" && metadata.Type != "" && metadata.Help != "" {
+		logger.Debug("Internal metric seems already fully defined")
+		return metadata
+	}
+	metadata.Metric = metricName
+
+	switch metricName {
+	case scrapeUpMetricName:
+		metadata.Type = textparse.MetricTypeGauge
+		metadata.Help = "The scraping was successful"
+	case "scrape_duration_seconds":
+		metadata.Unit = "seconds"
+		metadata.Type = textparse.MetricTypeGauge
+		metadata.Help = "Duration of the scrape"
+	case "scrape_samples_scraped":
+		metadata.Type = textparse.MetricTypeGauge
+		metadata.Help = "The number of samples the target exposed"
+	case "scrape_series_added":
+		metadata.Type = textparse.MetricTypeGauge
+		metadata.Help = "The approximate number of new series in this scrape"
+	case "scrape_samples_post_metric_relabeling":
+		metadata.Type = textparse.MetricTypeGauge
+		metadata.Help = "The number of samples remaining after metric relabeling was applied"
+	}
+	return metadata
+}
+
 func (mf *metricFamily) IsSameFamily(metricName string) bool {
 	// trim known suffix if necessary
 	familyName := normalizeMetricName(metricName)
diff --git a/internal/otel_collector/receiver/prometheusreceiver/internal/metrics_adjuster.go b/internal/otel_collector/receiver/prometheusreceiver/internal/metrics_adjuster.go
index ece858a9e8b..14d69f7e0f6 100644
--- a/internal/otel_collector/receiver/prometheusreceiver/internal/metrics_adjuster.go
+++ b/internal/otel_collector/receiver/prometheusreceiver/internal/metrics_adjuster.go
@@ -266,7 +266,7 @@ func (ma *MetricsAdjuster) adjustTimeseries(metricType metricspb.MetricDescripto
 func (ma *MetricsAdjuster) adjustPoints(metricType metricspb.MetricDescriptor_Type,
 	current, initial, previous []*metricspb.Point) bool {
-	if len(current) != 1 || len(initial) != 1 || len(current) != 1 {
+	if len(current) != 1 || len(initial) != 1 || len(previous) != 1 {
 		ma.logger.Info("Adjusting Points, all lengths should be 1",
 			zap.Int("len(current)", len(current)), zap.Int("len(initial)", len(initial)), zap.Int("len(previous)", len(previous)))
 		return true
@@ -286,6 +286,9 @@ func (ma *MetricsAdjuster) isReset(metricType metricspb.MetricDescriptor_Type,
 		// note: sum of squared deviation not currently supported
 		currentDist := current.GetDistributionValue()
 		previousDist := previous.GetDistributionValue()
+		if currentDist == nil || previousDist == nil {
+			return false
+		}
 		if currentDist.Count < previousDist.Count || currentDist.Sum < previousDist.Sum {
 			// reset detected
 			return false
@@ -293,7 +296,16 @@ func (ma *MetricsAdjuster) isReset(metricType metricspb.MetricDescriptor_Type,
 	case metricspb.MetricDescriptor_SUMMARY:
 		currentSummary := current.GetSummaryValue()
 		previousSummary := previous.GetSummaryValue()
-		if currentSummary.Count.GetValue() < previousSummary.Count.GetValue() || currentSummary.Sum.GetValue() < previousSummary.Sum.GetValue() {
+		if currentSummary == nil || previousSummary == nil {
+			return false
+		}
+		if (currentSummary.Count != nil &&
+			previousSummary.Count != nil &&
+			currentSummary.Count.GetValue() < previousSummary.Count.GetValue()) ||
+
+			(currentSummary.Sum != nil &&
+				previousSummary.Sum != nil &&
+				currentSummary.Sum.GetValue() < previousSummary.Sum.GetValue()) {
 			// 
reset detected return false } diff --git a/internal/otel_collector/receiver/prometheusreceiver/internal/metricsbuilder.go b/internal/otel_collector/receiver/prometheusreceiver/internal/metricsbuilder.go index 3ffe7c159d6..7210db9b41d 100644 --- a/internal/otel_collector/receiver/prometheusreceiver/internal/metricsbuilder.go +++ b/internal/otel_collector/receiver/prometheusreceiver/internal/metricsbuilder.go @@ -18,6 +18,7 @@ import ( "errors" "fmt" "regexp" + "sort" "strconv" "strings" @@ -25,6 +26,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/pkg/textparse" + "github.com/prometheus/prometheus/pkg/value" "go.uber.org/zap" "google.golang.org/protobuf/types/known/timestamppb" ) @@ -33,12 +35,13 @@ const ( metricsSuffixCount = "_count" metricsSuffixBucket = "_bucket" metricsSuffixSum = "_sum" + metricSuffixTotal = "_total" startTimeMetricName = "process_start_time_seconds" scrapeUpMetricName = "up" ) var ( - trimmableSuffixes = []string{metricsSuffixBucket, metricsSuffixCount, metricsSuffixSum} + trimmableSuffixes = []string{metricsSuffixBucket, metricsSuffixCount, metricsSuffixSum, metricSuffixTotal} errNoDataToBuild = errors.New("there's no data to build") errNoBoundaryLabel = errors.New("given metricType has no BucketLabel or QuantileLabel") errEmptyBoundaryLabel = errors.New("BucketLabel or QuantileLabel is empty") @@ -56,12 +59,13 @@ type metricBuilder struct { startTime float64 logger *zap.Logger currentMf MetricFamily + stalenessStore *stalenessStore } // newMetricBuilder creates a MetricBuilder which is allowed to feed all the datapoints from a single prometheus // scraped page by calling its AddDataPoint function, and turn them into an opencensus data.MetricsData object // by calling its Build function -func newMetricBuilder(mc MetadataCache, useStartTimeMetric bool, startTimeMetricRegex string, logger *zap.Logger) *metricBuilder { +func newMetricBuilder(mc MetadataCache, useStartTimeMetric bool, startTimeMetricRegex string, logger *zap.Logger, stalenessStore *stalenessStore) *metricBuilder { var regex *regexp.Regexp if startTimeMetricRegex != "" { regex, _ = regexp.Compile(startTimeMetricRegex) @@ -74,6 +78,7 @@ func newMetricBuilder(mc MetadataCache, useStartTimeMetric bool, startTimeMetric droppedTimeseries: 0, useStartTimeMetric: useStartTimeMetric, startTimeMetricRegex: regex, + stalenessStore: stalenessStore, } } @@ -86,7 +91,32 @@ func (b *metricBuilder) matchStartTimeMetric(metricName string) bool { } // AddDataPoint is for feeding prometheus data complexValue in its processing order -func (b *metricBuilder) AddDataPoint(ls labels.Labels, t int64, v float64) error { +func (b *metricBuilder) AddDataPoint(ls labels.Labels, t int64, v float64) (rerr error) { + // Any datapoint with duplicate labels MUST be rejected per: + // * https://github.com/open-telemetry/wg-prometheus/issues/44 + // * https://github.com/open-telemetry/opentelemetry-collector/issues/3407 + // as Prometheus rejects such too as of version 2.16.0, released on 2020-02-13. + seen := make(map[string]bool) + dupLabels := make([]string, 0, len(ls)) + for _, label := range ls { + if _, ok := seen[label.Name]; ok { + dupLabels = append(dupLabels, label.Name) + } + seen[label.Name] = true + } + if len(dupLabels) != 0 { + sort.Strings(dupLabels) + return fmt.Errorf("invalid sample: non-unique label names: %q", dupLabels) + } + + defer func() { + // Only mark this data point as in the current scrape + // iff it isn't a stale metric. 
+		if rerr == nil && !value.IsStaleNaN(v) {
+			b.stalenessStore.markAsCurrentlySeen(ls, t)
+		}
+	}()
+
 	metricName := ls.Get(model.MetricNameLabel)
 	switch {
 	case metricName == "":
@@ -96,7 +126,6 @@ func (b *metricBuilder) AddDataPoint(ls labels.Labels, t int64, v float64) error
 	case isInternalMetric(metricName):
 		b.hasInternalMetric = true
 		lm := ls.Map()
-		delete(lm, model.MetricNameLabel)
 		// See https://www.prometheus.io/docs/concepts/jobs_instances/#automatically-generated-labels-and-time-series
 		// up: 1 if the instance is healthy, i.e. reachable, or 0 if the scrape failed.
 		if metricName == scrapeUpMetricName && v != 1.0 {
@@ -111,7 +140,6 @@ func (b *metricBuilder) AddDataPoint(ls labels.Labels, t int64, v float64) error
 				zap.String("target_labels", fmt.Sprintf("%v", lm)))
 			}
 		}
-		return nil
 	case b.useStartTimeMetric && b.matchStartTimeMetric(metricName):
 		b.startTime = v
 	}
@@ -125,9 +153,9 @@ func (b *metricBuilder) AddDataPoint(ls labels.Labels, t int64, v float64) error
 		if m != nil {
 			b.metrics = append(b.metrics, m)
 		}
-		b.currentMf = newMetricFamily(metricName, b.mc)
+		b.currentMf = newMetricFamily(metricName, b.mc, b.logger)
 	} else if b.currentMf == nil {
-		b.currentMf = newMetricFamily(metricName, b.mc)
+		b.currentMf = newMetricFamily(metricName, b.mc, b.logger)
 	}
 
 	return b.currentMf.Add(metricName, ls, t, v)
diff --git a/internal/otel_collector/receiver/prometheusreceiver/internal/ocastore.go b/internal/otel_collector/receiver/prometheusreceiver/internal/ocastore.go
index 831bd6e42bf..b7ed0427b18 100644
--- a/internal/otel_collector/receiver/prometheusreceiver/internal/ocastore.go
+++ b/internal/otel_collector/receiver/prometheusreceiver/internal/ocastore.go
@@ -51,7 +51,8 @@ type OcaStore struct {
 	receiverID config.ComponentID
 	externalLabels labels.Labels
-	logger *zap.Logger
+	logger         *zap.Logger
+	stalenessStore *stalenessStore
 }
 
 // NewOcaStore returns an ocaStore instance, which can be acted as prometheus' scrape.Appendable
@@ -74,6 +75,7 @@ func NewOcaStore(
 		startTimeMetricRegex: startTimeMetricRegex,
 		receiverID: receiverID,
 		externalLabels: externalLabels,
+		stalenessStore: newStalenessStore(),
 	}
 }
 
@@ -88,6 +90,9 @@ func (o *OcaStore) SetScrapeManager(scrapeManager *scrape.Manager) {
 func (o *OcaStore) Appender(context.Context) storage.Appender {
 	state := atomic.LoadInt32(&o.running)
 	if state == runningStateReady {
+		// First, prepare the stalenessStore for a new scrape cycle.
+		o.stalenessStore.refresh()
+
 		return newTransaction(
 			o.ctx,
 			o.jobsMap,
@@ -98,6 +103,7 @@ func (o *OcaStore) Appender(context.Context) storage.Appender {
 			o.sink,
 			o.externalLabels,
 			o.logger,
+			o.stalenessStore,
 		)
 	} else if state == runningStateInit {
 		panic("ScrapeManager is not set")
@@ -106,9 +112,11 @@ func (o *OcaStore) Appender(context.Context) storage.Appender {
 	return noop
 }
 
-func (o *OcaStore) Close() error {
-	atomic.CompareAndSwapInt32(&o.running, runningStateReady, runningStateStop)
-	return nil
+// Close OcaStore as well as the internal metadataService.
+func (o *OcaStore) Close() { + if atomic.CompareAndSwapInt32(&o.running, runningStateReady, runningStateStop) { + o.mc.Close() + } } // noopAppender, always return error on any operations diff --git a/internal/otel_collector/receiver/prometheusreceiver/internal/otlp_metricfamily.go b/internal/otel_collector/receiver/prometheusreceiver/internal/otlp_metricfamily.go index dd03a25f2cc..a92dc91f643 100644 --- a/internal/otel_collector/receiver/prometheusreceiver/internal/otlp_metricfamily.go +++ b/internal/otel_collector/receiver/prometheusreceiver/internal/otlp_metricfamily.go @@ -21,7 +21,7 @@ import ( "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/pkg/textparse" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) type metricFamilyPdata struct { @@ -100,7 +100,7 @@ func (mf *metricFamilyPdata) updateLabelKeys(ls labels.Labels) { var _ = (*metricFamilyPdata)(nil).updateLabelKeys func (mf *metricFamilyPdata) isCumulativeTypePdata() bool { - return mf.mtype == pdata.MetricDataTypeDoubleSum || + return mf.mtype == pdata.MetricDataTypeSum || mf.mtype == pdata.MetricDataTypeIntSum || mf.mtype == pdata.MetricDataTypeHistogram || mf.mtype == pdata.MetricDataTypeSummary diff --git a/internal/otel_collector/receiver/prometheusreceiver/internal/otlp_metricsbuilder.go b/internal/otel_collector/receiver/prometheusreceiver/internal/otlp_metricsbuilder.go index ea1b2a2f87e..ab2c9a7c6d1 100644 --- a/internal/otel_collector/receiver/prometheusreceiver/internal/otlp_metricsbuilder.go +++ b/internal/otel_collector/receiver/prometheusreceiver/internal/otlp_metricsbuilder.go @@ -21,7 +21,7 @@ import ( "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/pkg/textparse" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) func isUsefulLabelPdata(mType pdata.MetricDataType, labelKey string) bool { @@ -60,10 +60,10 @@ func convToPdataMetricType(metricType textparse.MetricType) pdata.MetricDataType switch metricType { case textparse.MetricTypeCounter: // always use float64, as it's the internal data type used in prometheus - return pdata.MetricDataTypeDoubleSum + return pdata.MetricDataTypeSum // textparse.MetricTypeUnknown is converted to gauge by default to fix Prometheus untyped metrics from being dropped case textparse.MetricTypeGauge, textparse.MetricTypeUnknown: - return pdata.MetricDataTypeDoubleGauge + return pdata.MetricDataTypeGauge case textparse.MetricTypeHistogram: return pdata.MetricDataTypeHistogram // dropping support for gaugehistogram for now until we have an official spec of its implementation diff --git a/internal/otel_collector/receiver/prometheusreceiver/internal/prom_to_otlp.go b/internal/otel_collector/receiver/prometheusreceiver/internal/prom_to_otlp.go index 25d0bec79e3..9f8aa13cadf 100644 --- a/internal/otel_collector/receiver/prometheusreceiver/internal/prom_to_otlp.go +++ b/internal/otel_collector/receiver/prometheusreceiver/internal/prom_to_otlp.go @@ -17,7 +17,7 @@ package internal import ( "net" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/translator/conventions" ) diff --git a/internal/otel_collector/receiver/prometheusreceiver/internal/staleness_store.go b/internal/otel_collector/receiver/prometheusreceiver/internal/staleness_store.go new file mode 100644 index 00000000000..66cecc27075 --- /dev/null +++ 
b/internal/otel_collector/receiver/prometheusreceiver/internal/staleness_store.go @@ -0,0 +1,118 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "math" + "sync" + + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/pkg/value" +) + +// Prometheus uses a special NaN to record staleness as per +// https://github.com/prometheus/prometheus/blob/67dc912ac8b24f94a1fc478f352d25179c94ab9b/pkg/value/value.go#L24-L28 +var stalenessSpecialValue = math.Float64frombits(value.StaleNaN) + +// stalenessStore tracks metrics/labels that appear between scrapes, the current and last scrape. +// The labels that appear only in the previous scrape are considered stale and for those, we +// issue a staleness marker aka a special NaN value. +// See https://github.com/open-telemetry/opentelemetry-collector/issues/3413 +type stalenessStore struct { + mu sync.Mutex // mu protects all the fields below. + currentHashes map[uint64]int64 + previousHashes map[uint64]int64 + previous []labels.Labels + current []labels.Labels +} + +func newStalenessStore() *stalenessStore { + return &stalenessStore{ + previousHashes: make(map[uint64]int64), + currentHashes: make(map[uint64]int64), + } +} + +// refresh copies over all the current values to previous, and prepares. +// refresh must be called before every new scrape. +func (ss *stalenessStore) refresh() { + ss.mu.Lock() + defer ss.mu.Unlock() + + // 1. Clear ss.previousHashes firstly. Please don't edit + // this map clearing idiom as it ensures speed. + // See: + // * https://github.com/golang/go/issues/20138 + // * https://github.com/golang/go/commit/aee71dd70b3779c66950ce6a952deca13d48e55e + for hash := range ss.previousHashes { + delete(ss.previousHashes, hash) + } + // 2. Copy over ss.currentHashes to ss.previousHashes. + for hash := range ss.currentHashes { + ss.previousHashes[hash] = ss.currentHashes[hash] + } + // 3. Clear ss.currentHashes, with the map clearing idiom for speed. + // See: + // * https://github.com/golang/go/issues/20138 + // * https://github.com/golang/go/commit/aee71dd70b3779c66950ce6a952deca13d48e55e + for hash := range ss.currentHashes { + delete(ss.currentHashes, hash) + } + // 4. Copy all the prior labels from what was previously ss.current. + ss.previous = ss.current + // 5. Clear ss.current to make for another cycle. + ss.current = nil +} + +// isStale returns whether lbl was seen only in the previous scrape and not the current. +func (ss *stalenessStore) isStale(lbl labels.Labels) bool { + ss.mu.Lock() + defer ss.mu.Unlock() + + hash := lbl.Hash() + _, inPrev := ss.previousHashes[hash] + _, inCurrent := ss.currentHashes[hash] + return inPrev && !inCurrent +} + +// markAsCurrentlySeen adds lbl to the manifest of labels seen in the current scrape. +// This method should be called before refresh, but during a scrape whenever labels are encountered. 
+func (ss *stalenessStore) markAsCurrentlySeen(lbl labels.Labels, seenAtMs int64) { + ss.mu.Lock() + defer ss.mu.Unlock() + + ss.currentHashes[lbl.Hash()] = seenAtMs + ss.current = append(ss.current, lbl) +} + +type staleEntry struct { + labels labels.Labels + seenAtMs int64 +} + +// emitStaleLabels returns the labels that were previously seen in +// the prior scrape, but are not currently present in this scrape cycle. +func (ss *stalenessStore) emitStaleLabels() (stale []*staleEntry) { + ss.mu.Lock() + defer ss.mu.Unlock() + + for _, labels := range ss.previous { + hash := labels.Hash() + if _, ok := ss.currentHashes[hash]; !ok { + stale = append(stale, &staleEntry{seenAtMs: ss.previousHashes[hash], labels: labels}) + } + } + return stale +} diff --git a/internal/otel_collector/receiver/prometheusreceiver/internal/transaction.go b/internal/otel_collector/receiver/prometheusreceiver/internal/transaction.go index 6673a3b306e..48cc5479627 100644 --- a/internal/otel_collector/receiver/prometheusreceiver/internal/transaction.go +++ b/internal/otel_collector/receiver/prometheusreceiver/internal/transaction.go @@ -76,6 +76,8 @@ type transaction struct { externalLabels labels.Labels logger *zap.Logger obsrecv *obsreport.Receiver + stalenessStore *stalenessStore + startTimeMs int64 } func newTransaction( @@ -87,7 +89,7 @@ func newTransaction( ms *metadataService, sink consumer.Metrics, externalLabels labels.Labels, - logger *zap.Logger) *transaction { + logger *zap.Logger, stalenessStore *stalenessStore) *transaction { return &transaction{ id: atomic.AddInt64(&idSeq, 1), ctx: ctx, @@ -101,6 +103,8 @@ func newTransaction( externalLabels: externalLabels, logger: logger, obsrecv: obsreport.NewReceiver(obsreport.ReceiverSettings{ReceiverID: receiverID, Transport: transport}), + stalenessStore: stalenessStore, + startTimeMs: -1, } } @@ -109,6 +113,9 @@ var _ storage.Appender = (*transaction)(nil) // Append always returns 0 to disable label caching. func (tr *transaction) Append(ref uint64, ls labels.Labels, t int64, v float64) (uint64, error) { + if tr.startTimeMs < 0 { + tr.startTimeMs = t + } // Important, must handle. prometheus will still try to feed the appender some data even if it failed to // scrape the remote target, if the previous scrape was success and some data were cached internally // in our case, we don't need these data, simply drop them shall be good enough. more details: @@ -131,6 +138,7 @@ func (tr *transaction) Append(ref uint64, ls labels.Labels, t int64, v float64) return 0, err } } + return 0, tr.metricBuilder.AddDataPoint(ls, t, v) } @@ -158,7 +166,7 @@ func (tr *transaction) initTransaction(ls labels.Labels) error { tr.instance = instance } tr.node, tr.resource = createNodeAndResource(job, instance, mc.SharedLabels().Get(model.SchemeLabel)) - tr.metricBuilder = newMetricBuilder(mc, tr.useStartTimeMetric, tr.startTimeMetricRegex, tr.logger) + tr.metricBuilder = newMetricBuilder(mc, tr.useStartTimeMetric, tr.startTimeMetricRegex, tr.logger, tr.stalenessStore) tr.isNew = false return nil } @@ -171,6 +179,15 @@ func (tr *transaction) Commit() error { return nil } + // Before building metrics, issue staleness markers for every stale metric. 
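To make the staleness-store lifecycle concrete, here is a sketch of how the pieces above fit together, written as if it lived in the same `internal` package (series labels and timestamps are invented):

```go
package internal

import (
	"fmt"

	"github.com/prometheus/prometheus/pkg/labels"
)

// demoStalenessStore walks the store through two scrape cycles.
func demoStalenessStore() {
	ss := newStalenessStore()

	// Scrape 1: two series are observed.
	a := labels.FromStrings("__name__", "http_requests_total", "code", "200")
	b := labels.FromStrings("__name__", "http_requests_total", "code", "500")
	ss.refresh()
	ss.markAsCurrentlySeen(a, 1000)
	ss.markAsCurrentlySeen(b, 1000)

	// Scrape 2: only series "a" is observed again.
	ss.refresh()
	ss.markAsCurrentlySeen(a, 2000)

	// Series "b" is now stale; Commit would emit a StaleNaN data point for it.
	for _, entry := range ss.emitStaleLabels() {
		fmt.Println("stale:", entry.labels.String(), "last seen at", entry.seenAtMs)
	}
	fmt.Println("b stale?", ss.isStale(b)) // true
}
```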
+ staleLabels := tr.stalenessStore.emitStaleLabels() + + for _, sEntry := range staleLabels { + tr.metricBuilder.AddDataPoint(sEntry.labels, sEntry.seenAtMs, stalenessSpecialValue) + } + + tr.startTimeMs = -1 + ctx := tr.obsrecv.StartMetricsOp(tr.ctx) metrics, _, _, err := tr.metricBuilder.Build() if err != nil { @@ -200,7 +217,7 @@ func (tr *transaction) Commit() error { numPoints := 0 if len(metrics) > 0 { md := internaldata.OCToMetrics(tr.node, tr.resource, metrics) - _, numPoints = md.MetricAndDataPointCount() + numPoints = md.DataPointCount() err = tr.sink.ConsumeMetrics(ctx, md) } tr.obsrecv.EndMetricsOp(ctx, dataformat, numPoints, err) @@ -208,6 +225,7 @@ func (tr *transaction) Commit() error { } func (tr *transaction) Rollback() error { + tr.startTimeMs = -1 return nil } diff --git a/internal/otel_collector/receiver/prometheusreceiver/metrics_receiver.go b/internal/otel_collector/receiver/prometheusreceiver/metrics_receiver.go index af6463e399e..e43ab7c4483 100644 --- a/internal/otel_collector/receiver/prometheusreceiver/metrics_receiver.go +++ b/internal/otel_collector/receiver/prometheusreceiver/metrics_receiver.go @@ -24,19 +24,18 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/obsreport" "go.opentelemetry.io/collector/receiver/prometheusreceiver/internal" ) -const transport = "http" - // pReceiver is the type that provides Prometheus scraper/receiver functionality. type pReceiver struct { cfg *Config consumer consumer.Metrics cancelFunc context.CancelFunc - logger *zap.Logger + logger *zap.Logger + scrapeManager *scrape.Manager + ocaStore *internal.OcaStore } // New creates a new prometheus.Receiver reference. @@ -78,9 +77,8 @@ func (r *pReceiver) Start(_ context.Context, host component.Host) error { } // Per component.Component Start instructions, for async operations we should not use the // incoming context, it may get cancelled. - receiverCtx := obsreport.ReceiverContext(context.Background(), r.cfg.ID(), transport) - ocaStore := internal.NewOcaStore( - receiverCtx, + r.ocaStore = internal.NewOcaStore( + context.Background(), r.consumer, r.logger, jobsMap, @@ -89,13 +87,13 @@ func (r *pReceiver) Start(_ context.Context, host component.Host) error { r.cfg.ID(), r.cfg.PrometheusConfig.GlobalConfig.ExternalLabels, ) - scrapeManager := scrape.NewManager(logger, ocaStore) - ocaStore.SetScrapeManager(scrapeManager) - if err := scrapeManager.ApplyConfig(r.cfg.PrometheusConfig); err != nil { + r.scrapeManager = scrape.NewManager(logger, r.ocaStore) + r.ocaStore.SetScrapeManager(r.scrapeManager) + if err := r.scrapeManager.ApplyConfig(r.cfg.PrometheusConfig); err != nil { return err } go func() { - if err := scrapeManager.Run(discoveryManager.SyncCh()); err != nil { + if err := r.scrapeManager.Run(discoveryManager.SyncCh()); err != nil { r.logger.Error("Scrape manager failed", zap.Error(err)) host.ReportFatalError(err) } @@ -106,5 +104,11 @@ func (r *pReceiver) Start(_ context.Context, host component.Host) error { // Shutdown stops and cancels the underlying Prometheus scrapers. func (r *pReceiver) Shutdown(context.Context) error { r.cancelFunc() + // ocaStore (and internally metadataService) needs to stop first to prevent deadlocks. + // When stopping scrapeManager it waits for all scrapes to terminate. However during + // scraping metadataService calls scrapeManager.AllTargets() which acquires + // the same lock that's acquired when scrapeManager is stopped. 
+ r.ocaStore.Close() + r.scrapeManager.Stop() return nil } diff --git a/internal/otel_collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-relabel.yaml b/internal/otel_collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-relabel.yaml new file mode 100644 index 00000000000..90f40fa88e8 --- /dev/null +++ b/internal/otel_collector/receiver/prometheusreceiver/testdata/invalid-config-prometheus-relabel.yaml @@ -0,0 +1,22 @@ +receivers: + prometheus: + config: + scrape_configs: + - job_name: rename + metric_relabel_configs: + - source_labels: [__name__] + regex: "foo_(.*)" + target_label: __name__ + +processors: + nop: + +exporters: + nop: + +service: + pipelines: + traces: + receivers: [prometheus] + processors: [nop] + exporters: [nop] diff --git a/internal/otel_collector/receiver/receiverhelper/doc.go b/internal/otel_collector/receiver/receiverhelper/doc.go new file mode 100644 index 00000000000..412bb53dd92 --- /dev/null +++ b/internal/otel_collector/receiver/receiverhelper/doc.go @@ -0,0 +1,16 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package receiverhelper contains utilities for receivers. +package receiverhelper diff --git a/internal/otel_collector/receiver/scrapererror/doc.go b/internal/otel_collector/receiver/scrapererror/doc.go new file mode 100644 index 00000000000..acc3c48e365 --- /dev/null +++ b/internal/otel_collector/receiver/scrapererror/doc.go @@ -0,0 +1,16 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package scraperror provides custom error types for scrapers. +package scrapererror diff --git a/internal/otel_collector/receiver/scrapererror/partialscrapeerror.go b/internal/otel_collector/receiver/scrapererror/partialscrapeerror.go index 2b23bed620c..6487fd46d5e 100644 --- a/internal/otel_collector/receiver/scrapererror/partialscrapeerror.go +++ b/internal/otel_collector/receiver/scrapererror/partialscrapeerror.go @@ -16,8 +16,8 @@ package scrapererror import "errors" -// PartialScrapeError can be used to signalize that a subset of metrics were failed -// to be scraped +// PartialScrapeError is an error to represent +// that a subset of metrics were failed to be scraped. type PartialScrapeError struct { error Failed int @@ -25,7 +25,7 @@ type PartialScrapeError struct { // NewPartialScrapeError creates PartialScrapeError for failed metrics. // Use this error type only when a subset of data was failed to be scraped. 
-func NewPartialScrapeError(err error, failed int) error { +func NewPartialScrapeError(err error, failed int) PartialScrapeError { return PartialScrapeError{ error: err, Failed: failed, diff --git a/internal/otel_collector/receiver/scrapererror/scrapeerrors.go b/internal/otel_collector/receiver/scrapererror/scrapeerror.go similarity index 100% rename from internal/otel_collector/receiver/scrapererror/scrapeerrors.go rename to internal/otel_collector/receiver/scrapererror/scrapeerror.go diff --git a/internal/otel_collector/receiver/scraperhelper/doc.go b/internal/otel_collector/receiver/scraperhelper/doc.go new file mode 100644 index 00000000000..a8bdb3877f4 --- /dev/null +++ b/internal/otel_collector/receiver/scraperhelper/doc.go @@ -0,0 +1,16 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package scraperhelper provides utilities for scrapers. +package scraperhelper diff --git a/internal/otel_collector/receiver/scraperhelper/scraper.go b/internal/otel_collector/receiver/scraperhelper/scraper.go index 6dbab5a4e56..d891431ec8d 100644 --- a/internal/otel_collector/receiver/scraperhelper/scraper.go +++ b/internal/otel_collector/receiver/scraperhelper/scraper.go @@ -20,8 +20,9 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenthelper" "go.opentelemetry.io/collector/config" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/obsreport" + "go.opentelemetry.io/collector/receiver/scrapererror" ) // ScrapeMetrics scrapes metrics. @@ -37,28 +38,15 @@ type baseSettings struct { // ScraperOption apply changes to internal options. type ScraperOption func(*baseSettings) -// BaseScraper is the base interface for scrapers. -type BaseScraper interface { +// Scraper is the base interface for scrapers. +type Scraper interface { component.Component // ID returns the scraper id. ID() config.ComponentID + Scrape(context.Context, config.ComponentID) (pdata.Metrics, error) } -// MetricsScraper is an interface for scrapers that scrape metrics. -type MetricsScraper interface { - BaseScraper - Scrape(context.Context, config.ComponentID) (pdata.MetricSlice, error) -} - -// ResourceMetricsScraper is an interface for scrapers that scrape resource metrics. 
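Returning the concrete type from NewPartialScrapeError (above) lets callers read the Failed count without a type assertion; a small usage sketch with an invented error message:

```go
package main

import (
	"errors"
	"fmt"

	"go.opentelemetry.io/collector/receiver/scrapererror"
)

func main() {
	// Report that 3 metrics failed to scrape while the rest succeeded.
	err := scrapererror.NewPartialScrapeError(errors.New("connection reset"), 3)

	fmt.Println(scrapererror.IsPartialScrapeError(err)) // true
	fmt.Println(err.Failed)                             // 3, readable because the concrete type is returned
}
```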
-type ResourceMetricsScraper interface { - BaseScraper - Scrape(context.Context, config.ComponentID) (pdata.ResourceMetricsSlice, error) -} - -var _ BaseScraper = (*baseScraper)(nil) - type baseScraper struct { component.Component id config.ComponentID @@ -87,7 +75,7 @@ type metricsScraper struct { ScrapeMetrics } -var _ MetricsScraper = (*metricsScraper)(nil) +var _ Scraper = (*metricsScraper)(nil) // NewMetricsScraper creates a Scraper that calls Scrape at the specified // collection interval, reports observability information, and passes the @@ -96,7 +84,7 @@ func NewMetricsScraper( name string, scrape ScrapeMetrics, options ...ScraperOption, -) MetricsScraper { +) Scraper { set := &baseSettings{} for _, op := range options { op(set) @@ -113,16 +101,20 @@ func NewMetricsScraper( return ms } -func (ms metricsScraper) Scrape(ctx context.Context, receiverID config.ComponentID) (pdata.MetricSlice, error) { +func (ms metricsScraper) Scrape(ctx context.Context, receiverID config.ComponentID) (pdata.Metrics, error) { ctx = obsreport.ScraperContext(ctx, receiverID, ms.ID()) - ctx = obsreport.StartMetricsScrapeOp(ctx, receiverID, ms.ID()) + scrp := obsreport.NewScraper(obsreport.ScraperSettings{ReceiverID: receiverID, Scraper: ms.ID()}) + ctx = scrp.StartMetricsOp(ctx) metrics, err := ms.ScrapeMetrics(ctx) count := 0 - if err == nil { - count = metrics.Len() + md := pdata.Metrics{} + if err == nil || scrapererror.IsPartialScrapeError(err) { + md = pdata.NewMetrics() + metrics.MoveAndAppendTo(md.ResourceMetrics().AppendEmpty().InstrumentationLibraryMetrics().AppendEmpty().Metrics()) + count = md.MetricCount() } - obsreport.EndMetricsScrapeOp(ctx, count, err) - return metrics, err + scrp.EndMetricsOp(ctx, count, err) + return md, err } type resourceMetricsScraper struct { @@ -130,7 +122,7 @@ type resourceMetricsScraper struct { ScrapeResourceMetrics } -var _ ResourceMetricsScraper = (*resourceMetricsScraper)(nil) +var _ Scraper = (*resourceMetricsScraper)(nil) // NewResourceMetricsScraper creates a Scraper that calls Scrape at the // specified collection interval, reports observability information, and @@ -139,7 +131,7 @@ func NewResourceMetricsScraper( id config.ComponentID, scrape ScrapeResourceMetrics, options ...ScraperOption, -) ResourceMetricsScraper { +) Scraper { set := &baseSettings{} for _, op := range options { op(set) @@ -156,27 +148,20 @@ func NewResourceMetricsScraper( return rms } -func (rms resourceMetricsScraper) Scrape(ctx context.Context, receiverID config.ComponentID) (pdata.ResourceMetricsSlice, error) { +func (rms resourceMetricsScraper) Scrape(ctx context.Context, receiverID config.ComponentID) (pdata.Metrics, error) { ctx = obsreport.ScraperContext(ctx, receiverID, rms.ID()) - ctx = obsreport.StartMetricsScrapeOp(ctx, receiverID, rms.ID()) + scrp := obsreport.NewScraper(obsreport.ScraperSettings{ReceiverID: receiverID, Scraper: rms.ID()}) + ctx = scrp.StartMetricsOp(ctx) resourceMetrics, err := rms.ScrapeResourceMetrics(ctx) - count := 0 - if err == nil { - count = metricCount(resourceMetrics) - } - obsreport.EndMetricsScrapeOp(ctx, count, err) - return resourceMetrics, err -} -func metricCount(resourceMetrics pdata.ResourceMetricsSlice) int { count := 0 - - for i := 0; i < resourceMetrics.Len(); i++ { - ilm := resourceMetrics.At(i).InstrumentationLibraryMetrics() - for j := 0; j < ilm.Len(); j++ { - count += ilm.At(j).Metrics().Len() - } + md := pdata.Metrics{} + if err == nil || scrapererror.IsPartialScrapeError(err) { + md = pdata.NewMetrics() + 
resourceMetrics.MoveAndAppendTo(md.ResourceMetrics()) + count = md.MetricCount() } - return count + scrp.EndMetricsOp(ctx, count, err) + return md, err } diff --git a/internal/otel_collector/receiver/scraperhelper/scrapercontroller.go b/internal/otel_collector/receiver/scraperhelper/scrapercontroller.go index a7aa9f0c848..a7f994ea114 100644 --- a/internal/otel_collector/receiver/scraperhelper/scrapercontroller.go +++ b/internal/otel_collector/receiver/scraperhelper/scrapercontroller.go @@ -26,7 +26,7 @@ import ( "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/obsreport" "go.opentelemetry.io/collector/receiver/scrapererror" ) @@ -51,26 +51,14 @@ func DefaultScraperControllerSettings(cfgType config.Type) ScraperControllerSett // ScraperControllerOption apply changes to internal options. type ScraperControllerOption func(*controller) -// AddMetricsScraper configures the provided scrape function to be called +// AddScraper configures the provided scrape function to be called // with the specified options, and at the specified collection interval. // // Observability information will be reported, and the scraped metrics // will be passed to the next consumer. -func AddMetricsScraper(scraper MetricsScraper) ScraperControllerOption { +func AddScraper(scraper Scraper) ScraperControllerOption { return func(o *controller) { - o.metricsScrapers.scrapers = append(o.metricsScrapers.scrapers, scraper) - } -} - -// AddResourceMetricsScraper configures the provided scrape function to -// be called with the specified options, and at the specified collection -// interval. -// -// Observability information will be reported, and the scraped resource -// metrics will be passed to the next consumer. -func AddResourceMetricsScraper(scraper ResourceMetricsScraper) ScraperControllerOption { - return func(o *controller) { - o.resourceMetricScrapers = append(o.resourceMetricScrapers, scraper) + o.scrapers = append(o.scrapers, scraper) } } @@ -89,8 +77,7 @@ type controller struct { collectionInterval time.Duration nextConsumer consumer.Metrics - metricsScrapers *multiMetricScraper - resourceMetricScrapers []ResourceMetricsScraper + scrapers []Scraper tickerCh <-chan time.Time @@ -121,7 +108,6 @@ func NewScraperControllerReceiver( logger: logger, collectionInterval: cfg.CollectionInterval, nextConsumer: nextConsumer, - metricsScrapers: &multiMetricScraper{}, done: make(chan struct{}), terminated: make(chan struct{}), obsrecv: obsreport.NewReceiver(obsreport.ReceiverSettings{ReceiverID: cfg.ID(), Transport: ""}), @@ -131,16 +117,12 @@ func NewScraperControllerReceiver( op(sc) } - if len(sc.metricsScrapers.scrapers) > 0 { - sc.resourceMetricScrapers = append(sc.resourceMetricScrapers, sc.metricsScrapers) - } - return sc, nil } // Start the receiver, invoked during service start. 
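As a sketch of the consolidated scraper API above, assuming NewScraperControllerReceiver keeps the settings/logger/consumer signature shown in the surrounding context (the metric name and settings type string are invented):

```go
package main

import (
	"context"
	"log"

	"go.uber.org/zap"

	"go.opentelemetry.io/collector/component/componenttest"
	"go.opentelemetry.io/collector/consumer/consumertest"
	"go.opentelemetry.io/collector/model/pdata"
	"go.opentelemetry.io/collector/receiver/scraperhelper"
)

func main() {
	// The scrape function still produces a plain MetricSlice; the helper now
	// wraps it into a pdata.Metrics envelope and reports observability data.
	scrapeFn := func(context.Context) (pdata.MetricSlice, error) {
		ms := pdata.NewMetricSlice()
		ms.AppendEmpty().SetName("demo.metric") // hypothetical metric name
		return ms, nil
	}

	settings := scraperhelper.DefaultScraperControllerSettings("demo")
	recv, err := scraperhelper.NewScraperControllerReceiver(
		&settings,
		zap.NewNop(),
		consumertest.NewNop(),
		scraperhelper.AddScraper(scraperhelper.NewMetricsScraper("demo", scrapeFn)),
	)
	if err != nil {
		log.Fatal(err)
	}
	if err := recv.Start(context.Background(), componenttest.NewNopHost()); err != nil {
		log.Fatal(err)
	}
	defer func() { _ = recv.Shutdown(context.Background()) }()
}
```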
func (sc *controller) Start(ctx context.Context, host component.Host) error { - for _, scraper := range sc.resourceMetricScrapers { + for _, scraper := range sc.scrapers { if err := scraper.Start(ctx, host); err != nil { return err } @@ -161,11 +143,12 @@ func (sc *controller) Shutdown(ctx context.Context) error { } var errs []error - for _, scraper := range sc.resourceMetricScrapers { + for _, scraper := range sc.scrapers { if err := scraper.Shutdown(ctx); err != nil { errs = append(errs, err) } } + return consumererror.Combine(errs) } @@ -196,23 +179,21 @@ func (sc *controller) startScraping() { // Scrapers, records observability information, and passes the scraped metrics // to the next component. func (sc *controller) scrapeMetricsAndReport(ctx context.Context) { - ctx = obsreport.ReceiverContext(ctx, sc.id, "") metrics := pdata.NewMetrics() - for _, rms := range sc.resourceMetricScrapers { - resourceMetrics, err := rms.Scrape(ctx, sc.id) + for _, scraper := range sc.scrapers { + md, err := scraper.Scrape(ctx, sc.id) if err != nil { - sc.logger.Error("Error scraping metrics", zap.Error(err)) + sc.logger.Error("Error scraping metrics", zap.Error(err), zap.Stringer("scraper", scraper.ID())) if !scrapererror.IsPartialScrapeError(err) { continue } } - resourceMetrics.MoveAndAppendTo(metrics.ResourceMetrics()) + md.ResourceMetrics().MoveAndAppendTo(metrics.ResourceMetrics()) } - _, dataPointCount := metrics.MetricAndDataPointCount() - + dataPointCount := metrics.DataPointCount() ctx = sc.obsrecv.StartMetricsOp(ctx) err := sc.nextConsumer.ConsumeMetrics(ctx, metrics) sc.obsrecv.EndMetricsOp(ctx, "", dataPointCount, err) @@ -222,53 +203,3 @@ func (sc *controller) scrapeMetricsAndReport(ctx context.Context) { func (sc *controller) stopScraping() { close(sc.done) } - -var _ ResourceMetricsScraper = (*multiMetricScraper)(nil) - -type multiMetricScraper struct { - scrapers []MetricsScraper -} - -func (mms *multiMetricScraper) ID() config.ComponentID { - return config.NewID("") -} - -func (mms *multiMetricScraper) Start(ctx context.Context, host component.Host) error { - for _, scraper := range mms.scrapers { - if err := scraper.Start(ctx, host); err != nil { - return err - } - } - return nil -} - -func (mms *multiMetricScraper) Shutdown(ctx context.Context) error { - var errs []error - for _, scraper := range mms.scrapers { - if err := scraper.Shutdown(ctx); err != nil { - errs = append(errs, err) - } - } - return consumererror.Combine(errs) -} - -func (mms *multiMetricScraper) Scrape(ctx context.Context, receiverID config.ComponentID) (pdata.ResourceMetricsSlice, error) { - rms := pdata.NewResourceMetricsSlice() - ilm := rms.AppendEmpty().InstrumentationLibraryMetrics().AppendEmpty() - - var errs scrapererror.ScrapeErrors - for _, scraper := range mms.scrapers { - metrics, err := scraper.Scrape(ctx, receiverID) - if err != nil { - partialErr, isPartial := err.(scrapererror.PartialScrapeError) - if isPartial { - errs.AddPartial(partialErr.Failed, partialErr) - } - - continue - } - - metrics.MoveAndAppendTo(ilm.Metrics()) - } - return rms, errs.Combine() -} diff --git a/internal/otel_collector/receiver/zipkinreceiver/doc.go b/internal/otel_collector/receiver/zipkinreceiver/doc.go new file mode 100644 index 00000000000..1b5b449cd46 --- /dev/null +++ b/internal/otel_collector/receiver/zipkinreceiver/doc.go @@ -0,0 +1,16 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the 
License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package zipkinreceiver receives Zipkin traces. +package zipkinreceiver diff --git a/internal/otel_collector/receiver/zipkinreceiver/trace_receiver.go b/internal/otel_collector/receiver/zipkinreceiver/trace_receiver.go index affa3d11e93..cb3e52aebed 100644 --- a/internal/otel_collector/receiver/zipkinreceiver/trace_receiver.go +++ b/internal/otel_collector/receiver/zipkinreceiver/trace_receiver.go @@ -18,7 +18,6 @@ import ( "compress/gzip" "compress/zlib" "context" - "encoding/json" "errors" "io" "io/ioutil" @@ -27,18 +26,15 @@ import ( "strings" "sync" - jaegerzipkin "github.com/jaegertracing/jaeger/model/converter/thrift/zipkin" - zipkinmodel "github.com/openzipkin/zipkin-go/model" - "github.com/openzipkin/zipkin-go/proto/zipkin_proto3" - "go.opentelemetry.io/collector/client" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenterror" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/obsreport" - "go.opentelemetry.io/collector/translator/trace/zipkin" + "go.opentelemetry.io/collector/translator/trace/zipkinv1" + "go.opentelemetry.io/collector/translator/trace/zipkinv2" ) const ( @@ -52,9 +48,6 @@ var errNextConsumerRespBody = []byte(`"Internal Server Error"`) // ZipkinReceiver type is used to handle spans received in the Zipkin format. 
type ZipkinReceiver struct { - // mu protects the fields of this struct - mu sync.Mutex - // addr is the address onto which the HTTP server will be bound host component.Host nextConsumer consumer.Traces @@ -63,6 +56,12 @@ type ZipkinReceiver struct { shutdownWG sync.WaitGroup server *http.Server config *Config + + v1ThriftUnmarshaler pdata.TracesUnmarshaler + v1JSONUnmarshaler pdata.TracesUnmarshaler + jsonUnmarshaler pdata.TracesUnmarshaler + protobufUnmarshaler pdata.TracesUnmarshaler + protobufDebugUnmarshaler pdata.TracesUnmarshaler } var _ http.Handler = (*ZipkinReceiver)(nil) @@ -74,9 +73,14 @@ func New(config *Config, nextConsumer consumer.Traces) (*ZipkinReceiver, error) } zr := &ZipkinReceiver{ - nextConsumer: nextConsumer, - id: config.ID(), - config: config, + nextConsumer: nextConsumer, + id: config.ID(), + config: config, + v1ThriftUnmarshaler: zipkinv1.NewThriftTracesUnmarshaler(), + v1JSONUnmarshaler: zipkinv1.NewJSONTracesUnmarshaler(config.ParseStringTags), + jsonUnmarshaler: zipkinv2.NewJSONTracesUnmarshaler(config.ParseStringTags), + protobufUnmarshaler: zipkinv2.NewProtobufTracesUnmarshaler(false, config.ParseStringTags), + protobufDebugUnmarshaler: zipkinv2.NewProtobufTracesUnmarshaler(true, config.ParseStringTags), } return zr, nil } @@ -87,9 +91,6 @@ func (zr *ZipkinReceiver) Start(_ context.Context, host component.Host) error { return errors.New("nil host") } - zr.mu.Lock() - defer zr.mu.Unlock() - zr.host = host zr.server = zr.config.HTTPServerSettings.ToServer(zr) var listener net.Listener @@ -112,14 +113,9 @@ func (zr *ZipkinReceiver) Start(_ context.Context, host component.Host) error { // v1ToTraceSpans parses Zipkin v1 JSON traces and converts them to OpenCensus Proto spans. func (zr *ZipkinReceiver) v1ToTraceSpans(blob []byte, hdr http.Header) (reqs pdata.Traces, err error) { if hdr.Get("Content-Type") == "application/x-thrift" { - zSpans, err := jaegerzipkin.DeserializeThrift(blob) - if err != nil { - return pdata.NewTraces(), err - } - - return zipkin.V1ThriftBatchToInternalTraces(zSpans) + return zr.v1ThriftUnmarshaler.UnmarshalTraces(blob) } - return zipkin.V1JSONBatchToInternalTraces(blob, zr.config.ParseStringTags) + return zr.v1JSONUnmarshaler.UnmarshalTraces(blob) } // v2ToTraceSpans parses Zipkin v2 JSON or Protobuf traces and converts them to OpenCensus Proto spans. 
@@ -128,30 +124,20 @@ func (zr *ZipkinReceiver) v2ToTraceSpans(blob []byte, hdr http.Header) (reqs pda // https://github.com/openzipkin/zipkin-go/blob/3793c981d4f621c0e3eb1457acffa2c1cc591384/proto/v2/zipkin.proto#L154 debugWasSet := hdr.Get("X-B3-Flags") == "1" - var zipkinSpans []*zipkinmodel.SpanModel + // By default, we'll assume using JSON + unmarshaler := zr.jsonUnmarshaler // Zipkin can send protobuf via http - switch hdr.Get("Content-Type") { - // TODO: (@odeke-em) record the unique types of Content-Type uploads - case "application/x-protobuf": - zipkinSpans, err = zipkin_proto3.ParseSpans(blob, debugWasSet) - - default: // By default, we'll assume using JSON - zipkinSpans, err = zr.deserializeFromJSON(blob) - } - - if err != nil { - return pdata.Traces{}, err + if hdr.Get("Content-Type") == "application/x-protobuf" { + // TODO: (@odeke-em) record the unique types of Content-Type uploads + if debugWasSet { + unmarshaler = zr.protobufDebugUnmarshaler + } else { + unmarshaler = zr.protobufUnmarshaler + } } - return zipkin.V2SpansToInternalTraces(zipkinSpans, zr.config.ParseStringTags) -} - -func (zr *ZipkinReceiver) deserializeFromJSON(jsonBlob []byte) (zs []*zipkinmodel.SpanModel, err error) { - if err = json.Unmarshal(jsonBlob, &zs); err != nil { - return nil, err - } - return zs, nil + return unmarshaler.UnmarshalTraces(blob) } // Shutdown tells the receiver that should stop reception, @@ -216,7 +202,6 @@ func (zr *ZipkinReceiver) ServeHTTP(w http.ResponseWriter, r *http.Request) { asZipkinv1 := r.URL != nil && strings.Contains(r.URL.Path, "api/v1/spans") transportTag := transportType(r, asZipkinv1) - ctx = obsreport.ReceiverContext(ctx, zr.id, transportTag) obsrecv := obsreport.NewReceiver(obsreport.ReceiverSettings{ReceiverID: zr.id, Transport: transportTag}) ctx = obsrecv.StartTracesOp(ctx) diff --git a/internal/otel_collector/service/application.go b/internal/otel_collector/service/collector.go similarity index 52% rename from internal/otel_collector/service/application.go rename to internal/otel_collector/service/collector.go index fb662339257..ecbee06393f 100644 --- a/internal/otel_collector/service/application.go +++ b/internal/otel_collector/service/collector.go @@ -46,7 +46,7 @@ const ( extensionzPath = "extensionz" ) -// State defines Application's state. +// State defines Collector's state. type State int const ( @@ -56,8 +56,19 @@ const ( Closed ) -// Application represents a collector application -type Application struct { +// (Internal note) Collector Lifecycle: +// - New constructs a new Collector. +// - Run starts the collector and calls (*Collector).execute. +// - execute calls setupConfigurationComponents to handle configuration. +// If configuration parser fails, collector's config can be reloaded. +// Collector can be shutdown if parser gets a shutdown error. +// - execute runs runAndWaitForShutdownEvent and waits for a shutdown event. +// SIGINT and SIGTERM, errors, and (*Collector).Shutdown can trigger the shutdown events. +// - Upon shutdown, pipelines are notified, then pipelines and extensions are shut down. +// - Users can call (*Collector).Shutdown anytime to shutdown the collector. + +// Collector represents a server providing the OpenTelemetry Collector service. +type Collector struct { info component.BuildInfo rootCmd *cobra.Command logger *zap.Logger @@ -69,26 +80,31 @@ type Application struct { parserProvider parserprovider.ParserProvider - // stopTestChan is used to terminate the application in end to end tests. 
- stopTestChan chan struct{} + // shutdownChan is used to terminate the collector. + shutdownChan chan struct{} // signalsChannel is used to receive termination signals from the OS. signalsChannel chan os.Signal + allowGracefulShutodwn bool + // asyncErrorChannel is used to signal a fatal error from any component. asyncErrorChannel chan error } -// New creates and returns a new instance of Application. -func New(set AppSettings) (*Application, error) { +// New creates and returns a new instance of Collector. +func New(set CollectorSettings) (*Collector, error) { if err := configcheck.ValidateConfigFromFactories(set.Factories); err != nil { return nil, err } - app := &Application{ + col := &Collector{ info: set.BuildInfo, factories: set.Factories, stateChannel: make(chan State, Closed+1), + // We use a negative in the settings not to break the existing + // behavior. Internally, allowGracefulShutodwn is more readable. + allowGracefulShutodwn: !set.DisableGracefulShutdown, } rootCmd := &cobra.Command{ @@ -96,11 +112,11 @@ func New(set AppSettings) (*Application, error) { Version: set.BuildInfo.Version, RunE: func(cmd *cobra.Command, args []string) error { var err error - if app.logger, err = newLogger(set.LoggingOptions); err != nil { + if col.logger, err = newLogger(set.LoggingOptions); err != nil { return fmt.Errorf("failed to get logger: %w", err) } - return app.execute(context.Background()) + return col.execute(cmd.Context()) }, } @@ -117,59 +133,61 @@ func New(set AppSettings) (*Application, error) { addFlags(flagSet) } rootCmd.Flags().AddGoFlagSet(flagSet) - app.rootCmd = rootCmd + col.rootCmd = rootCmd parserProvider := set.ParserProvider if parserProvider == nil { // use default provider. parserProvider = parserprovider.Default() } - app.parserProvider = parserProvider + col.parserProvider = parserProvider - return app, nil + return col, nil } // Run starts the collector according to the command and configuration // given by the user, and waits for it to complete. -func (app *Application) Run() error { +// Consecutive calls to Run are not allowed, Run shouldn't be called +// once a collector is shut down. +func (col *Collector) Run() error { // From this point on do not show usage in case of error. - app.rootCmd.SilenceUsage = true + col.rootCmd.SilenceUsage = true - return app.rootCmd.Execute() + return col.rootCmd.Execute() } -// GetStateChannel returns state channel of the application. -func (app *Application) GetStateChannel() chan State { - return app.stateChannel +// GetStateChannel returns state channel of the collector server. +func (col *Collector) GetStateChannel() chan State { + return col.stateChannel } -// Command returns Application's root command. -func (app *Application) Command() *cobra.Command { - return app.rootCmd +// Command returns Collector's root command. +func (col *Collector) Command() *cobra.Command { + return col.rootCmd } -// GetLogger returns logger used by the Application. -// The logger is initialized after application start. -func (app *Application) GetLogger() *zap.Logger { - return app.logger +// GetLogger returns logger used by the Collector. +// The logger is initialized after collector server start. +func (col *Collector) GetLogger() *zap.Logger { + return col.logger } -// Shutdown shuts down the application. -func (app *Application) Shutdown() { +// Shutdown shuts down the collector server. +func (col *Collector) Shutdown() { // TODO: Implement a proper shutdown with graceful draining of the pipeline. 
// See https://github.com/open-telemetry/opentelemetry-collector/issues/483. defer func() { if r := recover(); r != nil { - app.logger.Info("stopTestChan already closed") + col.logger.Info("shutdownChan already closed") } }() - close(app.stopTestChan) + close(col.shutdownChan) } -func (app *Application) setupTelemetry(ballastSizeBytes uint64) error { - app.logger.Info("Setting up own telemetry...") +func (col *Collector) setupTelemetry(ballastSizeBytes uint64) error { + col.logger.Info("Setting up own telemetry...") - err := applicationTelemetry.init(app.asyncErrorChannel, ballastSizeBytes, app.logger) + err := collectorTelemetry.init(col.asyncErrorChannel, ballastSizeBytes, col.logger) if err != nil { return fmt.Errorf("failed to initialize telemetry: %w", err) } @@ -178,38 +196,39 @@ func (app *Application) setupTelemetry(ballastSizeBytes uint64) error { } // runAndWaitForShutdownEvent waits for one of the shutdown events that can happen. -func (app *Application) runAndWaitForShutdownEvent() { - app.logger.Info("Everything is ready. Begin running and processing data.") +func (col *Collector) runAndWaitForShutdownEvent() { + col.logger.Info("Everything is ready. Begin running and processing data.") - // plug SIGTERM signal into a channel. - app.signalsChannel = make(chan os.Signal, 1) - signal.Notify(app.signalsChannel, os.Interrupt, syscall.SIGTERM) + col.signalsChannel = make(chan os.Signal, 1) + // Only notify with SIGTERM and SIGINT if graceful shutdown is enabled. + if col.allowGracefulShutodwn { + signal.Notify(col.signalsChannel, os.Interrupt, syscall.SIGTERM) + } - // set the channel to stop testing. - app.stopTestChan = make(chan struct{}) - app.stateChannel <- Running + col.shutdownChan = make(chan struct{}) + col.stateChannel <- Running select { - case err := <-app.asyncErrorChannel: - app.logger.Error("Asynchronous error received, terminating process", zap.Error(err)) - case s := <-app.signalsChannel: - app.logger.Info("Received signal from OS", zap.String("signal", s.String())) - case <-app.stopTestChan: - app.logger.Info("Received stop test request") + case err := <-col.asyncErrorChannel: + col.logger.Error("Asynchronous error received, terminating process", zap.Error(err)) + case s := <-col.signalsChannel: + col.logger.Info("Received signal from OS", zap.String("signal", s.String())) + case <-col.shutdownChan: + col.logger.Info("Received shutdown request") } - app.stateChannel <- Closing + col.stateChannel <- Closing } // setupConfigurationComponents loads the config and starts the components. If all the steps succeeds it -// sets the app.service with the service currently running. -func (app *Application) setupConfigurationComponents(ctx context.Context) error { - app.logger.Info("Loading configuration...") +// sets the col.service with the service currently running. 
+func (col *Collector) setupConfigurationComponents(ctx context.Context) error { + col.logger.Info("Loading configuration...") - cp, err := app.parserProvider.Get() + cp, err := col.parserProvider.Get() if err != nil { return fmt.Errorf("cannot load configuration's parser: %w", err) } - cfg, err := configloader.Load(cp, app.factories) + cfg, err := configloader.Load(cp, col.factories) if err != nil { return fmt.Errorf("cannot load configuration: %w", err) } @@ -218,14 +237,14 @@ func (app *Application) setupConfigurationComponents(ctx context.Context) error return fmt.Errorf("invalid configuration: %w", err) } - app.logger.Info("Applying configuration...") + col.logger.Info("Applying configuration...") service, err := newService(&svcSettings{ - BuildInfo: app.info, - Factories: app.factories, + BuildInfo: col.info, + Factories: col.factories, Config: cfg, - Logger: app.logger, - AsyncErrorChannel: app.asyncErrorChannel, + Logger: col.logger, + AsyncErrorChannel: col.asyncErrorChannel, }) if err != nil { return err @@ -236,21 +255,21 @@ func (app *Application) setupConfigurationComponents(ctx context.Context) error return err } - app.service = service + col.service = service // If provider is watchable start a goroutine watching for updates. - if watchable, ok := app.parserProvider.(parserprovider.Watchable); ok { + if watchable, ok := col.parserProvider.(parserprovider.Watchable); ok { go func() { err := watchable.WatchForUpdate() switch { // TODO: Move configsource.ErrSessionClosed to providerparser package to avoid depending on configsource. case errors.Is(err, configsource.ErrSessionClosed): - // This is the case of shutdown of the whole application, nothing to do. - app.logger.Info("Config WatchForUpdate closed", zap.Error(err)) + // This is the case of shutdown of the whole collector server, nothing to do. + col.logger.Info("Config WatchForUpdate closed", zap.Error(err)) return default: - app.logger.Warn("Config WatchForUpdated exited", zap.Error(err)) - app.reloadService(context.Background()) + col.logger.Warn("Config WatchForUpdated exited", zap.Error(err)) + col.reloadService(context.Background()) } }() } @@ -258,92 +277,92 @@ func (app *Application) setupConfigurationComponents(ctx context.Context) error return nil } -func (app *Application) execute(ctx context.Context) error { - app.logger.Info("Starting "+app.info.Command+"...", - zap.String("Version", app.info.Version), +func (col *Collector) execute(ctx context.Context) error { + col.logger.Info("Starting "+col.info.Command+"...", + zap.String("Version", col.info.Version), zap.Int("NumCPU", runtime.NumCPU()), ) - app.stateChannel <- Starting + col.stateChannel <- Starting // Set memory ballast - ballast, ballastSizeBytes := app.createMemoryBallast() + ballast, ballastSizeBytes := col.createMemoryBallast() - app.asyncErrorChannel = make(chan error) + col.asyncErrorChannel = make(chan error) // Setup everything. - err := app.setupTelemetry(ballastSizeBytes) + err := col.setupTelemetry(ballastSizeBytes) if err != nil { return err } - err = app.setupConfigurationComponents(ctx) + err = col.setupConfigurationComponents(ctx) if err != nil { return err } // Everything is ready, now run until an event requiring shutdown happens. - app.runAndWaitForShutdownEvent() + col.runAndWaitForShutdownEvent() // Accumulate errors and proceed with shutting down remaining components. var errs []error // Begin shutdown sequence. 
runtime.KeepAlive(ballast) - app.logger.Info("Starting shutdown...") + col.logger.Info("Starting shutdown...") - if closable, ok := app.parserProvider.(parserprovider.Closeable); ok { + if closable, ok := col.parserProvider.(parserprovider.Closeable); ok { if err := closable.Close(ctx); err != nil { errs = append(errs, fmt.Errorf("failed to close config: %w", err)) } } - if app.service != nil { - if err := app.service.Shutdown(ctx); err != nil { + if col.service != nil { + if err := col.service.Shutdown(ctx); err != nil { errs = append(errs, fmt.Errorf("failed to shutdown service: %w", err)) } } - if err := applicationTelemetry.shutdown(); err != nil { - errs = append(errs, fmt.Errorf("failed to shutdown application telemetry: %w", err)) + if err := collectorTelemetry.shutdown(); err != nil { + errs = append(errs, fmt.Errorf("failed to shutdown collector telemetry: %w", err)) } - app.logger.Info("Shutdown complete.") - app.stateChannel <- Closed - close(app.stateChannel) + col.logger.Info("Shutdown complete.") + col.stateChannel <- Closed + close(col.stateChannel) return consumererror.Combine(errs) } -func (app *Application) createMemoryBallast() ([]byte, uint64) { +func (col *Collector) createMemoryBallast() ([]byte, uint64) { ballastSizeMiB := builder.MemBallastSize() if ballastSizeMiB > 0 { ballastSizeBytes := uint64(ballastSizeMiB) * 1024 * 1024 ballast := make([]byte, ballastSizeBytes) - app.logger.Info("Using memory ballast", zap.Int("MiBs", ballastSizeMiB)) + col.logger.Info("Using memory ballast", zap.Int("MiBs", ballastSizeMiB)) return ballast, ballastSizeBytes } return nil, 0 } -// reloadService shutdowns the current app.service and setups a new one according -// to the latest configuration. It requires that app.parserProvider and app.factories +// reloadService shutdowns the current col.service and setups a new one according +// to the latest configuration. It requires that col.parserProvider and col.factories // are properly populated to finish successfully. 
-func (app *Application) reloadService(ctx context.Context) error { - if closeable, ok := app.parserProvider.(parserprovider.Closeable); ok { +func (col *Collector) reloadService(ctx context.Context) error { + if closeable, ok := col.parserProvider.(parserprovider.Closeable); ok { if err := closeable.Close(ctx); err != nil { return fmt.Errorf("failed close current config provider: %w", err) } } - if app.service != nil { - retiringService := app.service - app.service = nil + if col.service != nil { + retiringService := col.service + col.service = nil if err := retiringService.Shutdown(ctx); err != nil { return fmt.Errorf("failed to shutdown the retiring config: %w", err) } } - if err := app.setupConfigurationComponents(ctx); err != nil { + if err := col.setupConfigurationComponents(ctx); err != nil { return fmt.Errorf("failed to setup configuration components: %w", err) } diff --git a/internal/otel_collector/service/application_windows.go b/internal/otel_collector/service/collector_windows.go similarity index 80% rename from internal/otel_collector/service/application_windows.go rename to internal/otel_collector/service/collector_windows.go index 5420ab3c481..99e1f5e8aec 100644 --- a/internal/otel_collector/service/application_windows.go +++ b/internal/otel_collector/service/collector_windows.go @@ -27,11 +27,11 @@ import ( ) type WindowsService struct { - settings AppSettings - app *Application + settings CollectorSettings + col *Collector } -func NewWindowsService(set AppSettings) *WindowsService { +func NewWindowsService(set CollectorSettings) *WindowsService { return &WindowsService{settings: set} } @@ -48,10 +48,10 @@ func (s *WindowsService) Execute(args []string, requests <-chan svc.ChangeReques return false, 1501 // 1501: ERROR_EVENTLOG_CANT_START } - appErrorChannel := make(chan error, 1) + colErrorChannel := make(chan error, 1) changes <- svc.Status{State: svc.StartPending} - if err = s.start(elog, appErrorChannel); err != nil { + if err = s.start(elog, colErrorChannel); err != nil { elog.Error(3, fmt.Sprintf("failed to start service: %v", err)) return false, 1064 // 1064: ERROR_EXCEPTION_IN_SERVICE } @@ -64,7 +64,7 @@ func (s *WindowsService) Execute(args []string, requests <-chan svc.ChangeReques case svc.Stop, svc.Shutdown: changes <- svc.Status{State: svc.StopPending} - if err := s.stop(appErrorChannel); err != nil { + if err := s.stop(colErrorChannel); err != nil { elog.Error(3, fmt.Sprintf("errors occurred while shutting down the service: %v", err)) } changes <- svc.Status{State: svc.Stopped} @@ -79,36 +79,36 @@ func (s *WindowsService) Execute(args []string, requests <-chan svc.ChangeReques return false, 0 } -func (s *WindowsService) start(elog *eventlog.Log, appErrorChannel chan error) error { +func (s *WindowsService) start(elog *eventlog.Log, colErrorChannel chan error) error { var err error - s.app, err = newWithWindowsEventLogCore(s.settings, elog) + s.col, err = newWithWindowsEventLogCore(s.settings, elog) if err != nil { return err } - // app.Start blocks until receiving a SIGTERM signal, so needs to be started + // col.Start blocks until receiving a SIGTERM signal, so needs to be started // asynchronously, but it will exit early if an error occurs on startup - go func() { appErrorChannel <- s.app.Run() }() + go func() { colErrorChannel <- s.col.Run() }() - // wait until the app is in the Running state + // wait until the collector server is in the Running state go func() { - for state := range s.app.GetStateChannel() { + for state := range s.col.GetStateChannel() { 
if state == Running { - appErrorChannel <- nil + colErrorChannel <- nil break } } }() - // wait until the app is in the Running state, or an error was returned - return <-appErrorChannel + // wait until the collector server is in the Running state, or an error was returned + return <-colErrorChannel } -func (s *WindowsService) stop(appErrorChannel chan error) error { - // simulate a SIGTERM signal to terminate the application - s.app.signalsChannel <- syscall.SIGTERM - // return the response of app.Start - return <-appErrorChannel +func (s *WindowsService) stop(colErrorChannel chan error) error { + // simulate a SIGTERM signal to terminate the collector server + s.col.signalsChannel <- syscall.SIGTERM + // return the response of col.Start + return <-colErrorChannel } func openEventLog(serviceName string) (*eventlog.Log, error) { @@ -120,7 +120,7 @@ func openEventLog(serviceName string) (*eventlog.Log, error) { return elog, nil } -func newWithWindowsEventLogCore(set AppSettings, elog *eventlog.Log) (*Application, error) { +func newWithWindowsEventLogCore(set CollectorSettings, elog *eventlog.Log) (*Collector, error) { set.LoggingOptions = append( set.LoggingOptions, zap.WrapCore(withWindowsCore(elog)), diff --git a/internal/otel_collector/service/internal/builder/builder.go b/internal/otel_collector/service/internal/builder/builder.go index 60252f0fcb6..7438445f243 100644 --- a/internal/otel_collector/service/internal/builder/builder.go +++ b/internal/otel_collector/service/internal/builder/builder.go @@ -35,7 +35,7 @@ var ( memBallastSize *uint ) -// Flags adds flags related to basic building of the collector application to the given flagset. +// Flags adds flags related to basic building of the collector server to the given flagset. func Flags(flags *flag.FlagSet) { memBallastSize = flags.Uint(memBallastFlag, 0, fmt.Sprintf("Flag to specify size of memory (MiB) ballast to set. Ballast is not used when this is not specified. "+ diff --git a/internal/otel_collector/service/internal/builder/exporters_builder.go b/internal/otel_collector/service/internal/builder/exporters_builder.go index 6925ee5550e..21c13162848 100644 --- a/internal/otel_collector/service/internal/builder/exporters_builder.go +++ b/internal/otel_collector/service/internal/builder/exporters_builder.go @@ -18,6 +18,7 @@ import ( "context" "fmt" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap" "go.opentelemetry.io/collector/component" @@ -140,44 +141,40 @@ type dataTypeRequirements map[config.DataType]dataTypeRequirement // Data type requirements for all exporters. type exportersRequiredDataTypes map[config.Exporter]dataTypeRequirements -// exportersBuilder builds exporters from config. -type exportersBuilder struct { - logger *zap.Logger - buildInfo component.BuildInfo - config *config.Config - factories map[config.Type]component.ExporterFactory -} - // BuildExporters builds Exporters from config. func BuildExporters( logger *zap.Logger, + tracerProvider trace.TracerProvider, buildInfo component.BuildInfo, config *config.Config, factories map[config.Type]component.ExporterFactory, ) (Exporters, error) { - eb := &exportersBuilder{logger.With(zap.String(zapKindKey, zapKindLogExporter)), buildInfo, config, factories} + logger = logger.With(zap.String(zapKindKey, zapKindLogExporter)) // We need to calculate required input data types for each exporter so that we know // which data type must be started for each exporter. 
- exporterInputDataTypes := eb.calcExportersRequiredDataTypes() + exporterInputDataTypes := calcExportersRequiredDataTypes(config) exporters := make(Exporters) // BuildExporters exporters based on configuration and required input data types. - for _, cfg := range eb.config.Exporters { - componentLogger := eb.logger.With(zap.Stringer(zapNameKey, cfg.ID())) - exp, err := eb.buildExporter(context.Background(), componentLogger, eb.buildInfo, cfg, exporterInputDataTypes) + for _, expCfg := range config.Exporters { + set := component.ExporterCreateSettings{ + Logger: logger.With(zap.Stringer(zapNameKey, expCfg.ID())), + TracerProvider: tracerProvider, + BuildInfo: buildInfo, + } + exp, err := buildExporter(context.Background(), factories, set, expCfg, exporterInputDataTypes) if err != nil { return nil, err } - exporters[cfg] = exp + exporters[expCfg] = exp } return exporters, nil } -func (eb *exportersBuilder) calcExportersRequiredDataTypes() exportersRequiredDataTypes { - +func calcExportersRequiredDataTypes(config *config.Config) exportersRequiredDataTypes { // Go over all pipelines. The data type of the pipeline defines what data type // each exporter is expected to receive. Collect all required types for each // exporter. @@ -190,11 +187,11 @@ func (eb *exportersBuilder) calcExportersRequiredDataTypes() exportersRequiredDa result := make(exportersRequiredDataTypes) // Iterate over pipelines. - for _, pipeline := range eb.config.Service.Pipelines { + for _, pipeline := range config.Service.Pipelines { // Iterate over all exporters for this pipeline. for _, expName := range pipeline.Exporters { // Find the exporter config by name. - exporter := eb.config.Exporters[expName] + exporter := config.Exporters[expName] // Create the data type requirement for the exporter if it does not exist. 
if result[exporter] == nil { @@ -209,34 +206,29 @@ func (eb *exportersBuilder) calcExportersRequiredDataTypes() exportersRequiredDa return result } -func (eb *exportersBuilder) buildExporter( +func buildExporter( ctx context.Context, - logger *zap.Logger, - buildInfo component.BuildInfo, + factories map[config.Type]component.ExporterFactory, + set component.ExporterCreateSettings, cfg config.Exporter, exportersInputDataTypes exportersRequiredDataTypes, ) (*builtExporter, error) { - factory := eb.factories[cfg.ID().Type()] + factory := factories[cfg.ID().Type()] if factory == nil { return nil, fmt.Errorf("exporter factory not found for type: %s", cfg.ID().Type()) } exporter := &builtExporter{ - logger: logger, + logger: set.Logger, expByDataType: make(map[config.DataType]component.Exporter, 3), } inputDataTypes := exportersInputDataTypes[cfg] if inputDataTypes == nil { - eb.logger.Info("Ignoring exporter as it is not used by any pipeline") + set.Logger.Info("Ignoring exporter as it is not used by any pipeline") return exporter, nil } - set := component.ExporterCreateSettings{ - Logger: logger, - BuildInfo: buildInfo, - } - var err error var createdExporter component.Exporter for dataType, requirement := range inputDataTypes { @@ -271,7 +263,7 @@ func (eb *exportersBuilder) buildExporter( exporter.expByDataType[dataType] = createdExporter } - eb.logger.Info("Exporter was built.", zap.Stringer("exporter", cfg.ID())) + set.Logger.Info("Exporter was built.") return exporter, nil } diff --git a/internal/otel_collector/service/internal/builder/extensions_builder.go b/internal/otel_collector/service/internal/builder/extensions_builder.go index 8719d93ee65..8a116b5fe89 100644 --- a/internal/otel_collector/service/internal/builder/extensions_builder.go +++ b/internal/otel_collector/service/internal/builder/extensions_builder.go @@ -18,6 +18,7 @@ import ( "context" "fmt" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap" "go.opentelemetry.io/collector/component" @@ -110,32 +111,28 @@ func (exts Extensions) ToMap() map[config.ComponentID]component.Extension { return result } -// exportersBuilder builds exporters from config. -type extensionsBuilder struct { - logger *zap.Logger - buildInfo component.BuildInfo - config *config.Config - factories map[config.Type]component.ExtensionFactory -} - // BuildExtensions builds Extensions from config. 
func BuildExtensions( logger *zap.Logger, + tracerProvider trace.TracerProvider, buildInfo component.BuildInfo, config *config.Config, factories map[config.Type]component.ExtensionFactory, ) (Extensions, error) { - eb := &extensionsBuilder{logger.With(zap.String(zapKindKey, zapKindExtension)), buildInfo, config, factories} - + logger = logger.With(zap.String(zapKindKey, zapKindExtension)) extensions := make(Extensions) - for _, extName := range eb.config.Service.Extensions { - extCfg, exists := eb.config.Extensions[extName] + for _, extName := range config.Service.Extensions { + extCfg, exists := config.Extensions[extName] if !exists { return nil, fmt.Errorf("extension %q is not configured", extName) } - componentLogger := eb.logger.With(zap.Stringer(zapNameKey, extCfg.ID())) - ext, err := eb.buildExtension(componentLogger, eb.buildInfo, extCfg) + set := component.ExtensionCreateSettings{ + Logger: logger.With(zap.Stringer(zapNameKey, extCfg.ID())), + TracerProvider: tracerProvider, + BuildInfo: buildInfo, + } + ext, err := buildExtension(context.Background(), factories, set, extCfg) if err != nil { return nil, err } @@ -146,22 +143,17 @@ func BuildExtensions( return extensions, nil } -func (eb *extensionsBuilder) buildExtension(logger *zap.Logger, buildInfo component.BuildInfo, cfg config.Extension) (*builtExtension, error) { - factory := eb.factories[cfg.ID().Type()] +func buildExtension(ctx context.Context, factories map[config.Type]component.ExtensionFactory, creationSet component.ExtensionCreateSettings, cfg config.Extension) (*builtExtension, error) { + factory := factories[cfg.ID().Type()] if factory == nil { return nil, fmt.Errorf("extension factory for type %q is not configured", cfg.ID().Type()) } ext := &builtExtension{ - logger: logger, - } - - creationSet := component.ExtensionCreateSettings{ - Logger: logger, - BuildInfo: buildInfo, + logger: creationSet.Logger, } - ex, err := factory.CreateExtension(context.Background(), creationSet, cfg) + ex, err := factory.CreateExtension(ctx, creationSet, cfg) if err != nil { return nil, fmt.Errorf("failed to create extension %v: %w", cfg.ID(), err) } diff --git a/internal/otel_collector/service/internal/builder/pipelines_builder.go b/internal/otel_collector/service/internal/builder/pipelines_builder.go index 33bb10ba4d4..a5bd450ac95 100644 --- a/internal/otel_collector/service/internal/builder/pipelines_builder.go +++ b/internal/otel_collector/service/internal/builder/pipelines_builder.go @@ -18,6 +18,7 @@ import ( "context" "fmt" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap" "go.opentelemetry.io/collector/component" @@ -81,23 +82,25 @@ func (bps BuiltPipelines) ShutdownProcessors(ctx context.Context) error { // pipelinesBuilder builds Pipelines from config. type pipelinesBuilder struct { - logger *zap.Logger - buildInfo component.BuildInfo - config *config.Config - exporters Exporters - factories map[config.Type]component.ProcessorFactory + logger *zap.Logger + tracerProvider trace.TracerProvider + buildInfo component.BuildInfo + config *config.Config + exporters Exporters + factories map[config.Type]component.ProcessorFactory } // BuildPipelines builds pipeline processors from config. Requires exporters to be already // built via BuildExporters. 
func BuildPipelines( logger *zap.Logger, + tracerProvider trace.TracerProvider, buildInfo component.BuildInfo, config *config.Config, exporters Exporters, factories map[config.Type]component.ProcessorFactory, ) (BuiltPipelines, error) { - pb := &pipelinesBuilder{logger, buildInfo, config, exporters, factories} + pb := &pipelinesBuilder{logger, tracerProvider, buildInfo, config, exporters, factories} pipelineProcessors := make(BuiltPipelines) for _, pipeline := range pb.config.Service.Pipelines { @@ -125,11 +128,11 @@ func (pb *pipelinesBuilder) buildPipeline(ctx context.Context, pipelineCfg *conf switch pipelineCfg.InputType { case config.TracesDataType: - tc = pb.buildFanoutExportersTraceConsumer(pipelineCfg.Exporters) + tc = pb.buildFanoutExportersTracesConsumer(pipelineCfg.Exporters) case config.MetricsDataType: mc = pb.buildFanoutExportersMetricsConsumer(pipelineCfg.Exporters) case config.LogsDataType: - lc = pb.buildFanoutExportersLogConsumer(pipelineCfg.Exporters) + lc = pb.buildFanoutExportersLogsConsumer(pipelineCfg.Exporters) } mutatesConsumedData := false @@ -150,16 +153,16 @@ func (pb *pipelinesBuilder) buildPipeline(ctx context.Context, pipelineCfg *conf // it becomes the next for the previous one (previous in the pipeline, // which we will build in the next loop iteration). var err error - componentLogger := pb.logger.With(zap.String(zapKindKey, zapKindProcessor), zap.Stringer(zapNameKey, procCfg.ID())) - creationSet := component.ProcessorCreateSettings{ - Logger: componentLogger, - BuildInfo: pb.buildInfo, + set := component.ProcessorCreateSettings{ + Logger: pb.logger.With(zap.String(zapKindKey, zapKindProcessor), zap.Stringer(zapNameKey, procCfg.ID())), + TracerProvider: pb.tracerProvider, + BuildInfo: pb.buildInfo, } switch pipelineCfg.InputType { case config.TracesDataType: var proc component.TracesProcessor - proc, err = factory.CreateTracesProcessor(ctx, creationSet, procCfg, tc) + proc, err = factory.CreateTracesProcessor(ctx, set, procCfg, tc) if proc != nil { mutatesConsumedData = mutatesConsumedData || proc.Capabilities().MutatesData } @@ -167,7 +170,7 @@ func (pb *pipelinesBuilder) buildPipeline(ctx context.Context, pipelineCfg *conf tc = proc case config.MetricsDataType: var proc component.MetricsProcessor - proc, err = factory.CreateMetricsProcessor(ctx, creationSet, procCfg, mc) + proc, err = factory.CreateMetricsProcessor(ctx, set, procCfg, mc) if proc != nil { mutatesConsumedData = mutatesConsumedData || proc.Capabilities().MutatesData } @@ -176,7 +179,7 @@ func (pb *pipelinesBuilder) buildPipeline(ctx context.Context, pipelineCfg *conf case config.LogsDataType: var proc component.LogsProcessor - proc, err = factory.CreateLogsProcessor(ctx, creationSet, procCfg, lc) + proc, err = factory.CreateLogsProcessor(ctx, set, procCfg, lc) if proc != nil { mutatesConsumedData = mutatesConsumedData || proc.Capabilities().MutatesData } @@ -226,7 +229,7 @@ func (pb *pipelinesBuilder) getBuiltExportersByNames(exporterIDs []config.Compon return result } -func (pb *pipelinesBuilder) buildFanoutExportersTraceConsumer(exporterIDs []config.ComponentID) consumer.Traces { +func (pb *pipelinesBuilder) buildFanoutExportersTracesConsumer(exporterIDs []config.ComponentID) consumer.Traces { builtExporters := pb.getBuiltExportersByNames(exporterIDs) var exporters []consumer.Traces @@ -250,7 +253,7 @@ func (pb *pipelinesBuilder) buildFanoutExportersMetricsConsumer(exporterIDs []co return fanoutconsumer.NewMetrics(exporters) } -func (pb *pipelinesBuilder) 
buildFanoutExportersLogConsumer(exporterIDs []config.ComponentID) consumer.Logs { +func (pb *pipelinesBuilder) buildFanoutExportersLogsConsumer(exporterIDs []config.ComponentID) consumer.Logs { builtExporters := pb.getBuiltExportersByNames(exporterIDs) exporters := make([]consumer.Logs, len(builtExporters)) diff --git a/internal/otel_collector/service/internal/builder/receivers_builder.go b/internal/otel_collector/service/internal/builder/receivers_builder.go index fa2be2813e1..76653e3ac7e 100644 --- a/internal/otel_collector/service/internal/builder/receivers_builder.go +++ b/internal/otel_collector/service/internal/builder/receivers_builder.go @@ -19,6 +19,7 @@ import ( "errors" "fmt" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap" "go.opentelemetry.io/collector/component" @@ -89,6 +90,7 @@ type receiversBuilder struct { // BuildReceivers builds Receivers from config. func BuildReceivers( logger *zap.Logger, + tracerProvider trace.TracerProvider, buildInfo component.BuildInfo, config *config.Config, builtPipelines BuiltPipelines, @@ -97,17 +99,21 @@ func BuildReceivers( rb := &receiversBuilder{logger.With(zap.String(zapKindKey, zapKindReceiver)), buildInfo, config, builtPipelines, factories} receivers := make(Receivers) - for _, cfg := range rb.config.Receivers { - recvLogger := rb.logger.With(zap.Stringer(zapNameKey, cfg.ID())) - rcv, err := rb.buildReceiver(context.Background(), recvLogger, rb.buildInfo, cfg) + for _, recvCfg := range rb.config.Receivers { + set := component.ReceiverCreateSettings{ + Logger: rb.logger.With(zap.Stringer(zapNameKey, recvCfg.ID())), + TracerProvider: tracerProvider, + BuildInfo: buildInfo, + } + rcv, err := rb.buildReceiver(context.Background(), set, recvCfg) if err != nil { if err == errUnusedReceiver { - recvLogger.Info("Ignoring receiver as it is not used by any pipeline") + set.Logger.Info("Ignoring receiver as it is not used by any pipeline") continue } return nil, err } - receivers[cfg] = rcv + receivers[recvCfg] = rcv } return receivers, nil @@ -125,7 +131,7 @@ func hasReceiver(pipeline *config.Pipeline, receiverID config.ComponentID) bool type attachedPipelines map[config.DataType][]*builtPipeline -func (rb *receiversBuilder) findPipelinesToAttach(cfg config.Receiver) (attachedPipelines, error) { +func (rb *receiversBuilder) findPipelinesToAttach(receiverID config.ComponentID) (attachedPipelines, error) { // A receiver may be attached to multiple pipelines. Pipelines may consume different // data types. We need to compile the list of pipelines of each type that must be // attached to this receiver according to configuration. @@ -144,7 +150,7 @@ func (rb *receiversBuilder) findPipelinesToAttach(cfg config.Receiver) (attached } // Is this receiver attached to the pipeline? - if hasReceiver(pipelineCfg, cfg.ID()) { + if hasReceiver(pipelineCfg, receiverID) { if _, exists := pipelinesToAttach[pipelineCfg.InputType]; !exists { pipelinesToAttach[pipelineCfg.InputType] = make([]*builtPipeline, 0) } @@ -158,10 +164,9 @@ func (rb *receiversBuilder) findPipelinesToAttach(cfg config.Receiver) (attached return pipelinesToAttach, nil } -func (rb *receiversBuilder) attachReceiverToPipelines( +func attachReceiverToPipelines( ctx context.Context, - logger *zap.Logger, - buildInfo component.BuildInfo, + set component.ReceiverCreateSettings, factory component.ReceiverFactory, dataType config.DataType, cfg config.Receiver, @@ -173,23 +178,19 @@ func (rb *receiversBuilder) attachReceiverToPipelines( // sure its output is fanned out to all attached pipelines. 
var err error var createdReceiver component.Receiver - creationSet := component.ReceiverCreateSettings{ - Logger: logger, - BuildInfo: buildInfo, - } switch dataType { case config.TracesDataType: junction := buildFanoutTraceConsumer(builtPipelines) - createdReceiver, err = factory.CreateTracesReceiver(ctx, creationSet, cfg, junction) + createdReceiver, err = factory.CreateTracesReceiver(ctx, set, cfg, junction) case config.MetricsDataType: junction := buildFanoutMetricConsumer(builtPipelines) - createdReceiver, err = factory.CreateMetricsReceiver(ctx, creationSet, cfg, junction) + createdReceiver, err = factory.CreateMetricsReceiver(ctx, set, cfg, junction) case config.LogsDataType: junction := buildFanoutLogConsumer(builtPipelines) - createdReceiver, err = factory.CreateLogsReceiver(ctx, creationSet, cfg, junction) + createdReceiver, err = factory.CreateLogsReceiver(ctx, set, cfg, junction) default: err = componenterror.ErrDataTypeIsNotSupported @@ -227,26 +228,26 @@ func (rb *receiversBuilder) attachReceiverToPipelines( } rcv.receiver = createdReceiver - logger.Info("Receiver was built.", zap.String("datatype", string(dataType))) + set.Logger.Info("Receiver was built.", zap.String("datatype", string(dataType))) return nil } -func (rb *receiversBuilder) buildReceiver(ctx context.Context, logger *zap.Logger, buildInfo component.BuildInfo, config config.Receiver) (*builtReceiver, error) { +func (rb *receiversBuilder) buildReceiver(ctx context.Context, set component.ReceiverCreateSettings, cfg config.Receiver) (*builtReceiver, error) { // First find pipelines that must be attached to this receiver. - pipelinesToAttach, err := rb.findPipelinesToAttach(config) + pipelinesToAttach, err := rb.findPipelinesToAttach(cfg.ID()) if err != nil { return nil, err } // Prepare to build the receiver. - factory := rb.factories[config.ID().Type()] + factory := rb.factories[cfg.ID().Type()] if factory == nil { - return nil, fmt.Errorf("receiver factory not found for: %v", config.ID()) + return nil, fmt.Errorf("receiver factory not found for: %v", cfg.ID()) } rcv := &builtReceiver{ - logger: logger, + logger: set.Logger, } // Now we have list of pipelines broken down by data type. Iterate for each data type. @@ -258,7 +259,7 @@ func (rb *receiversBuilder) buildReceiver(ctx context.Context, logger *zap.Logge // Attach the corresponding part of the receiver to all pipelines that require // this data type. 
- err := rb.attachReceiverToPipelines(ctx, logger, buildInfo, factory, dataType, config, rcv, pipelines) + err := attachReceiverToPipelines(ctx, set, factory, dataType, cfg, rcv, pipelines) if err != nil { return nil, err } diff --git a/internal/otel_collector/service/internal/fanoutconsumer/cloningconsumer.go b/internal/otel_collector/service/internal/fanoutconsumer/cloningconsumer.go index 14709c57af1..ad266c4dc33 100644 --- a/internal/otel_collector/service/internal/fanoutconsumer/cloningconsumer.go +++ b/internal/otel_collector/service/internal/fanoutconsumer/cloningconsumer.go @@ -19,7 +19,7 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) // NewMetricsCloning wraps multiple metrics consumers in a single one and clones the data diff --git a/internal/otel_collector/service/internal/fanoutconsumer/consumer.go b/internal/otel_collector/service/internal/fanoutconsumer/consumer.go index ded5f4a1e0d..77c474298f0 100644 --- a/internal/otel_collector/service/internal/fanoutconsumer/consumer.go +++ b/internal/otel_collector/service/internal/fanoutconsumer/consumer.go @@ -24,7 +24,7 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) // NewMetrics wraps multiple metrics consumers in a single one. diff --git a/internal/otel_collector/service/parserprovider/setflag.go b/internal/otel_collector/service/parserprovider/setflag.go index f7056658ff8..5a64832fcbf 100644 --- a/internal/otel_collector/service/parserprovider/setflag.go +++ b/internal/otel_collector/service/parserprovider/setflag.go @@ -19,13 +19,13 @@ import ( "fmt" "strings" - "github.com/spf13/viper" + "github.com/knadh/koanf" + "github.com/knadh/koanf/providers/confmap" + "github.com/magiconair/properties" "go.opentelemetry.io/collector/config/configparser" ) -const setFlagFileType = "properties" - type setFlagProvider struct { base ParserProvider } @@ -54,51 +54,32 @@ func (sfl *setFlagProvider) Get() (*configparser.Parser, error) { return nil, err } } - viperFlags := viper.NewWithOptions(viper.KeyDelimiter(configparser.KeyDelimiter)) - viperFlags.SetConfigType(setFlagFileType) - if err := viperFlags.ReadConfig(b); err != nil { - return nil, fmt.Errorf("failed to read set flag config: %v", err) - } - cp, err := sfl.base.Get() - if err != nil { + var props *properties.Properties + var err error + if props, err = properties.Load(b.Bytes(), properties.UTF8); err != nil { return nil, err } - // Viper implementation of v.MergeConfig(io.Reader) or v.MergeConfigMap(map[string]interface) - // does not work properly. This is b/c if it attempts to merge into a nil object it will fail here - // https://github.com/spf13/viper/blob/3826be313591f83193f048520482a7b3cf17d506/viper.go#L1709 - - // The workaround is to call v.Set(string, interface) on all root properties from the config file - // this will correctly preserve the original config and set them up for viper to overlay them with - // the --set params. It should also be noted that setting the root keys is important. This is - // b/c the viper .AllKeys() method does not return empty objects. - // For instance with the following yaml structure: - // a: - // b: - // c: {} - // - // viper.AllKeys() would only return a.b, but not a.c. 
However otel expects {} to behave - // the same as nil object in its config file. Therefore we extract and set the root keys only - // to catch both a.b and a.c. - - rootKeys := map[string]struct{}{} - for _, k := range viperFlags.AllKeys() { - keys := strings.Split(k, configparser.KeyDelimiter) - if len(keys) > 0 { - rootKeys[keys[0]] = struct{}{} - } + // Create a map manually instead of using props.Map() to allow env var expansion + // as used by original Viper-based configparser.Parser. + parsed := map[string]interface{}{} + for _, key := range props.Keys() { + value, _ := props.Get(key) + parsed[key] = value } - for k := range rootKeys { - cp.Set(k, cp.Get(k)) + propertyKoanf := koanf.New(".") + if err = propertyKoanf.Load(confmap.Provider(parsed, "."), nil); err != nil { + return nil, fmt.Errorf("failed to read set flag config: %v", err) } - // now that we've copied the config into the viper "overrides" copy the --set flags - // as well - for _, k := range viperFlags.AllKeys() { - cp.Set(k, viperFlags.Get(k)) + var cp *configparser.Parser + if cp, err = sfl.base.Get(); err != nil { + return nil, err } + cp.MergeStringMap(propertyKoanf.Raw()) + return cp, nil } diff --git a/internal/otel_collector/service/service.go b/internal/otel_collector/service/service.go index b15458d6889..91b1eb1e784 100644 --- a/internal/otel_collector/service/service.go +++ b/internal/otel_collector/service/service.go @@ -18,6 +18,8 @@ import ( "context" "fmt" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap" "go.opentelemetry.io/collector/component" @@ -32,6 +34,7 @@ type service struct { buildInfo component.BuildInfo config *config.Config logger *zap.Logger + tracerProvider trace.TracerProvider asyncErrorChannel chan error builtExporters builder.Exporters @@ -42,10 +45,12 @@ type service struct { func newService(set *svcSettings) (*service, error) { srv := &service{ - factories: set.Factories, - buildInfo: set.BuildInfo, - config: set.Config, - logger: set.Logger, + factories: set.Factories, + buildInfo: set.BuildInfo, + config: set.Config, + logger: set.Logger, + // TODO: Configure the right tracer provider. + tracerProvider: otel.GetTracerProvider(), asyncErrorChannel: set.AsyncErrorChannel, } @@ -126,7 +131,7 @@ func (srv *service) GetExporters() map[config.DataType]map[config.ComponentID]co func (srv *service) buildExtensions() error { var err error - srv.builtExtensions, err = builder.BuildExtensions(srv.logger, srv.buildInfo, srv.config, srv.factories.Extensions) + srv.builtExtensions, err = builder.BuildExtensions(srv.logger, srv.tracerProvider, srv.buildInfo, srv.config, srv.factories.Extensions) if err != nil { return fmt.Errorf("cannot build builtExtensions: %w", err) } @@ -157,20 +162,20 @@ func (srv *service) buildPipelines() error { // First create exporters. var err error - srv.builtExporters, err = builder.BuildExporters(srv.logger, srv.buildInfo, srv.config, srv.factories.Exporters) + srv.builtExporters, err = builder.BuildExporters(srv.logger, srv.tracerProvider, srv.buildInfo, srv.config, srv.factories.Exporters) if err != nil { return fmt.Errorf("cannot build builtExporters: %w", err) } // Create pipelines and their processors and plug exporters to the // end of the pipelines. 
- srv.builtPipelines, err = builder.BuildPipelines(srv.logger, srv.buildInfo, srv.config, srv.builtExporters, srv.factories.Processors) + srv.builtPipelines, err = builder.BuildPipelines(srv.logger, srv.tracerProvider, srv.buildInfo, srv.config, srv.builtExporters, srv.factories.Processors) if err != nil { return fmt.Errorf("cannot build pipelines: %w", err) } // Create receivers and plug them into the start of the pipelines. - srv.builtReceivers, err = builder.BuildReceivers(srv.logger, srv.buildInfo, srv.config, srv.builtPipelines, srv.factories.Receivers) + srv.builtReceivers, err = builder.BuildReceivers(srv.logger, srv.tracerProvider, srv.buildInfo, srv.config, srv.builtPipelines, srv.factories.Receivers) if err != nil { return fmt.Errorf("cannot build receivers: %w", err) } diff --git a/internal/otel_collector/service/settings.go b/internal/otel_collector/service/settings.go index 5cae7cbb7f6..bb66914b464 100644 --- a/internal/otel_collector/service/settings.go +++ b/internal/otel_collector/service/settings.go @@ -27,7 +27,7 @@ type svcSettings struct { // Factories component factories. Factories component.Factories - // BuildInfo provides application start information. + // BuildInfo provides collector start information. BuildInfo component.BuildInfo // Config represents the configuration of the service. @@ -40,14 +40,20 @@ type svcSettings struct { AsyncErrorChannel chan error } -// AppSettings holds configuration for creating a new Application. -type AppSettings struct { +// CollectorSettings holds configuration for creating a new Collector. +type CollectorSettings struct { // Factories component factories. Factories component.Factories - // BuildInfo provides application start information. + // BuildInfo provides collector start information. BuildInfo component.BuildInfo + // DisableGracefulShutdown disables the automatic graceful shutdown + // of the collector on SIGINT or SIGTERM. + // Users who want to handle signals themselves can disable this behavior + // and manually handle the signals to shutdown the collector. + DisableGracefulShutdown bool + // ParserProvider provides the configuration's Parser. // If it is not provided a default provider is used. The default provider loads the configuration // from a config file define by the --config command line flag and overrides component's configuration diff --git a/internal/otel_collector/service/telemetry.go b/internal/otel_collector/service/telemetry.go index cd785f6d15e..2f5cf125e81 100644 --- a/internal/otel_collector/service/telemetry.go +++ b/internal/otel_collector/service/telemetry.go @@ -34,20 +34,20 @@ import ( "go.opentelemetry.io/collector/translator/conventions" ) -// applicationTelemetry is application's own telemetry. -var applicationTelemetry appTelemetryExporter = &appTelemetry{} +// collectorTelemetry is collector's own telemetry. 
+var collectorTelemetry collectorTelemetryExporter = &colTelemetry{} -type appTelemetryExporter interface { +type collectorTelemetryExporter interface { init(asyncErrorChannel chan<- error, ballastSizeBytes uint64, logger *zap.Logger) error shutdown() error } -type appTelemetry struct { +type colTelemetry struct { views []*view.View server *http.Server } -func (tel *appTelemetry) init(asyncErrorChannel chan<- error, ballastSizeBytes uint64, logger *zap.Logger) error { +func (tel *colTelemetry) init(asyncErrorChannel chan<- error, ballastSizeBytes uint64, logger *zap.Logger) error { level := configtelemetry.GetMetricsLevelFlagValue() metricsAddr := telemetry.GetMetricsAddr() @@ -121,7 +121,7 @@ func (tel *appTelemetry) init(asyncErrorChannel chan<- error, ballastSizeBytes u return nil } -func (tel *appTelemetry) shutdown() error { +func (tel *colTelemetry) shutdown() error { view.Unregister(tel.views...) if tel.server != nil { diff --git a/internal/otel_collector/testbed/correctness/metrics/correctness_test_case.go b/internal/otel_collector/testbed/correctness/metrics/correctness_test_case.go index c8f44b06ebb..c843f828f64 100644 --- a/internal/otel_collector/testbed/correctness/metrics/correctness_test_case.go +++ b/internal/otel_collector/testbed/correctness/metrics/correctness_test_case.go @@ -32,7 +32,7 @@ type correctnessTestCase struct { sender testbed.DataSender receiver testbed.DataReceiver harness *testHarness - collector *testbed.InProcessCollector + collector testbed.OtelcolRunner } func newCorrectnessTestCase( diff --git a/internal/otel_collector/testbed/correctness/metrics/metric_diff.go b/internal/otel_collector/testbed/correctness/metrics/metric_diff.go index 1eb529312cf..d4856281a06 100644 --- a/internal/otel_collector/testbed/correctness/metrics/metric_diff.go +++ b/internal/otel_collector/testbed/correctness/metrics/metric_diff.go @@ -18,7 +18,7 @@ import ( "fmt" "reflect" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) // MetricDiff is intended to support producing human-readable diffs between two MetricData structs during @@ -106,16 +106,16 @@ func DiffMetric(diffs []*MetricDiff, expected pdata.Metric, actual pdata.Metric) switch actual.DataType() { case pdata.MetricDataTypeIntGauge: diffs = diffIntPts(diffs, expected.IntGauge().DataPoints(), actual.IntGauge().DataPoints()) - case pdata.MetricDataTypeDoubleGauge: - diffs = diffDoublePts(diffs, expected.DoubleGauge().DataPoints(), actual.DoubleGauge().DataPoints()) + case pdata.MetricDataTypeGauge: + diffs = diffDoublePts(diffs, expected.Gauge().DataPoints(), actual.Gauge().DataPoints()) case pdata.MetricDataTypeIntSum: diffs = diff(diffs, expected.IntSum().IsMonotonic(), actual.IntSum().IsMonotonic(), "IntSum IsMonotonic") diffs = diff(diffs, expected.IntSum().AggregationTemporality(), actual.IntSum().AggregationTemporality(), "IntSum AggregationTemporality") diffs = diffIntPts(diffs, expected.IntSum().DataPoints(), actual.IntSum().DataPoints()) - case pdata.MetricDataTypeDoubleSum: - diffs = diff(diffs, expected.DoubleSum().IsMonotonic(), actual.DoubleSum().IsMonotonic(), "DoubleSum IsMonotonic") - diffs = diff(diffs, expected.DoubleSum().AggregationTemporality(), actual.DoubleSum().AggregationTemporality(), "DoubleSum AggregationTemporality") - diffs = diffDoublePts(diffs, expected.DoubleSum().DataPoints(), actual.DoubleSum().DataPoints()) + case pdata.MetricDataTypeSum: + diffs = diff(diffs, expected.Sum().IsMonotonic(), actual.Sum().IsMonotonic(), "Sum IsMonotonic") + 
diffs = diff(diffs, expected.Sum().AggregationTemporality(), actual.Sum().AggregationTemporality(), "Sum AggregationTemporality") + diffs = diffDoublePts(diffs, expected.Sum().DataPoints(), actual.Sum().DataPoints()) case pdata.MetricDataTypeIntHistogram: diffs = diff(diffs, expected.IntHistogram().AggregationTemporality(), actual.IntHistogram().AggregationTemporality(), "IntHistogram AggregationTemporality") diffs = diffIntHistogramPts(diffs, expected.IntHistogram().DataPoints(), actual.IntHistogram().DataPoints()) diff --git a/internal/otel_collector/testbed/correctness/metrics/metric_index.go b/internal/otel_collector/testbed/correctness/metrics/metric_index.go index a07f9b39a14..091c8e47121 100644 --- a/internal/otel_collector/testbed/correctness/metrics/metric_index.go +++ b/internal/otel_collector/testbed/correctness/metrics/metric_index.go @@ -15,7 +15,7 @@ package metrics import ( - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) type metricReceived struct { diff --git a/internal/otel_collector/testbed/correctness/metrics/metric_supplier.go b/internal/otel_collector/testbed/correctness/metrics/metric_supplier.go index d1b01510e32..624a2a3b455 100644 --- a/internal/otel_collector/testbed/correctness/metrics/metric_supplier.go +++ b/internal/otel_collector/testbed/correctness/metrics/metric_supplier.go @@ -15,7 +15,7 @@ package metrics import ( - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) type metricSupplier struct { diff --git a/internal/otel_collector/testbed/correctness/metrics/metrics_test_harness.go b/internal/otel_collector/testbed/correctness/metrics/metrics_test_harness.go index 71bbf971da5..5004bb19749 100644 --- a/internal/otel_collector/testbed/correctness/metrics/metrics_test_harness.go +++ b/internal/otel_collector/testbed/correctness/metrics/metrics_test_harness.go @@ -21,7 +21,7 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/testbed/testbed" ) diff --git a/internal/otel_collector/testbed/testbed/child_process.go b/internal/otel_collector/testbed/testbed/child_process_collector.go similarity index 85% rename from internal/otel_collector/testbed/testbed/child_process.go rename to internal/otel_collector/testbed/testbed/child_process_collector.go index c4d70da239f..23641cfed5d 100644 --- a/internal/otel_collector/testbed/testbed/child_process.go +++ b/internal/otel_collector/testbed/testbed/child_process_collector.go @@ -36,34 +36,9 @@ import ( "go.uber.org/atomic" ) -// ResourceSpec is a resource consumption specification. -type ResourceSpec struct { - // Percentage of one core the process is expected to consume at most. - // Test is aborted and failed if consumption during - // ResourceCheckPeriod exceeds this number. If 0 the CPU - // consumption is not monitored and does not affect the test result. - ExpectedMaxCPU uint32 - - // Maximum RAM in MiB the process is expected to consume. - // Test is aborted and failed if consumption exceeds this number. - // If 0 memory consumption is not monitored and does not affect - // the test result. - ExpectedMaxRAM uint32 - - // Period during which CPU and RAM of the process are measured. - // Bigger numbers will result in more averaging of short spikes. 
- ResourceCheckPeriod time.Duration
-}
-
-// isSpecified returns true if any part of ResourceSpec is specified,
-// i.e. has non-zero value.
-func (rs *ResourceSpec) isSpecified() bool {
- return rs != nil && (rs.ExpectedMaxCPU != 0 || rs.ExpectedMaxRAM != 0)
-}
-
-// ChildProcess implements the OtelcolRunner interface as a child process on the same machine executing
+// childProcessCollector implements the OtelcolRunner interface as a child process on the same machine executing
// the test. The process can be monitored and the output of which will be written to a log file.
-type ChildProcess struct {
+type childProcessCollector struct {
// Path to agent executable. If unset the default executable in
// bin/otelcol_{{.GOOS}}_{{.GOARCH}} will be used.
// Can be set for example to use the unstable executable for a specific test.
@@ -121,21 +96,12 @@ type ChildProcess struct {
ramMiBMax uint32
}
-type StartParams struct {
- Name string
- LogFilePath string
- CmdArgs []string
- resourceSpec *ResourceSpec
-}
-
-type ResourceConsumption struct {
- CPUPercentAvg float64
- CPUPercentMax float64
- RAMMiBAvg uint32
- RAMMiBMax uint32
+// NewChildProcessCollector creates a new OtelcolRunner as a child process on the same machine executing the test.
+func NewChildProcessCollector() OtelcolRunner {
+ return &childProcessCollector{}
}
-func (cp *ChildProcess) PrepareConfig(configStr string) (configCleanup func(), err error) {
+func (cp *childProcessCollector) PrepareConfig(configStr string) (configCleanup func(), err error) {
configCleanup = func() {
// NoOp
}
@@ -198,7 +164,7 @@ func expandExeFileName(exeName string) string {
// logFilePath is the file path to write the standard output and standard error of
// the process to.
// cmdArgs is the command line arguments to pass to the process.
-func (cp *ChildProcess) Start(params StartParams) error {
+func (cp *childProcessCollector) Start(params StartParams) error {
cp.name = params.Name
cp.doneSignal = make(chan struct{})
@@ -275,7 +241,7 @@ func (cp *ChildProcess) Start(params StartParams) error {
return err
}
-func (cp *ChildProcess) Stop() (stopped bool, err error) {
+func (cp *childProcessCollector) Stop() (stopped bool, err error) {
if !cp.isStarted || cp.isStopped {
return false, nil
}
@@ -341,7 +307,7 @@ func (cp *ChildProcess) Stop() (stopped bool, err error) {
return stopped, err
}
-func (cp *ChildProcess) WatchResourceConsumption() error {
+func (cp *childProcessCollector) WatchResourceConsumption() error {
if !cp.resourceSpec.isSpecified() {
// Resource monitoring is not enabled.
return nil @@ -388,11 +354,11 @@ func (cp *ChildProcess) WatchResourceConsumption() error { } } -func (cp *ChildProcess) GetProcessMon() *process.Process { +func (cp *childProcessCollector) GetProcessMon() *process.Process { return cp.processMon } -func (cp *ChildProcess) fetchRAMUsage() { +func (cp *childProcessCollector) fetchRAMUsage() { // Get process memory and CPU times mi, err := cp.processMon.MemoryInfo() if err != nil { @@ -415,7 +381,7 @@ func (cp *ChildProcess) fetchRAMUsage() { cp.ramMiBCur.Store(ramMiBCur) } -func (cp *ChildProcess) fetchCPUUsage() { +func (cp *childProcessCollector) fetchCPUUsage() { times, err := cp.processMon.Times() if err != nil { log.Printf("cannot get process times for %d: %s", @@ -448,7 +414,7 @@ func (cp *ChildProcess) fetchCPUUsage() { cp.cpuPercentX1000Cur.Store(curCPUPercentageX1000) } -func (cp *ChildProcess) checkAllowedResourceUsage() error { +func (cp *childProcessCollector) checkAllowedResourceUsage() error { // Check if current CPU usage exceeds expected. var errMsg string if cp.resourceSpec.ExpectedMaxCPU != 0 && cp.cpuPercentX1000Cur.Load()/1000 > cp.resourceSpec.ExpectedMaxCPU { @@ -472,7 +438,7 @@ func (cp *ChildProcess) checkAllowedResourceUsage() error { } // GetResourceConsumption returns resource consumption as a string -func (cp *ChildProcess) GetResourceConsumption() string { +func (cp *childProcessCollector) GetResourceConsumption() string { if !cp.resourceSpec.isSpecified() { // Monitoring is not enabled. return "" @@ -486,7 +452,7 @@ func (cp *ChildProcess) GetResourceConsumption() string { } // GetTotalConsumption returns total resource consumption since start of process -func (cp *ChildProcess) GetTotalConsumption() *ResourceConsumption { +func (cp *childProcessCollector) GetTotalConsumption() *ResourceConsumption { rc := &ResourceConsumption{} if cp.processMon != nil { diff --git a/internal/otel_collector/testbed/testbed/data_providers.go b/internal/otel_collector/testbed/testbed/data_providers.go index 2eb490acf43..ccc8eb115b5 100644 --- a/internal/otel_collector/testbed/testbed/data_providers.go +++ b/internal/otel_collector/testbed/testbed/data_providers.go @@ -15,31 +15,27 @@ package testbed import ( + "io/ioutil" "log" "os" "path/filepath" "strconv" "time" - "github.com/gogo/protobuf/jsonpb" - "github.com/gogo/protobuf/proto" "go.uber.org/atomic" "go.opentelemetry.io/collector/config" - "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal" - otlplogscol "go.opentelemetry.io/collector/internal/data/protogen/collector/logs/v1" - otlpmetricscol "go.opentelemetry.io/collector/internal/data/protogen/collector/metrics/v1" - otlptracecol "go.opentelemetry.io/collector/internal/data/protogen/collector/trace/v1" "go.opentelemetry.io/collector/internal/goldendataset" "go.opentelemetry.io/collector/internal/idutils" + "go.opentelemetry.io/collector/model/otlp" + "go.opentelemetry.io/collector/model/pdata" ) // DataProvider defines the interface for generators of test data used to drive various end-to-end tests. type DataProvider interface { // SetLoadGeneratorCounters supplies pointers to LoadGenerator counters. // The data provider implementation should increment these as it generates data. - SetLoadGeneratorCounters(batchesGenerated *atomic.Uint64, dataItemsGenerated *atomic.Uint64) + SetLoadGeneratorCounters(dataItemsGenerated *atomic.Uint64) // GenerateTraces returns an internal Traces instance with an OTLP ResourceSpans slice populated with test data. 
GenerateTraces() (pdata.Traces, bool)
// GenerateMetrics returns an internal MetricData instance with an OTLP ResourceMetrics slice of test data.
@@ -48,34 +44,32 @@ type DataProvider interface {
GenerateLogs() (pdata.Logs, bool)
}
-// PerfTestDataProvider in an implementation of the DataProvider for use in performance tests.
+// perfTestDataProvider is an implementation of the DataProvider for use in performance tests.
// Tracing IDs are based on the incremented batch and data items counters.
-type PerfTestDataProvider struct {
+type perfTestDataProvider struct {
options LoadOptions
- batchesGenerated *atomic.Uint64
+ traceIDSequence atomic.Uint64
dataItemsGenerated *atomic.Uint64
}
-// NewPerfTestDataProvider creates an instance of PerfTestDataProvider which generates test data based on the sizes
+// NewPerfTestDataProvider creates an instance of perfTestDataProvider which generates test data based on the sizes
// specified in the supplied LoadOptions.
-func NewPerfTestDataProvider(options LoadOptions) *PerfTestDataProvider {
- return &PerfTestDataProvider{
+func NewPerfTestDataProvider(options LoadOptions) DataProvider {
+ return &perfTestDataProvider{
options: options,
}
}
-func (dp *PerfTestDataProvider) SetLoadGeneratorCounters(batchesGenerated *atomic.Uint64, dataItemsGenerated *atomic.Uint64) {
- dp.batchesGenerated = batchesGenerated
+func (dp *perfTestDataProvider) SetLoadGeneratorCounters(dataItemsGenerated *atomic.Uint64) {
dp.dataItemsGenerated = dataItemsGenerated
}
-func (dp *PerfTestDataProvider) GenerateTraces() (pdata.Traces, bool) {
-
+func (dp *perfTestDataProvider) GenerateTraces() (pdata.Traces, bool) {
traceData := pdata.NewTraces()
spans := traceData.ResourceSpans().AppendEmpty().InstrumentationLibrarySpans().AppendEmpty().Spans()
- spans.Resize(dp.options.ItemsPerBatch)
+ spans.EnsureCapacity(dp.options.ItemsPerBatch)
- traceID := dp.batchesGenerated.Inc()
+ traceID := dp.traceIDSequence.Inc()
for i := 0; i < dp.options.ItemsPerBatch; i++ {
startTime := time.Now()
@@ -83,7 +77,7 @@ func (dp *PerfTestDataProvider) GenerateTraces() (pdata.Traces, bool) {
spanID := dp.dataItemsGenerated.Inc()
- span := spans.At(i)
+ span := spans.AppendEmpty()
// Create a span.
span.SetTraceID(idutils.UInt64ToTraceID(0, traceID))
@@ -103,8 +97,7 @@ func (dp *PerfTestDataProvider) GenerateTraces() (pdata.Traces, bool) {
return traceData, false
}
-func (dp *PerfTestDataProvider) GenerateMetrics() (pdata.Metrics, bool) {
-
+func (dp *perfTestDataProvider) GenerateMetrics() (pdata.Metrics, bool) {
// Generate 7 data points per metric.
const dataPointsPerMetric = 7
@@ -118,22 +111,22 @@ func (dp *PerfTestDataProvider) GenerateMetrics() (pdata.Metrics, bool) {
}
}
metrics := rm.InstrumentationLibraryMetrics().AppendEmpty().Metrics()
- metrics.Resize(dp.options.ItemsPerBatch)
+ metrics.EnsureCapacity(dp.options.ItemsPerBatch)
for i := 0; i < dp.options.ItemsPerBatch; i++ {
- metric := metrics.At(i)
+ metric := metrics.AppendEmpty()
metric.SetName("load_generator_" + strconv.Itoa(i))
metric.SetDescription("Load Generator Counter #" + strconv.Itoa(i))
metric.SetUnit("1")
metric.SetDataType(pdata.MetricDataTypeIntGauge)
- batchIndex := dp.batchesGenerated.Inc()
+ batchIndex := dp.traceIDSequence.Inc()
dps := metric.IntGauge().DataPoints()
// Generate data points for the metric.
- dps.Resize(dataPointsPerMetric) + dps.EnsureCapacity(dataPointsPerMetric) for j := 0; j < dataPointsPerMetric; j++ { - dataPoint := dps.At(j) + dataPoint := dps.AppendEmpty() dataPoint.SetStartTimestamp(pdata.TimestampFromTime(time.Now())) value := dp.dataItemsGenerated.Inc() dataPoint.SetValue(int64(value)) @@ -146,7 +139,7 @@ func (dp *PerfTestDataProvider) GenerateMetrics() (pdata.Metrics, bool) { return md, false } -func (dp *PerfTestDataProvider) GenerateLogs() (pdata.Logs, bool) { +func (dp *perfTestDataProvider) GenerateLogs() (pdata.Logs, bool) { logs := pdata.NewLogs() rl := logs.ResourceLogs().AppendEmpty() if dp.options.Attributes != nil { @@ -157,15 +150,15 @@ func (dp *PerfTestDataProvider) GenerateLogs() (pdata.Logs, bool) { } } logRecords := rl.InstrumentationLibraryLogs().AppendEmpty().Logs() - logRecords.Resize(dp.options.ItemsPerBatch) + logRecords.EnsureCapacity(dp.options.ItemsPerBatch) now := pdata.TimestampFromTime(time.Now()) - batchIndex := dp.batchesGenerated.Inc() + batchIndex := dp.traceIDSequence.Inc() for i := 0; i < dp.options.ItemsPerBatch; i++ { itemIndex := dp.dataItemsGenerated.Inc() - record := logRecords.At(i) + record := logRecords.AppendEmpty() record.SetSeverityNumber(pdata.SeverityNumberINFO3) record.SetSeverityText("INFO3") record.SetName("load_generator_" + strconv.Itoa(i)) @@ -184,12 +177,11 @@ func (dp *PerfTestDataProvider) GenerateLogs() (pdata.Logs, bool) { return logs, false } -// GoldenDataProvider is an implementation of DataProvider for use in correctness tests. +// goldenDataProvider is an implementation of DataProvider for use in correctness tests. // Provided data from the "Golden" dataset generated using pairwise combinatorial testing techniques. -type GoldenDataProvider struct { +type goldenDataProvider struct { tracePairsFile string spanPairsFile string - batchesGenerated *atomic.Uint64 dataItemsGenerated *atomic.Uint64 tracesGenerated []pdata.Traces @@ -200,22 +192,21 @@ type GoldenDataProvider struct { metricsIndex int } -// NewGoldenDataProvider creates a new instance of GoldenDataProvider which generates test data based +// NewGoldenDataProvider creates a new instance of goldenDataProvider which generates test data based // on the pairwise combinations specified in the tracePairsFile and spanPairsFile input variables. 
-func NewGoldenDataProvider(tracePairsFile string, spanPairsFile string, metricPairsFile string) *GoldenDataProvider { - return &GoldenDataProvider{ +func NewGoldenDataProvider(tracePairsFile string, spanPairsFile string, metricPairsFile string) DataProvider { + return &goldenDataProvider{ tracePairsFile: tracePairsFile, spanPairsFile: spanPairsFile, metricPairsFile: metricPairsFile, } } -func (dp *GoldenDataProvider) SetLoadGeneratorCounters(batchesGenerated *atomic.Uint64, dataItemsGenerated *atomic.Uint64) { - dp.batchesGenerated = batchesGenerated +func (dp *goldenDataProvider) SetLoadGeneratorCounters(dataItemsGenerated *atomic.Uint64) { dp.dataItemsGenerated = dataItemsGenerated } -func (dp *GoldenDataProvider) GenerateTraces() (pdata.Traces, bool) { +func (dp *goldenDataProvider) GenerateTraces() (pdata.Traces, bool) { if dp.tracesGenerated == nil { var err error dp.tracesGenerated, err = goldendataset.GenerateTraces(dp.tracePairsFile, dp.spanPairsFile) @@ -224,7 +215,6 @@ func (dp *GoldenDataProvider) GenerateTraces() (pdata.Traces, bool) { dp.tracesGenerated = nil } } - dp.batchesGenerated.Inc() if dp.tracesIndex >= len(dp.tracesGenerated) { return pdata.NewTraces(), true } @@ -234,7 +224,7 @@ func (dp *GoldenDataProvider) GenerateTraces() (pdata.Traces, bool) { return td, false } -func (dp *GoldenDataProvider) GenerateMetrics() (pdata.Metrics, bool) { +func (dp *goldenDataProvider) GenerateMetrics() (pdata.Metrics, bool) { if dp.metricsGenerated == nil { var err error dp.metricsGenerated, err = goldendataset.GenerateMetrics(dp.metricPairsFile) @@ -242,18 +232,16 @@ func (dp *GoldenDataProvider) GenerateMetrics() (pdata.Metrics, bool) { log.Printf("cannot generate metrics: %s", err) } } - dp.batchesGenerated.Inc() if dp.metricsIndex == len(dp.metricsGenerated) { return pdata.Metrics{}, true } pdm := dp.metricsGenerated[dp.metricsIndex] dp.metricsIndex++ - _, dpCount := pdm.MetricAndDataPointCount() - dp.dataItemsGenerated.Add(uint64(dpCount)) + dp.dataItemsGenerated.Add(uint64(pdm.DataPointCount())) return pdm, false } -func (dp *GoldenDataProvider) GenerateLogs() (pdata.Logs, bool) { +func (dp *goldenDataProvider) GenerateLogs() (pdata.Logs, bool) { return pdata.NewLogs(), true } @@ -263,9 +251,10 @@ func (dp *GoldenDataProvider) GenerateLogs() (pdata.Logs, bool) { // exporter (note: "file" exporter writes one JSON message per line, FileDataProvider // expects just a single JSON message in the entire file). type FileDataProvider struct { - batchesGenerated *atomic.Uint64 dataItemsGenerated *atomic.Uint64 - message proto.Message + logs pdata.Logs + metrics pdata.Metrics + traces pdata.Traces ItemsPerBatch int } @@ -276,72 +265,50 @@ func NewFileDataProvider(filePath string, dataType config.DataType) (*FileDataPr if err != nil { return nil, err } + var buf []byte + buf, err = ioutil.ReadAll(file) + if err != nil { + return nil, err + } - var message proto.Message - var dataPointCount int - + dp := &FileDataProvider{} // Load the message from the file and count the data points. 
- switch dataType { case config.TracesDataType: - var msg otlptracecol.ExportTraceServiceRequest - if err := protobufJSONUnmarshaler.Unmarshal(file, &msg); err != nil { + if dp.traces, err = otlp.NewJSONTracesUnmarshaler().UnmarshalTraces(buf); err != nil { return nil, err } - message = &msg - - md := pdata.TracesFromInternalRep(internal.TracesFromOtlp(&msg)) - dataPointCount = md.SpanCount() - + dp.ItemsPerBatch = dp.traces.SpanCount() case config.MetricsDataType: - var msg otlpmetricscol.ExportMetricsServiceRequest - if err := protobufJSONUnmarshaler.Unmarshal(file, &msg); err != nil { + if dp.metrics, err = otlp.NewJSONMetricsUnmarshaler().UnmarshalMetrics(buf); err != nil { return nil, err } - message = &msg - - md := pdata.MetricsFromInternalRep(internal.MetricsFromOtlp(&msg)) - _, dataPointCount = md.MetricAndDataPointCount() - + dp.ItemsPerBatch = dp.metrics.DataPointCount() case config.LogsDataType: - var msg otlplogscol.ExportLogsServiceRequest - if err := protobufJSONUnmarshaler.Unmarshal(file, &msg); err != nil { + if dp.logs, err = otlp.NewJSONLogsUnmarshaler().UnmarshalLogs(buf); err != nil { return nil, err } - message = &msg - - md := pdata.LogsFromInternalRep(internal.LogsFromOtlp(&msg)) - dataPointCount = md.LogRecordCount() + dp.ItemsPerBatch = dp.logs.LogRecordCount() } - return &FileDataProvider{ - message: message, - ItemsPerBatch: dataPointCount, - }, nil + return dp, nil } -func (dp *FileDataProvider) SetLoadGeneratorCounters(batchesGenerated *atomic.Uint64, dataItemsGenerated *atomic.Uint64) { - dp.batchesGenerated = batchesGenerated +func (dp *FileDataProvider) SetLoadGeneratorCounters(dataItemsGenerated *atomic.Uint64) { dp.dataItemsGenerated = dataItemsGenerated } -// Marshaler configuration used for marhsaling Protobuf to JSON. Use default config. -var protobufJSONUnmarshaler = &jsonpb.Unmarshaler{} - func (dp *FileDataProvider) GenerateTraces() (pdata.Traces, bool) { - // TODO: implement similar to GenerateMetrics. - return pdata.NewTraces(), true + dp.dataItemsGenerated.Add(uint64(dp.ItemsPerBatch)) + return dp.traces, false } func (dp *FileDataProvider) GenerateMetrics() (pdata.Metrics, bool) { - md := pdata.MetricsFromInternalRep(internal.MetricsFromOtlp(dp.message.(*otlpmetricscol.ExportMetricsServiceRequest))) - dp.batchesGenerated.Inc() - _, dataPointCount := md.MetricAndDataPointCount() - dp.dataItemsGenerated.Add(uint64(dataPointCount)) - return md, false + dp.dataItemsGenerated.Add(uint64(dp.ItemsPerBatch)) + return dp.metrics, false } func (dp *FileDataProvider) GenerateLogs() (pdata.Logs, bool) { - // TODO: implement similar to GenerateMetrics. - return pdata.NewLogs(), true + dp.dataItemsGenerated.Add(uint64(dp.ItemsPerBatch)) + return dp.logs, false } diff --git a/internal/otel_collector/testbed/testbed/in_process_collector.go b/internal/otel_collector/testbed/testbed/in_process_collector.go new file mode 100644 index 00000000000..e7fb4feb6ac --- /dev/null +++ b/internal/otel_collector/testbed/testbed/in_process_collector.go @@ -0,0 +1,137 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package testbed
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/shirou/gopsutil/process"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/collector/internal/version"
+ "go.opentelemetry.io/collector/service"
+ "go.opentelemetry.io/collector/service/parserprovider"
+)
+
+// inProcessCollector implements the OtelcolRunner interface, running a single otelcol as a goroutine within the
+// same process as the test executor.
+type inProcessCollector struct {
+ logger *zap.Logger
+ factories component.Factories
+ configStr string
+ svc *service.Collector
+ appDone chan struct{}
+ stopped bool
+}
+
+// NewInProcessCollector creates a new inProcessCollector using the supplied component factories.
+func NewInProcessCollector(factories component.Factories) OtelcolRunner {
+ return &inProcessCollector{
+ factories: factories,
+ }
+}
+
+func (ipp *inProcessCollector) PrepareConfig(configStr string) (configCleanup func(), err error) {
+ configCleanup = func() {
+ // NoOp
+ }
+ var logger *zap.Logger
+ logger, err = configureLogger()
+ if err != nil {
+ return configCleanup, err
+ }
+ ipp.logger = logger
+ ipp.configStr = configStr
+ return configCleanup, err
+}
+
+func (ipp *inProcessCollector) Start(args StartParams) error {
+ settings := service.CollectorSettings{
+ BuildInfo: component.BuildInfo{
+ Command: "otelcol",
+ Version: version.Version,
+ },
+ Factories: ipp.factories,
+ ParserProvider: parserprovider.NewInMemory(strings.NewReader(ipp.configStr)),
+ }
+ var err error
+ ipp.svc, err = service.New(settings)
+ if err != nil {
+ return err
+ }
+ ipp.svc.Command().SetArgs(args.CmdArgs)
+
+ ipp.appDone = make(chan struct{})
+ go func() {
+ defer close(ipp.appDone)
+ appErr := ipp.svc.Run()
+ if appErr != nil {
+ err = appErr
+ }
+ }()
+
+ for state := range ipp.svc.GetStateChannel() {
+ switch state {
+ case service.Starting:
+ // NoOp
+ case service.Running:
+ return err
+ default:
+ err = fmt.Errorf("unable to start, otelcol state is %d", state)
+ }
+ }
+ return err
+}
+
+func (ipp *inProcessCollector) Stop() (stopped bool, err error) {
+ if !ipp.stopped {
+ ipp.stopped = true
+ ipp.svc.Shutdown()
+ }
+ <-ipp.appDone
+ stopped = ipp.stopped
+ return stopped, err
+}
+
+func (ipp *inProcessCollector) WatchResourceConsumption() error {
+ return nil
+}
+
+func (ipp *inProcessCollector) GetProcessMon() *process.Process {
+ return nil
+}
+
+func (ipp *inProcessCollector) GetTotalConsumption() *ResourceConsumption {
+ return &ResourceConsumption{
+ CPUPercentAvg: 0,
+ CPUPercentMax: 0,
+ RAMMiBAvg: 0,
+ RAMMiBMax: 0,
+ }
+}
+
+func (ipp *inProcessCollector) GetResourceConsumption() string {
+ return ""
+}
+
+func configureLogger() (*zap.Logger, error) {
+ conf := zap.NewDevelopmentConfig()
+ conf.Level.SetLevel(zapcore.InfoLevel)
+ logger, err := conf.Build()
+ return logger, err
+}
diff --git a/internal/otel_collector/testbed/testbed/load_generator.go b/internal/otel_collector/testbed/testbed/load_generator.go
index 0ce3175f731..dbd4b90c63e 100644
--- a/internal/otel_collector/testbed/testbed/load_generator.go
+++ b/internal/otel_collector/testbed/testbed/load_generator.go
@@ -33,9 +33,6 @@ type LoadGenerator struct {
dataProvider DataProvider
- // Number of batches of data items sent.
- batchesSent atomic.Uint64
-
// Number of data items (spans or metric data points) sent.
dataItemsSent atomic.Uint64 @@ -141,7 +138,7 @@ func (lg *LoadGenerator) generate() { return } - lg.dataProvider.SetLoadGeneratorCounters(&lg.batchesSent, &lg.dataItemsSent) + lg.dataProvider.SetLoadGeneratorCounters(&lg.dataItemsSent) err := lg.sender.Start() if err != nil { diff --git a/internal/otel_collector/testbed/testbed/mock_backend.go b/internal/otel_collector/testbed/testbed/mock_backend.go index 2b95ca80902..99d35433428 100644 --- a/internal/otel_collector/testbed/testbed/mock_backend.go +++ b/internal/otel_collector/testbed/testbed/mock_backend.go @@ -24,7 +24,7 @@ import ( "go.uber.org/atomic" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) // MockBackend is a backend that allows receiving the data locally. @@ -218,14 +218,13 @@ func (mc *MockMetricConsumer) Capabilities() consumer.Capabilities { } func (mc *MockMetricConsumer) ConsumeMetrics(_ context.Context, md pdata.Metrics) error { - _, dataPoints := md.MetricAndDataPointCount() - mc.numMetricsReceived.Add(uint64(dataPoints)) + mc.numMetricsReceived.Add(uint64(md.DataPointCount())) mc.backend.ConsumeMetric(md) return nil } -func (tc *MockTraceConsumer) MockConsumeTraceData(spansCount int) error { - tc.numSpansReceived.Add(uint64(spansCount)) +func (tc *MockTraceConsumer) MockConsumeTraceData(spanCount int) error { + tc.numSpansReceived.Add(uint64(spanCount)) return nil } diff --git a/internal/otel_collector/testbed/testbed/options.go b/internal/otel_collector/testbed/testbed/options.go index ed401666c8f..00cd7f1672c 100644 --- a/internal/otel_collector/testbed/testbed/options.go +++ b/internal/otel_collector/testbed/testbed/options.go @@ -17,26 +17,59 @@ package testbed -// TestCaseOption defines a TestCase option. -type TestCaseOption struct { - option func(t *TestCase) +import ( + "time" +) + +// ResourceSpec is a resource consumption specification. +type ResourceSpec struct { + // Percentage of one core the process is expected to consume at most. + // Test is aborted and failed if consumption during + // ResourceCheckPeriod exceeds this number. If 0 the CPU + // consumption is not monitored and does not affect the test result. + ExpectedMaxCPU uint32 + + // Maximum RAM in MiB the process is expected to consume. + // Test is aborted and failed if consumption exceeds this number. + // If 0 memory consumption is not monitored and does not affect + // the test result. + ExpectedMaxRAM uint32 + + // Period during which CPU and RAM of the process are measured. + // Bigger numbers will result in more averaging of short spikes. + ResourceCheckPeriod time.Duration } -// Apply takes a TestCase and runs the option function on it. -func (o TestCaseOption) Apply(t *TestCase) { - o.option(t) +// isSpecified returns true if any part of ResourceSpec is specified, +// i.e. has non-zero value. +func (rs *ResourceSpec) isSpecified() bool { + return rs != nil && (rs.ExpectedMaxCPU != 0 || rs.ExpectedMaxRAM != 0) } -// WithSkipResults option disables writing out results file for a TestCase. +// TestCaseOption defines a TestCase option. +type TestCaseOption func(t *TestCase) + +// WithSkipResults disables writing out results file for a TestCase. func WithSkipResults() TestCaseOption { - return TestCaseOption{func(t *TestCase) { - t.skipResults = true - }} + return func(tc *TestCase) { + tc.skipResults = true + } } -// WithConfigFile allows a custom configuration file for TestCase. 
-func WithConfigFile(file string) TestCaseOption {
- return TestCaseOption{func(t *TestCase) {
- t.agentConfigFile = file
- }}
+// WithResourceLimits sets expected limits for resource consumption.
+// Error is signaled if consumption during ResourceCheckPeriod exceeds the limits.
+// Limits are modified only for non-zero fields of resourceSpec, all zero-value fields
+// of resourceSpec are ignored and their previous values remain in effect.
+func WithResourceLimits(resourceSpec ResourceSpec) TestCaseOption {
+ return func(tc *TestCase) {
+ if resourceSpec.ExpectedMaxCPU > 0 {
+ tc.resourceSpec.ExpectedMaxCPU = resourceSpec.ExpectedMaxCPU
+ }
+ if resourceSpec.ExpectedMaxRAM > 0 {
+ tc.resourceSpec.ExpectedMaxRAM = resourceSpec.ExpectedMaxRAM
+ }
+ if resourceSpec.ResourceCheckPeriod > 0 {
+ tc.resourceSpec.ResourceCheckPeriod = resourceSpec.ResourceCheckPeriod
+ }
+ }
}
diff --git a/internal/otel_collector/testbed/testbed/otelcol_runner.go b/internal/otel_collector/testbed/testbed/otelcol_runner.go
index 80a2c97310c..1a4bd2b5cf4 100644
--- a/internal/otel_collector/testbed/testbed/otelcol_runner.go
+++ b/internal/otel_collector/testbed/testbed/otelcol_runner.go
@@ -15,19 +15,23 @@ package testbed
import (
- "fmt"
- "strings"
-
"github.com/shirou/gopsutil/process"
- "go.uber.org/zap"
- "go.uber.org/zap/zapcore"
-
- "go.opentelemetry.io/collector/component"
- "go.opentelemetry.io/collector/internal/version"
- "go.opentelemetry.io/collector/service"
- "go.opentelemetry.io/collector/service/parserprovider"
)
+type StartParams struct {
+ Name string
+ LogFilePath string
+ CmdArgs []string
+ resourceSpec *ResourceSpec
+}
+
+type ResourceConsumption struct {
+ CPUPercentAvg float64
+ CPUPercentMax float64
+ RAMMiBAvg uint32
+ RAMMiBMax uint32
+}
+
// OtelcolRunner defines the interface for configuring, starting and stopping one or more instances of
// otelcol which will be the subject of testing being executed.
type OtelcolRunner interface {
@@ -50,111 +54,3 @@ type OtelcolRunner interface {
// GetResourceConsumption returns the data collected by the process monitor as a display string.
GetResourceConsumption() string
}
-
-// InProcessCollector implements the OtelcolRunner interfaces running a single otelcol as a go routine within the
-// same process as the test executor.
-type InProcessCollector struct {
- logger *zap.Logger
- factories component.Factories
- configStr string
- svc *service.Application
- appDone chan struct{}
- stopped bool
-}
-
-// NewInProcessCollector crewtes a new InProcessCollector using the supplied component factories.
-func NewInProcessCollector(factories component.Factories) *InProcessCollector { - return &InProcessCollector{ - factories: factories, - } -} - -func (ipp *InProcessCollector) PrepareConfig(configStr string) (configCleanup func(), err error) { - configCleanup = func() { - // NoOp - } - var logger *zap.Logger - logger, err = configureLogger() - if err != nil { - return configCleanup, err - } - ipp.logger = logger - ipp.configStr = configStr - return configCleanup, err -} - -func (ipp *InProcessCollector) Start(args StartParams) error { - settings := service.AppSettings{ - BuildInfo: component.BuildInfo{ - Command: "otelcol", - Version: version.Version, - }, - Factories: ipp.factories, - ParserProvider: parserprovider.NewInMemory(strings.NewReader(ipp.configStr)), - } - var err error - ipp.svc, err = service.New(settings) - if err != nil { - return err - } - ipp.svc.Command().SetArgs(args.CmdArgs) - - ipp.appDone = make(chan struct{}) - go func() { - defer close(ipp.appDone) - appErr := ipp.svc.Run() - if appErr != nil { - err = appErr - } - }() - - for state := range ipp.svc.GetStateChannel() { - switch state { - case service.Starting: - // NoOp - case service.Running: - return err - default: - err = fmt.Errorf("unable to start, otelcol state is %d", state) - } - } - return err -} - -func (ipp *InProcessCollector) Stop() (stopped bool, err error) { - if !ipp.stopped { - ipp.stopped = true - ipp.svc.Shutdown() - } - <-ipp.appDone - stopped = ipp.stopped - return stopped, err -} - -func (ipp *InProcessCollector) WatchResourceConsumption() error { - return nil -} - -func (ipp *InProcessCollector) GetProcessMon() *process.Process { - return nil -} - -func (ipp *InProcessCollector) GetTotalConsumption() *ResourceConsumption { - return &ResourceConsumption{ - CPUPercentAvg: 0, - CPUPercentMax: 0, - RAMMiBAvg: 0, - RAMMiBMax: 0, - } -} - -func (ipp *InProcessCollector) GetResourceConsumption() string { - return "" -} - -func configureLogger() (*zap.Logger, error) { - conf := zap.NewDevelopmentConfig() - conf.Level.SetLevel(zapcore.InfoLevel) - logger, err := conf.Build() - return logger, err -} diff --git a/internal/otel_collector/testbed/testbed/receivers.go b/internal/otel_collector/testbed/testbed/receivers.go index 89872f4b584..f3366243337 100644 --- a/internal/otel_collector/testbed/testbed/receivers.go +++ b/internal/otel_collector/testbed/testbed/receivers.go @@ -78,25 +78,22 @@ func (mb *DataReceiverBase) GetExporters() map[config.DataType]map[config.Compon return nil } -// OCDataReceiver implements OpenCensus format receiver. -type OCDataReceiver struct { +// ocDataReceiver implements OpenCensus format receiver. +type ocDataReceiver struct { DataReceiverBase traceReceiver component.TracesReceiver metricsReceiver component.MetricsReceiver } -// Ensure OCDataReceiver implements DataReceiver. -var _ DataReceiver = (*OCDataReceiver)(nil) - const DefaultOCPort = 56565 -// NewOCDataReceiver creates a new OCDataReceiver that will listen on the specified port after Start +// NewOCDataReceiver creates a new ocDataReceiver that will listen on the specified port after Start // is called. 
-func NewOCDataReceiver(port int) *OCDataReceiver { - return &OCDataReceiver{DataReceiverBase: DataReceiverBase{Port: port}} +func NewOCDataReceiver(port int) DataReceiver { + return &ocDataReceiver{DataReceiverBase: DataReceiverBase{Port: port}} } -func (or *OCDataReceiver) Start(tc consumer.Traces, mc consumer.Metrics, _ consumer.Logs) error { +func (or *ocDataReceiver) Start(tc consumer.Traces, mc consumer.Metrics, _ consumer.Logs) error { factory := opencensusreceiver.NewFactory() cfg := factory.CreateDefaultConfig().(*opencensusreceiver.Config) cfg.NetAddr = confignet.NetAddr{Endpoint: fmt.Sprintf("localhost:%d", or.Port), Transport: "tcp"} @@ -114,14 +111,14 @@ func (or *OCDataReceiver) Start(tc consumer.Traces, mc consumer.Metrics, _ consu return or.metricsReceiver.Start(context.Background(), or) } -func (or *OCDataReceiver) Stop() error { +func (or *ocDataReceiver) Stop() error { if err := or.traceReceiver.Shutdown(context.Background()); err != nil { return err } return or.metricsReceiver.Shutdown(context.Background()) } -func (or *OCDataReceiver) GenConfigYAMLStr() string { +func (or *ocDataReceiver) GenConfigYAMLStr() string { // Note that this generates an exporter config for agent. return fmt.Sprintf(` opencensus: @@ -129,25 +126,25 @@ func (or *OCDataReceiver) GenConfigYAMLStr() string { insecure: true`, or.Port) } -func (or *OCDataReceiver) ProtocolName() string { +func (or *ocDataReceiver) ProtocolName() string { return "opencensus" } -// JaegerDataReceiver implements Jaeger format receiver. -type JaegerDataReceiver struct { +// jaegerDataReceiver implements Jaeger format receiver. +type jaegerDataReceiver struct { DataReceiverBase receiver component.TracesReceiver } -var _ DataReceiver = (*JaegerDataReceiver)(nil) - const DefaultJaegerPort = 14250 -func NewJaegerDataReceiver(port int) *JaegerDataReceiver { - return &JaegerDataReceiver{DataReceiverBase: DataReceiverBase{Port: port}} +// NewJaegerDataReceiver creates a new Jaeger DataReceiver that will listen on the specified port after Start +// is called. +func NewJaegerDataReceiver(port int) DataReceiver { + return &jaegerDataReceiver{DataReceiverBase: DataReceiverBase{Port: port}} } -func (jr *JaegerDataReceiver) Start(tc consumer.Traces, _ consumer.Metrics, _ consumer.Logs) error { +func (jr *jaegerDataReceiver) Start(tc consumer.Traces, _ consumer.Metrics, _ consumer.Logs) error { factory := jaegerreceiver.NewFactory() cfg := factory.CreateDefaultConfig().(*jaegerreceiver.Config) cfg.Protocols.GRPC = &configgrpc.GRPCServerSettings{ @@ -163,11 +160,11 @@ func (jr *JaegerDataReceiver) Start(tc consumer.Traces, _ consumer.Metrics, _ co return jr.receiver.Start(context.Background(), jr) } -func (jr *JaegerDataReceiver) Stop() error { +func (jr *jaegerDataReceiver) Stop() error { return jr.receiver.Shutdown(context.Background()) } -func (jr *JaegerDataReceiver) GenConfigYAMLStr() string { +func (jr *jaegerDataReceiver) GenConfigYAMLStr() string { // Note that this generates an exporter config for agent. 
return fmt.Sprintf(` jaeger: @@ -175,7 +172,7 @@ func (jr *JaegerDataReceiver) GenConfigYAMLStr() string { insecure: true`, jr.Port) } -func (jr *JaegerDataReceiver) ProtocolName() string { +func (jr *jaegerDataReceiver) ProtocolName() string { return "jaeger" } @@ -261,6 +258,8 @@ func (bor *BaseOTLPDataReceiver) GenConfigYAMLStr() string { const DefaultOTLPPort = 4317 +var _ DataReceiver = (*BaseOTLPDataReceiver)(nil) + // NewOTLPDataReceiver creates a new OTLP DataReceiver that will listen on the specified port after Start // is called. func NewOTLPDataReceiver(port int) *BaseOTLPDataReceiver { @@ -279,19 +278,19 @@ func NewOTLPHTTPDataReceiver(port int) *BaseOTLPDataReceiver { } } -// ZipkinDataReceiver implements Zipkin format receiver. -type ZipkinDataReceiver struct { +// zipkinDataReceiver implements Zipkin format receiver. +type zipkinDataReceiver struct { DataReceiverBase receiver component.TracesReceiver } -var _ DataReceiver = (*ZipkinDataReceiver)(nil) - -func NewZipkinDataReceiver(port int) *ZipkinDataReceiver { - return &ZipkinDataReceiver{DataReceiverBase: DataReceiverBase{Port: port}} +// NewZipkinDataReceiver creates a new Zipkin DataReceiver that will listen on the specified port after Start +// is called. +func NewZipkinDataReceiver(port int) DataReceiver { + return &zipkinDataReceiver{DataReceiverBase: DataReceiverBase{Port: port}} } -func (zr *ZipkinDataReceiver) Start(tc consumer.Traces, _ consumer.Metrics, _ consumer.Logs) error { +func (zr *zipkinDataReceiver) Start(tc consumer.Traces, _ consumer.Metrics, _ consumer.Logs) error { factory := zipkinreceiver.NewFactory() cfg := factory.CreateDefaultConfig().(*zipkinreceiver.Config) cfg.Endpoint = fmt.Sprintf("localhost:%d", zr.Port) @@ -307,11 +306,11 @@ func (zr *ZipkinDataReceiver) Start(tc consumer.Traces, _ consumer.Metrics, _ co return zr.receiver.Start(context.Background(), zr) } -func (zr *ZipkinDataReceiver) Stop() error { +func (zr *zipkinDataReceiver) Stop() error { return zr.receiver.Shutdown(context.Background()) } -func (zr *ZipkinDataReceiver) GenConfigYAMLStr() string { +func (zr *zipkinDataReceiver) GenConfigYAMLStr() string { // Note that this generates an exporter config for agent. 
return fmt.Sprintf(` zipkin: @@ -319,24 +318,22 @@ func (zr *ZipkinDataReceiver) GenConfigYAMLStr() string { format: json`, zr.Port) } -func (zr *ZipkinDataReceiver) ProtocolName() string { +func (zr *zipkinDataReceiver) ProtocolName() string { return "zipkin" } // prometheus -type PrometheusDataReceiver struct { +type prometheusDataReceiver struct { DataReceiverBase receiver component.MetricsReceiver } -var _ DataReceiver = (*PrometheusDataReceiver)(nil) - -func NewPrometheusDataReceiver(port int) *PrometheusDataReceiver { - return &PrometheusDataReceiver{DataReceiverBase: DataReceiverBase{Port: port}} +func NewPrometheusDataReceiver(port int) DataReceiver { + return &prometheusDataReceiver{DataReceiverBase: DataReceiverBase{Port: port}} } -func (dr *PrometheusDataReceiver) Start(_ consumer.Traces, mc consumer.Metrics, _ consumer.Logs) error { +func (dr *prometheusDataReceiver) Start(_ consumer.Traces, mc consumer.Metrics, _ consumer.Logs) error { factory := prometheusreceiver.NewFactory() cfg := factory.CreateDefaultConfig().(*prometheusreceiver.Config) addr := fmt.Sprintf("0.0.0.0:%d", dr.Port) @@ -367,11 +364,11 @@ func (dr *PrometheusDataReceiver) Start(_ consumer.Traces, mc consumer.Metrics, return dr.receiver.Start(context.Background(), dr) } -func (dr *PrometheusDataReceiver) Stop() error { +func (dr *prometheusDataReceiver) Stop() error { return dr.receiver.Shutdown(context.Background()) } -func (dr *PrometheusDataReceiver) GenConfigYAMLStr() string { +func (dr *prometheusDataReceiver) GenConfigYAMLStr() string { format := ` prometheus: endpoint: "localhost:%d" @@ -379,6 +376,6 @@ func (dr *PrometheusDataReceiver) GenConfigYAMLStr() string { return fmt.Sprintf(format, dr.Port) } -func (dr *PrometheusDataReceiver) ProtocolName() string { +func (dr *prometheusDataReceiver) ProtocolName() string { return "prometheus" } diff --git a/internal/otel_collector/testbed/testbed/senders.go b/internal/otel_collector/testbed/testbed/senders.go index 3efc1d85236..b24239321db 100644 --- a/internal/otel_collector/testbed/testbed/senders.go +++ b/internal/otel_collector/testbed/testbed/senders.go @@ -108,24 +108,24 @@ func (dsb *DataSenderBase) Flush() { // Exporter interface does not support Flush, so nothing to do. } -// JaegerGRPCDataSender implements TraceDataSender for Jaeger thrift_http exporter. -type JaegerGRPCDataSender struct { +// jaegerGRPCDataSender implements TraceDataSender for Jaeger thrift_http exporter. +type jaegerGRPCDataSender struct { DataSenderBase consumer.Traces } -// Ensure JaegerGRPCDataSender implements TraceDataSender. -var _ TraceDataSender = (*JaegerGRPCDataSender)(nil) +// Ensure jaegerGRPCDataSender implements TraceDataSender. +var _ TraceDataSender = (*jaegerGRPCDataSender)(nil) // NewJaegerGRPCDataSender creates a new Jaeger exporter sender that will send // to the specified port after Start is called. -func NewJaegerGRPCDataSender(host string, port int) *JaegerGRPCDataSender { - return &JaegerGRPCDataSender{ +func NewJaegerGRPCDataSender(host string, port int) TraceDataSender { + return &jaegerGRPCDataSender{ DataSenderBase: DataSenderBase{Port: port, Host: host}, } } -func (je *JaegerGRPCDataSender) Start() error { +func (je *jaegerGRPCDataSender) Start() error { factory := jaegerexporter.NewFactory() cfg := factory.CreateDefaultConfig().(*jaegerexporter.Config) // Disable retries, we should push data and if error just log it. 
@@ -146,7 +146,7 @@ func (je *JaegerGRPCDataSender) Start() error { return exp.Start(context.Background(), je) } -func (je *JaegerGRPCDataSender) GenConfigYAMLStr() string { +func (je *jaegerGRPCDataSender) GenConfigYAMLStr() string { return fmt.Sprintf(` jaeger: protocols: @@ -154,7 +154,7 @@ func (je *JaegerGRPCDataSender) GenConfigYAMLStr() string { endpoint: "%s"`, je.GetEndpoint()) } -func (je *JaegerGRPCDataSender) ProtocolName() string { +func (je *jaegerGRPCDataSender) ProtocolName() string { return "jaeger" } @@ -181,19 +181,16 @@ func (ods *ocDataSender) ProtocolName() string { return "opencensus" } -// OCTraceDataSender implements TraceDataSender for OpenCensus trace exporter. -type OCTraceDataSender struct { +// ocTracesDataSender implements TraceDataSender for OpenCensus trace exporter. +type ocTracesDataSender struct { ocDataSender consumer.Traces } -// Ensure OCTraceDataSender implements TraceDataSender. -var _ TraceDataSender = (*OCTraceDataSender)(nil) - -// NewOCTraceDataSender creates a new OCTraceDataSender that will send +// NewOCTraceDataSender creates a new ocTracesDataSender that will send // to the specified port after Start is called. -func NewOCTraceDataSender(host string, port int) *OCTraceDataSender { - return &OCTraceDataSender{ +func NewOCTraceDataSender(host string, port int) TraceDataSender { + return &ocTracesDataSender{ ocDataSender: ocDataSender{ DataSenderBase: DataSenderBase{ Port: port, @@ -203,7 +200,7 @@ func NewOCTraceDataSender(host string, port int) *OCTraceDataSender { } } -func (ote *OCTraceDataSender) Start() error { +func (ote *ocTracesDataSender) Start() error { factory := opencensusexporter.NewFactory() cfg := ote.fillConfig(factory.CreateDefaultConfig().(*opencensusexporter.Config)) exp, err := factory.CreateTracesExporter(context.Background(), defaultExporterParams(), cfg) @@ -215,19 +212,16 @@ func (ote *OCTraceDataSender) Start() error { return exp.Start(context.Background(), ote) } -// OCMetricsDataSender implements MetricDataSender for OpenCensus metrics exporter. -type OCMetricsDataSender struct { +// ocMetricsDataSender implements MetricDataSender for OpenCensus metrics exporter. +type ocMetricsDataSender struct { ocDataSender consumer.Metrics } -// Ensure OCMetricsDataSender implements MetricDataSender. -var _ MetricDataSender = (*OCMetricsDataSender)(nil) - // NewOCMetricDataSender creates a new OpenCensus metric exporter sender that will send // to the specified port after Start is called. -func NewOCMetricDataSender(host string, port int) *OCMetricsDataSender { - return &OCMetricsDataSender{ +func NewOCMetricDataSender(host string, port int) MetricDataSender { + return &ocMetricsDataSender{ ocDataSender: ocDataSender{ DataSenderBase: DataSenderBase{ Port: port, @@ -237,7 +231,7 @@ func NewOCMetricDataSender(host string, port int) *OCMetricsDataSender { } } -func (ome *OCMetricsDataSender) Start() error { +func (ome *ocMetricsDataSender) Start() error { factory := opencensusexporter.NewFactory() cfg := ome.fillConfig(factory.CreateDefaultConfig().(*opencensusexporter.Config)) exp, err := factory.CreateMetricsExporter(context.Background(), defaultExporterParams(), cfg) @@ -278,18 +272,15 @@ func (ods *otlpHTTPDataSender) ProtocolName() string { return "otlp" } -// OTLPHTTPTraceDataSender implements TraceDataSender for OTLP/HTTP trace exporter. -type OTLPHTTPTraceDataSender struct { +// otlpHTTPTraceDataSender implements TraceDataSender for OTLP/HTTP trace exporter. 
+type otlpHTTPTraceDataSender struct { otlpHTTPDataSender consumer.Traces } -// Ensure OTLPHTTPTraceDataSender implements TraceDataSender. -var _ TraceDataSender = (*OTLPHTTPTraceDataSender)(nil) - // NewOTLPHTTPTraceDataSender creates a new TraceDataSender for OTLP/HTTP traces exporter. -func NewOTLPHTTPTraceDataSender(host string, port int) *OTLPHTTPTraceDataSender { - return &OTLPHTTPTraceDataSender{ +func NewOTLPHTTPTraceDataSender(host string, port int) TraceDataSender { + return &otlpHTTPTraceDataSender{ otlpHTTPDataSender: otlpHTTPDataSender{ DataSenderBase: DataSenderBase{ Port: port, @@ -299,7 +290,7 @@ func NewOTLPHTTPTraceDataSender(host string, port int) *OTLPHTTPTraceDataSender } } -func (ote *OTLPHTTPTraceDataSender) Start() error { +func (ote *otlpHTTPTraceDataSender) Start() error { factory := otlphttpexporter.NewFactory() cfg := ote.fillConfig(factory.CreateDefaultConfig().(*otlphttpexporter.Config)) exp, err := factory.CreateTracesExporter(context.Background(), defaultExporterParams(), cfg) @@ -311,19 +302,16 @@ func (ote *OTLPHTTPTraceDataSender) Start() error { return exp.Start(context.Background(), ote) } -// OTLPHTTPMetricsDataSender implements MetricDataSender for OTLP/HTTP metrics exporter. -type OTLPHTTPMetricsDataSender struct { +// otlpHTTPMetricsDataSender implements MetricDataSender for OTLP/HTTP metrics exporter. +type otlpHTTPMetricsDataSender struct { otlpHTTPDataSender consumer.Metrics } -// Ensure OTLPHTTPMetricsDataSender implements MetricDataSender. -var _ MetricDataSender = (*OTLPHTTPMetricsDataSender)(nil) - // NewOTLPHTTPMetricDataSender creates a new OTLP/HTTP metrics exporter sender that will send // to the specified port after Start is called. -func NewOTLPHTTPMetricDataSender(host string, port int) *OTLPHTTPMetricsDataSender { - return &OTLPHTTPMetricsDataSender{ +func NewOTLPHTTPMetricDataSender(host string, port int) MetricDataSender { + return &otlpHTTPMetricsDataSender{ otlpHTTPDataSender: otlpHTTPDataSender{ DataSenderBase: DataSenderBase{ Port: port, @@ -333,7 +321,7 @@ func NewOTLPHTTPMetricDataSender(host string, port int) *OTLPHTTPMetricsDataSend } } -func (ome *OTLPHTTPMetricsDataSender) Start() error { +func (ome *otlpHTTPMetricsDataSender) Start() error { factory := otlphttpexporter.NewFactory() cfg := ome.fillConfig(factory.CreateDefaultConfig().(*otlphttpexporter.Config)) exp, err := factory.CreateMetricsExporter(context.Background(), defaultExporterParams(), cfg) @@ -345,19 +333,16 @@ func (ome *OTLPHTTPMetricsDataSender) Start() error { return exp.Start(context.Background(), ome) } -// OTLPHTTPLogsDataSender implements LogsDataSender for OTLP/HTTP logs exporter. -type OTLPHTTPLogsDataSender struct { +// otlpHTTPLogsDataSender implements LogsDataSender for OTLP/HTTP logs exporter. +type otlpHTTPLogsDataSender struct { otlpHTTPDataSender consumer.Logs } -// Ensure OTLPHTTPLogsDataSender implements MetricDataSender. -var _ LogDataSender = (*OTLPHTTPLogsDataSender)(nil) - // NewOTLPHTTPLogsDataSender creates a new OTLP/HTTP logs exporter sender that will send // to the specified port after Start is called. 
-func NewOTLPHTTPLogsDataSender(host string, port int) *OTLPHTTPLogsDataSender { - return &OTLPHTTPLogsDataSender{ +func NewOTLPHTTPLogsDataSender(host string, port int) LogDataSender { + return &otlpHTTPLogsDataSender{ otlpHTTPDataSender: otlpHTTPDataSender{ DataSenderBase: DataSenderBase{ Port: port, @@ -367,7 +352,7 @@ func NewOTLPHTTPLogsDataSender(host string, port int) *OTLPHTTPLogsDataSender { } } -func (olds *OTLPHTTPLogsDataSender) Start() error { +func (olds *otlpHTTPLogsDataSender) Start() error { factory := otlphttpexporter.NewFactory() cfg := olds.fillConfig(factory.CreateDefaultConfig().(*otlphttpexporter.Config)) exp, err := factory.CreateLogsExporter(context.Background(), defaultExporterParams(), cfg) @@ -408,18 +393,15 @@ func (ods *otlpDataSender) ProtocolName() string { return "otlp" } -// OTLPTraceDataSender implements TraceDataSender for OTLP traces exporter. -type OTLPTraceDataSender struct { +// otlpTraceDataSender implements TraceDataSender for OTLP traces exporter. +type otlpTraceDataSender struct { otlpDataSender consumer.Traces } -// Ensure OTLPTraceDataSender implements TraceDataSender. -var _ TraceDataSender = (*OTLPTraceDataSender)(nil) - // NewOTLPTraceDataSender creates a new TraceDataSender for OTLP traces exporter. -func NewOTLPTraceDataSender(host string, port int) *OTLPTraceDataSender { - return &OTLPTraceDataSender{ +func NewOTLPTraceDataSender(host string, port int) TraceDataSender { + return &otlpTraceDataSender{ otlpDataSender: otlpDataSender{ DataSenderBase: DataSenderBase{ Port: port, @@ -429,7 +411,7 @@ func NewOTLPTraceDataSender(host string, port int) *OTLPTraceDataSender { } } -func (ote *OTLPTraceDataSender) Start() error { +func (ote *otlpTraceDataSender) Start() error { factory := otlpexporter.NewFactory() cfg := ote.fillConfig(factory.CreateDefaultConfig().(*otlpexporter.Config)) exp, err := factory.CreateTracesExporter(context.Background(), defaultExporterParams(), cfg) @@ -441,19 +423,16 @@ func (ote *OTLPTraceDataSender) Start() error { return exp.Start(context.Background(), ote) } -// OTLPMetricsDataSender implements MetricDataSender for OTLP metrics exporter. -type OTLPMetricsDataSender struct { +// otlpMetricsDataSender implements MetricDataSender for OTLP metrics exporter. +type otlpMetricsDataSender struct { otlpDataSender consumer.Metrics } -// Ensure OTLPMetricsDataSender implements MetricDataSender. -var _ MetricDataSender = (*OTLPMetricsDataSender)(nil) - // NewOTLPMetricDataSender creates a new OTLP metric exporter sender that will send // to the specified port after Start is called. -func NewOTLPMetricDataSender(host string, port int) *OTLPMetricsDataSender { - return &OTLPMetricsDataSender{ +func NewOTLPMetricDataSender(host string, port int) MetricDataSender { + return &otlpMetricsDataSender{ otlpDataSender: otlpDataSender{ DataSenderBase: DataSenderBase{ Port: port, @@ -463,7 +442,7 @@ func NewOTLPMetricDataSender(host string, port int) *OTLPMetricsDataSender { } } -func (ome *OTLPMetricsDataSender) Start() error { +func (ome *otlpMetricsDataSender) Start() error { factory := otlpexporter.NewFactory() cfg := ome.fillConfig(factory.CreateDefaultConfig().(*otlpexporter.Config)) exp, err := factory.CreateMetricsExporter(context.Background(), defaultExporterParams(), cfg) @@ -475,19 +454,16 @@ func (ome *OTLPMetricsDataSender) Start() error { return exp.Start(context.Background(), ome) } -// OTLPLogsDataSender implements LogsDataSender for OTLP logs exporter. 
-type OTLPLogsDataSender struct { +// otlpLogsDataSender implements LogsDataSender for OTLP logs exporter. +type otlpLogsDataSender struct { otlpDataSender consumer.Logs } -// Ensure OTLPLogsDataSender implements LogDataSender. -var _ LogDataSender = (*OTLPLogsDataSender)(nil) - // NewOTLPLogsDataSender creates a new OTLP logs exporter sender that will send // to the specified port after Start is called. -func NewOTLPLogsDataSender(host string, port int) *OTLPLogsDataSender { - return &OTLPLogsDataSender{ +func NewOTLPLogsDataSender(host string, port int) LogDataSender { + return &otlpLogsDataSender{ otlpDataSender: otlpDataSender{ DataSenderBase: DataSenderBase{ Port: port, @@ -497,7 +473,7 @@ func NewOTLPLogsDataSender(host string, port int) *OTLPLogsDataSender { } } -func (olds *OTLPLogsDataSender) Start() error { +func (olds *otlpLogsDataSender) Start() error { factory := otlpexporter.NewFactory() cfg := olds.fillConfig(factory.CreateDefaultConfig().(*otlpexporter.Config)) exp, err := factory.CreateLogsExporter(context.Background(), defaultExporterParams(), cfg) @@ -509,19 +485,16 @@ func (olds *OTLPLogsDataSender) Start() error { return exp.Start(context.Background(), olds) } -// ZipkinDataSender implements TraceDataSender for Zipkin http exporter. -type ZipkinDataSender struct { +// zipkinDataSender implements TraceDataSender for Zipkin http exporter. +type zipkinDataSender struct { DataSenderBase consumer.Traces } -// Ensure ZipkinDataSender implements TraceDataSender. -var _ TraceDataSender = (*ZipkinDataSender)(nil) - // NewZipkinDataSender creates a new Zipkin exporter sender that will send // to the specified port after Start is called. -func NewZipkinDataSender(host string, port int) *ZipkinDataSender { - return &ZipkinDataSender{ +func NewZipkinDataSender(host string, port int) TraceDataSender { + return &zipkinDataSender{ DataSenderBase: DataSenderBase{ Port: port, Host: host, @@ -529,7 +502,7 @@ func NewZipkinDataSender(host string, port int) *ZipkinDataSender { } } -func (zs *ZipkinDataSender) Start() error { +func (zs *zipkinDataSender) Start() error { factory := zipkinexporter.NewFactory() cfg := factory.CreateDefaultConfig().(*zipkinexporter.Config) cfg.Endpoint = fmt.Sprintf("http://%s/api/v2/spans", zs.GetEndpoint()) @@ -547,28 +520,28 @@ func (zs *ZipkinDataSender) Start() error { return exp.Start(context.Background(), zs) } -func (zs *ZipkinDataSender) GenConfigYAMLStr() string { +func (zs *zipkinDataSender) GenConfigYAMLStr() string { return fmt.Sprintf(` zipkin: endpoint: %s`, zs.GetEndpoint()) } -func (zs *ZipkinDataSender) ProtocolName() string { +func (zs *zipkinDataSender) ProtocolName() string { return "zipkin" } // prometheus -type PrometheusDataSender struct { +type prometheusDataSender struct { DataSenderBase consumer.Metrics namespace string } -var _ MetricDataSender = (*PrometheusDataSender)(nil) - -func NewPrometheusDataSender(host string, port int) *PrometheusDataSender { - return &PrometheusDataSender{ +// NewPrometheusDataSender creates a new Prometheus sender that will expose data +// on the specified port after Start is called. 
+func NewPrometheusDataSender(host string, port int) MetricDataSender { + return &prometheusDataSender{ DataSenderBase: DataSenderBase{ Port: port, Host: host, @@ -576,7 +549,7 @@ func NewPrometheusDataSender(host string, port int) *PrometheusDataSender { } } -func (pds *PrometheusDataSender) Start() error { +func (pds *prometheusDataSender) Start() error { factory := prometheusexporter.NewFactory() cfg := factory.CreateDefaultConfig().(*prometheusexporter.Config) cfg.Endpoint = pds.GetEndpoint().String() @@ -591,7 +564,7 @@ func (pds *PrometheusDataSender) Start() error { return exp.Start(context.Background(), pds) } -func (pds *PrometheusDataSender) GenConfigYAMLStr() string { +func (pds *prometheusDataSender) GenConfigYAMLStr() string { format := ` prometheus: config: @@ -604,7 +577,7 @@ func (pds *PrometheusDataSender) GenConfigYAMLStr() string { return fmt.Sprintf(format, pds.GetEndpoint()) } -func (pds *PrometheusDataSender) ProtocolName() string { +func (pds *prometheusDataSender) ProtocolName() string { return "prometheus" } diff --git a/internal/otel_collector/testbed/testbed/test_bed.go b/internal/otel_collector/testbed/testbed/test_bed.go index e10f5710b6f..abe9da138dc 100644 --- a/internal/otel_collector/testbed/testbed/test_bed.go +++ b/internal/otel_collector/testbed/testbed/test_bed.go @@ -46,19 +46,20 @@ func SaveResults(resultsSummary TestResultsSummary) { const testBedEnableEnvVarName = "RUN_TESTBED" +// GlobalConfig global config for testbed. var GlobalConfig = struct { - // Relative path to default agent executable to test. + // DefaultAgentExeRelativeFile relative path to default agent executable to test. // Can be set in the contrib repo to use a different executable name. // Set this before calling DoTestMain(). // // If used in the path, {{.GOOS}} and {{.GOARCH}} will be expanded to the current // OS and ARCH correspondingly. // - // Individual tests can override this by setting the AgentExePath of ChildProcess + // Individual tests can override this by setting the AgentExePath of childProcessCollector // that is passed to the TestCase. DefaultAgentExeRelativeFile string }{ - // The default exe that is produced by Makefile "otelcol" target relative + // DefaultAgentExeRelativeFile the default exe that is produced by Makefile "otelcol" target relative // to testbed/tests directory. DefaultAgentExeRelativeFile: "../../bin/otelcol_{{.GOOS}}_{{.GOARCH}}", } diff --git a/internal/otel_collector/testbed/testbed/test_case.go b/internal/otel_collector/testbed/testbed/test_case.go index 010bb23afb2..5e038947b96 100644 --- a/internal/otel_collector/testbed/testbed/test_case.go +++ b/internal/otel_collector/testbed/testbed/test_case.go @@ -49,7 +49,7 @@ type TestCase struct { agentProc OtelcolRunner Sender DataSender - Receiver DataReceiver + receiver DataReceiver LoadGenerator *LoadGenerator MockBackend *MockBackend @@ -57,19 +57,15 @@ type TestCase struct { startTime time.Time - // ErrorSignal indicates an error in the test case execution, e.g. process execution + // errorSignal indicates an error in the test case execution, e.g. process execution // failure or exceeding resource consumption, etc. The actual error message is already // logged, this is only an indicator on which you can wait to be informed. - ErrorSignal chan struct{} - + errorSignal chan struct{} // Duration is the requested duration of the tests. Configured via TESTBED_DURATION // env variable and defaults to 15 seconds if env variable is unspecified. 
- Duration time.Duration - - doneSignal chan struct{} - - errorCause string - + Duration time.Duration + doneSignal chan struct{} + errorCause string resultsSummary TestResultsSummary } @@ -87,17 +83,17 @@ func NewTestCase( resultsSummary TestResultsSummary, opts ...TestCaseOption, ) *TestCase { - tc := TestCase{} - - tc.t = t - tc.ErrorSignal = make(chan struct{}) - tc.doneSignal = make(chan struct{}) - tc.startTime = time.Now() - tc.Sender = sender - tc.Receiver = receiver - tc.agentProc = agentProc - tc.validator = validator - tc.resultsSummary = resultsSummary + tc := TestCase{ + t: t, + errorSignal: make(chan struct{}), + doneSignal: make(chan struct{}), + startTime: time.Now(), + Sender: sender, + receiver: receiver, + agentProc: agentProc, + validator: validator, + resultsSummary: resultsSummary, + } // Get requested test case duration from env variable. duration := os.Getenv(testcaseDurationVar) @@ -112,7 +108,7 @@ func NewTestCase( // Apply all provided options. for _, opt := range opts { - opt.Apply(&tc) + opt(&tc) } // Prepare directory for results. @@ -143,22 +139,6 @@ func (tc *TestCase) composeTestResultFileName(fileName string) string { return fileName } -// SetResourceLimits sets expected limits for resource consmption. -// Error is signaled if consumption during ResourceCheckPeriod exceeds the limits. -// Limits are modified only for non-zero fields of resourceSpec, all zero-value fields -// fo resourceSpec are ignored and their previous values remain in effect. -func (tc *TestCase) SetResourceLimits(resourceSpec ResourceSpec) { - if resourceSpec.ExpectedMaxCPU > 0 { - tc.resourceSpec.ExpectedMaxCPU = resourceSpec.ExpectedMaxCPU - } - if resourceSpec.ExpectedMaxRAM > 0 { - tc.resourceSpec.ExpectedMaxRAM = resourceSpec.ExpectedMaxRAM - } - if resourceSpec.ResourceCheckPeriod > 0 { - tc.resourceSpec.ResourceCheckPeriod = resourceSpec.ResourceCheckPeriod - } -} - // StartAgent starts the agent and redirects its standard output and standard error // to "agent.log" file located in the test directory. func (tc *TestCase) StartAgent(args ...string) { @@ -270,7 +250,7 @@ func (tc *TestCase) Stop() { // instance(s) under test by the LoadGenerator. func (tc *TestCase) ValidateData() { select { - case <-tc.ErrorSignal: + case <-tc.errorSignal: // Error is already signaled and recorded. Validating data is pointless. return default: @@ -283,7 +263,7 @@ func (tc *TestCase) ValidateData() { func (tc *TestCase) Sleep(d time.Duration) { select { case <-time.After(d): - case <-tc.ErrorSignal: + case <-tc.errorSignal: } } @@ -304,7 +284,7 @@ func (tc *TestCase) WaitForN(cond func() bool, duration time.Duration, errMsg .. 
select { case <-time.After(waitInterval): - case <-tc.ErrorSignal: + case <-tc.errorSignal: return false } @@ -336,7 +316,7 @@ func (tc *TestCase) indicateError(err error) { tc.errorCause = err.Error() // Signal the error via channel - close(tc.ErrorSignal) + close(tc.errorSignal) } func (tc *TestCase) logStats() { diff --git a/internal/otel_collector/testbed/testbed/validator.go b/internal/otel_collector/testbed/testbed/validator.go index 5c4a40cfc29..9093a434160 100644 --- a/internal/otel_collector/testbed/testbed/validator.go +++ b/internal/otel_collector/testbed/testbed/validator.go @@ -23,7 +23,7 @@ import ( "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" tracetranslator "go.opentelemetry.io/collector/translator/trace" ) @@ -127,7 +127,7 @@ func (v *CorrectnessTestValidator) RecordResults(tc *TestCase) { func (v *CorrectnessTestValidator) assertSentRecdTracingDataEqual(tracesList []pdata.Traces) { spansMap := make(map[string]pdata.Span) // TODO: Remove this hack, and add a way to retrieve all sent data. - if val, ok := v.dataProvider.(*GoldenDataProvider); ok { + if val, ok := v.dataProvider.(*goldenDataProvider); ok { populateSpansMap(spansMap, val.tracesGenerated) } diff --git a/internal/otel_collector/testbed/tests/scenarios.go b/internal/otel_collector/testbed/tests/scenarios.go index cce1ed27b54..c7b5db95362 100644 --- a/internal/otel_collector/testbed/tests/scenarios.go +++ b/internal/otel_collector/testbed/tests/scenarios.go @@ -152,7 +152,7 @@ func Scenario10kItemsPerSecond( ItemsPerBatch: 100, Parallel: 1, } - agentProc := &testbed.ChildProcess{} + agentProc := testbed.NewChildProcessCollector() configStr := createConfigYaml(t, sender, receiver, resultDir, processors, extensions) configCleanup, err := agentProc.PrepareConfig(configStr) @@ -168,10 +168,10 @@ func Scenario10kItemsPerSecond( agentProc, &testbed.PerfTestValidator{}, resultsSummary, + testbed.WithResourceLimits(resourceSpec), ) defer tc.Stop() - tc.SetResourceLimits(resourceSpec) tc.StartBackend() tc.StartAgent("--log-level=debug") @@ -217,7 +217,7 @@ func Scenario1kSPSWithAttrs(t *testing.T, args []string, tests []TestCase, proce options := constructLoadOptions(test) - agentProc := &testbed.ChildProcess{} + agentProc := testbed.NewChildProcessCollector() // Prepare results dir. resultDir, err := filepath.Abs(path.Join("results", t.Name())) @@ -241,14 +241,10 @@ func Scenario1kSPSWithAttrs(t *testing.T, args []string, tests []TestCase, proce agentProc, &testbed.PerfTestValidator{}, test.resultsSummary, + testbed.WithResourceLimits(testbed.ResourceSpec{ExpectedMaxCPU: test.expectedMaxCPU, ExpectedMaxRAM: test.expectedMaxRAM}), ) defer tc.Stop() - tc.SetResourceLimits(testbed.ResourceSpec{ - ExpectedMaxCPU: test.expectedMaxCPU, - ExpectedMaxRAM: test.expectedMaxRAM, - }) - tc.StartBackend() tc.StartAgent(args...) 
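The two scenario changes above swap the removed tc.SetResourceLimits call for a testbed.WithResourceLimits option passed straight to NewTestCase, and NewTestCase now applies each option as a plain function (opt(&tc)) rather than through an Apply method. A minimal sketch of that functional-option shape, using simplified stand-in types rather than the real testbed package, to show why the constructor no longer needs post-construction mutation:

package main

import (
	"fmt"
	"time"
)

// ResourceSpec is a stand-in carrying only the fields the scenarios set.
type ResourceSpec struct {
	ExpectedMaxCPU      uint32
	ExpectedMaxRAM      uint32
	ResourceCheckPeriod time.Duration
}

// TestCase is a stand-in for testbed.TestCase.
type TestCase struct {
	resourceSpec ResourceSpec
}

// TestCaseOption is a plain function, which is why NewTestCase can call
// opt(&tc) directly instead of opt.Apply(&tc).
type TestCaseOption func(*TestCase)

// WithResourceLimits stands in for the option that replaces the removed
// SetResourceLimits method: limits are handed over at construction time.
func WithResourceLimits(spec ResourceSpec) TestCaseOption {
	return func(tc *TestCase) { tc.resourceSpec = spec }
}

func NewTestCase(opts ...TestCaseOption) *TestCase {
	tc := &TestCase{}
	for _, opt := range opts {
		opt(tc)
	}
	return tc
}

func main() {
	tc := NewTestCase(WithResourceLimits(ResourceSpec{ExpectedMaxCPU: 120, ExpectedMaxRAM: 500}))
	fmt.Printf("limits: %+v\n", tc.resourceSpec)
}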
@@ -290,7 +286,7 @@ func ScenarioTestTraceNoBackend10kSPS( require.NoError(t, err) options := testbed.LoadOptions{DataItemsPerSecond: 10000, ItemsPerBatch: 10} - agentProc := &testbed.ChildProcess{} + agentProc := testbed.NewChildProcessCollector() configStr := createConfigYaml(t, sender, receiver, resultDir, configuration.Processor, nil) configCleanup, err := agentProc.PrepareConfig(configStr) require.NoError(t, err) @@ -305,12 +301,11 @@ func ScenarioTestTraceNoBackend10kSPS( agentProc, &testbed.PerfTestValidator{}, resultsSummary, + testbed.WithResourceLimits(resourceSpec), ) defer tc.Stop() - tc.SetResourceLimits(resourceSpec) - tc.StartAgent() tc.StartLoad(options) diff --git a/internal/otel_collector/testutil/metricstestutil/metricsutil.go b/internal/otel_collector/testutil/metricstestutil/metricsutil.go index 86dbc809a0e..69d239385b9 100644 --- a/internal/otel_collector/testutil/metricstestutil/metricsutil.go +++ b/internal/otel_collector/testutil/metricstestutil/metricsutil.go @@ -21,7 +21,7 @@ import ( "google.golang.org/protobuf/types/known/timestamppb" "google.golang.org/protobuf/types/known/wrapperspb" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) // Gauge creates a gauge metric. @@ -172,13 +172,13 @@ func SortedMetrics(metrics pdata.Metrics) pdata.Metrics { for l := 0; l < m.IntSum().DataPoints().Len(); l++ { m.IntSum().DataPoints().At(l).LabelsMap().Sort() } - case pdata.MetricDataTypeDoubleGauge: - for l := 0; l < m.DoubleGauge().DataPoints().Len(); l++ { - m.DoubleGauge().DataPoints().At(l).LabelsMap().Sort() + case pdata.MetricDataTypeGauge: + for l := 0; l < m.Gauge().DataPoints().Len(); l++ { + m.Gauge().DataPoints().At(l).LabelsMap().Sort() } - case pdata.MetricDataTypeDoubleSum: - for l := 0; l < m.DoubleSum().DataPoints().Len(); l++ { - m.DoubleSum().DataPoints().At(l).LabelsMap().Sort() + case pdata.MetricDataTypeSum: + for l := 0; l < m.Sum().DataPoints().Len(); l++ { + m.Sum().DataPoints().At(l).LabelsMap().Sort() } case pdata.MetricDataTypeIntHistogram: for l := 0; l < m.IntHistogram().DataPoints().Len(); l++ { diff --git a/internal/otel_collector/translator/internaldata/metrics_to_oc.go b/internal/otel_collector/translator/internaldata/metrics_to_oc.go index e8fbbe4cdd9..c85a823ed9a 100644 --- a/internal/otel_collector/translator/internaldata/metrics_to_oc.go +++ b/internal/otel_collector/translator/internaldata/metrics_to_oc.go @@ -22,7 +22,7 @@ import ( ocresource "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" "github.com/golang/protobuf/ptypes/wrappers" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) type labelKeys struct { @@ -88,12 +88,12 @@ func collectLabelKeys(metric pdata.Metric) *labelKeys { switch metric.DataType() { case pdata.MetricDataTypeIntGauge: collectLabelKeysIntDataPoints(metric.IntGauge().DataPoints(), keySet) - case pdata.MetricDataTypeDoubleGauge: - collectLabelKeysDoubleDataPoints(metric.DoubleGauge().DataPoints(), keySet) + case pdata.MetricDataTypeGauge: + collectLabelKeysDoubleDataPoints(metric.Gauge().DataPoints(), keySet) case pdata.MetricDataTypeIntSum: collectLabelKeysIntDataPoints(metric.IntSum().DataPoints(), keySet) - case pdata.MetricDataTypeDoubleSum: - collectLabelKeysDoubleDataPoints(metric.DoubleSum().DataPoints(), keySet) + case pdata.MetricDataTypeSum: + collectLabelKeysDoubleDataPoints(metric.Sum().DataPoints(), keySet) case pdata.MetricDataTypeIntHistogram: 
collectLabelKeysIntHistogramDataPoints(metric.IntHistogram().DataPoints(), keySet) case pdata.MetricDataTypeHistogram: @@ -185,7 +185,7 @@ func descriptorTypeToOC(metric pdata.Metric) ocmetrics.MetricDescriptor_Type { switch metric.DataType() { case pdata.MetricDataTypeIntGauge: return ocmetrics.MetricDescriptor_GAUGE_INT64 - case pdata.MetricDataTypeDoubleGauge: + case pdata.MetricDataTypeGauge: return ocmetrics.MetricDescriptor_GAUGE_DOUBLE case pdata.MetricDataTypeIntSum: sd := metric.IntSum() @@ -193,8 +193,8 @@ func descriptorTypeToOC(metric pdata.Metric) ocmetrics.MetricDescriptor_Type { return ocmetrics.MetricDescriptor_CUMULATIVE_INT64 } return ocmetrics.MetricDescriptor_GAUGE_INT64 - case pdata.MetricDataTypeDoubleSum: - sd := metric.DoubleSum() + case pdata.MetricDataTypeSum: + sd := metric.Sum() if sd.IsMonotonic() && sd.AggregationTemporality() == pdata.AggregationTemporalityCumulative { return ocmetrics.MetricDescriptor_CUMULATIVE_DOUBLE } @@ -221,12 +221,12 @@ func dataPointsToTimeseries(metric pdata.Metric, labelKeys *labelKeys) []*ocmetr switch metric.DataType() { case pdata.MetricDataTypeIntGauge: return intPointsToOC(metric.IntGauge().DataPoints(), labelKeys) - case pdata.MetricDataTypeDoubleGauge: - return doublePointToOC(metric.DoubleGauge().DataPoints(), labelKeys) + case pdata.MetricDataTypeGauge: + return doublePointToOC(metric.Gauge().DataPoints(), labelKeys) case pdata.MetricDataTypeIntSum: return intPointsToOC(metric.IntSum().DataPoints(), labelKeys) - case pdata.MetricDataTypeDoubleSum: - return doublePointToOC(metric.DoubleSum().DataPoints(), labelKeys) + case pdata.MetricDataTypeSum: + return doublePointToOC(metric.Sum().DataPoints(), labelKeys) case pdata.MetricDataTypeIntHistogram: return intHistogramPointToOC(metric.IntHistogram().DataPoints(), labelKeys) case pdata.MetricDataTypeHistogram: diff --git a/internal/otel_collector/translator/internaldata/oc_to_metrics.go b/internal/otel_collector/translator/internaldata/oc_to_metrics.go index 2f328672b84..6a10193d071 100644 --- a/internal/otel_collector/translator/internaldata/oc_to_metrics.go +++ b/internal/otel_collector/translator/internaldata/oc_to_metrics.go @@ -19,7 +19,7 @@ import ( ocmetrics "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" ocresource "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) // OCToMetrics converts OC data format to data.MetricData. @@ -36,8 +36,7 @@ func OCToMetrics(node *occommon.Node, resource *ocresource.Resource, metrics []* if len(metrics) == 0 { // At least one of the md.Node or md.Resource is not nil. Set the resource and return. - rms.Resize(initialRmsLen + 1) - ocNodeResourceToInternal(node, resource, rms.At(initialRmsLen).Resource()) + ocNodeResourceToInternal(node, resource, rms.AppendEmpty().Resource()) return dest } @@ -81,21 +80,18 @@ func OCToMetrics(node *occommon.Node, resource *ocresource.Resource, metrics []* // +1 for all metrics with nil resource resourceCount++ } - rms.Resize(resourceCount) + rms.EnsureCapacity(resourceCount) // Translate "combinedMetrics" first if combinedMetricCount > 0 { - rm0 := rms.At(initialRmsLen) + rm0 := rms.AppendEmpty() ocNodeResourceToInternal(node, resource, rm0.Resource()) // Allocate a slice for metrics that need to be combined into first ResourceMetrics. 
ilms := rm0.InstrumentationLibraryMetrics() combinedMetrics := ilms.AppendEmpty().Metrics() - combinedMetrics.Resize(combinedMetricCount) - - // Index to next available slot in "combinedMetrics" slice. - combinedMetricIdx := 0 + combinedMetrics.EnsureCapacity(combinedMetricCount) for _, ocMetric := range metrics { if ocMetric == nil { @@ -110,18 +106,12 @@ func OCToMetrics(node *occommon.Node, resource *ocresource.Resource, metrics []* // Add the metric to the "combinedMetrics". combinedMetrics length is equal // to combinedMetricCount. The loop above that calculates combinedMetricCount // has exact same conditions as we have here in this loop. - ocMetricToMetrics(ocMetric, combinedMetrics.At(combinedMetricIdx)) - combinedMetricIdx++ + ocMetricToMetrics(ocMetric, combinedMetrics.AppendEmpty()) } } // Translate distinct metrics - resourceMetricIdx := 0 - if combinedMetricCount > 0 { - // First resourcemetric is used for the default resource, so start with 1. - resourceMetricIdx = 1 - } for _, ocMetric := range metrics { if ocMetric == nil { // Skip nil metrics. @@ -135,8 +125,7 @@ func OCToMetrics(node *occommon.Node, resource *ocresource.Resource, metrics []* // This metric has a different Resource and must be placed in a different // ResourceMetrics instance. Create a separate ResourceMetrics item just for this metric // and store at resourceMetricIdx. - ocMetricToResourceMetrics(ocMetric, node, rms.At(initialRmsLen+resourceMetricIdx)) - resourceMetricIdx++ + ocMetricToResourceMetrics(ocMetric, node, rms.AppendEmpty()) } return dest } @@ -173,8 +162,8 @@ func descriptorTypeToMetrics(t ocmetrics.MetricDescriptor_Type, metric pdata.Met metric.SetDataType(pdata.MetricDataTypeIntGauge) return pdata.MetricDataTypeIntGauge case ocmetrics.MetricDescriptor_GAUGE_DOUBLE: - metric.SetDataType(pdata.MetricDataTypeDoubleGauge) - return pdata.MetricDataTypeDoubleGauge + metric.SetDataType(pdata.MetricDataTypeGauge) + return pdata.MetricDataTypeGauge case ocmetrics.MetricDescriptor_CUMULATIVE_INT64: metric.SetDataType(pdata.MetricDataTypeIntSum) sum := metric.IntSum() @@ -182,11 +171,11 @@ func descriptorTypeToMetrics(t ocmetrics.MetricDescriptor_Type, metric pdata.Met sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) return pdata.MetricDataTypeIntSum case ocmetrics.MetricDescriptor_CUMULATIVE_DOUBLE: - metric.SetDataType(pdata.MetricDataTypeDoubleSum) - sum := metric.DoubleSum() + metric.SetDataType(pdata.MetricDataTypeSum) + sum := metric.Sum() sum.SetIsMonotonic(true) sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) - return pdata.MetricDataTypeDoubleSum + return pdata.MetricDataTypeSum case ocmetrics.MetricDescriptor_CUMULATIVE_DISTRIBUTION: metric.SetDataType(pdata.MetricDataTypeHistogram) histo := metric.Histogram() @@ -205,12 +194,12 @@ func setDataPoints(ocMetric *ocmetrics.Metric, metric pdata.Metric) { switch metric.DataType() { case pdata.MetricDataTypeIntGauge: fillIntDataPoint(ocMetric, metric.IntGauge().DataPoints()) - case pdata.MetricDataTypeDoubleGauge: - fillDoubleDataPoint(ocMetric, metric.DoubleGauge().DataPoints()) + case pdata.MetricDataTypeGauge: + fillDoubleDataPoint(ocMetric, metric.Gauge().DataPoints()) case pdata.MetricDataTypeIntSum: fillIntDataPoint(ocMetric, metric.IntSum().DataPoints()) - case pdata.MetricDataTypeDoubleSum: - fillDoubleDataPoint(ocMetric, metric.DoubleSum().DataPoints()) + case pdata.MetricDataTypeSum: + fillDoubleDataPoint(ocMetric, metric.Sum().DataPoints()) case pdata.MetricDataTypeHistogram: 
fillDoubleHistogramDataPoint(ocMetric, metric.Histogram().DataPoints()) case pdata.MetricDataTypeSummary: @@ -242,9 +231,8 @@ func fillLabelsMap(ocLabelsKeys []*ocmetrics.LabelKey, ocLabelValues []*ocmetric func fillIntDataPoint(ocMetric *ocmetrics.Metric, dps pdata.IntDataPointSlice) { ocPointsCount := getPointsCount(ocMetric) - dps.Resize(ocPointsCount) + dps.EnsureCapacity(ocPointsCount) ocLabelsKeys := ocMetric.GetMetricDescriptor().GetLabelKeys() - pos := 0 for _, timeseries := range ocMetric.GetTimeseries() { if timeseries == nil { continue @@ -256,23 +244,19 @@ func fillIntDataPoint(ocMetric *ocmetrics.Metric, dps pdata.IntDataPointSlice) { continue } - dp := dps.At(pos) - pos++ - + dp := dps.AppendEmpty() dp.SetStartTimestamp(startTimestamp) dp.SetTimestamp(pdata.TimestampFromTime(point.GetTimestamp().AsTime())) fillLabelsMap(ocLabelsKeys, timeseries.LabelValues, dp.LabelsMap()) dp.SetValue(point.GetInt64Value()) } } - dps.Resize(pos) } func fillDoubleDataPoint(ocMetric *ocmetrics.Metric, dps pdata.DoubleDataPointSlice) { ocPointsCount := getPointsCount(ocMetric) - dps.Resize(ocPointsCount) + dps.EnsureCapacity(ocPointsCount) ocLabelsKeys := ocMetric.GetMetricDescriptor().GetLabelKeys() - pos := 0 for _, timeseries := range ocMetric.GetTimeseries() { if timeseries == nil { continue @@ -284,23 +268,19 @@ func fillDoubleDataPoint(ocMetric *ocmetrics.Metric, dps pdata.DoubleDataPointSl continue } - dp := dps.At(pos) - pos++ - + dp := dps.AppendEmpty() dp.SetStartTimestamp(startTimestamp) dp.SetTimestamp(pdata.TimestampFromTime(point.GetTimestamp().AsTime())) fillLabelsMap(ocLabelsKeys, timeseries.LabelValues, dp.LabelsMap()) dp.SetValue(point.GetDoubleValue()) } } - dps.Resize(pos) } func fillDoubleHistogramDataPoint(ocMetric *ocmetrics.Metric, dps pdata.HistogramDataPointSlice) { ocPointsCount := getPointsCount(ocMetric) - dps.Resize(ocPointsCount) + dps.EnsureCapacity(ocPointsCount) ocLabelsKeys := ocMetric.GetMetricDescriptor().GetLabelKeys() - pos := 0 for _, timeseries := range ocMetric.GetTimeseries() { if timeseries == nil { continue @@ -312,9 +292,7 @@ func fillDoubleHistogramDataPoint(ocMetric *ocmetrics.Metric, dps pdata.Histogra continue } - dp := dps.At(pos) - pos++ - + dp := dps.AppendEmpty() dp.SetStartTimestamp(startTimestamp) dp.SetTimestamp(pdata.TimestampFromTime(point.GetTimestamp().AsTime())) fillLabelsMap(ocLabelsKeys, timeseries.LabelValues, dp.LabelsMap()) @@ -325,14 +303,12 @@ func fillDoubleHistogramDataPoint(ocMetric *ocmetrics.Metric, dps pdata.Histogra dp.SetExplicitBounds(distributionValue.GetBucketOptions().GetExplicit().GetBounds()) } } - dps.Resize(pos) } func fillDoubleSummaryDataPoint(ocMetric *ocmetrics.Metric, dps pdata.SummaryDataPointSlice) { ocPointsCount := getPointsCount(ocMetric) - dps.Resize(ocPointsCount) + dps.EnsureCapacity(ocPointsCount) ocLabelsKeys := ocMetric.GetMetricDescriptor().GetLabelKeys() - pos := 0 for _, timeseries := range ocMetric.GetTimeseries() { if timeseries == nil { continue @@ -344,9 +320,7 @@ func fillDoubleSummaryDataPoint(ocMetric *ocmetrics.Metric, dps pdata.SummaryDat continue } - dp := dps.At(pos) - pos++ - + dp := dps.AppendEmpty() dp.SetStartTimestamp(startTimestamp) dp.SetTimestamp(pdata.TimestampFromTime(point.GetTimestamp().AsTime())) fillLabelsMap(ocLabelsKeys, timeseries.LabelValues, dp.LabelsMap()) @@ -356,7 +330,6 @@ func fillDoubleSummaryDataPoint(ocMetric *ocmetrics.Metric, dps pdata.SummaryDat ocSummaryPercentilesToMetrics(summaryValue.GetSnapshot().GetPercentileValues(), dp) } } - dps.Resize(pos) } 
func ocHistogramBucketsToMetrics(ocBuckets []*ocmetrics.DistributionValue_Bucket, dp pdata.HistogramDataPoint) { @@ -380,11 +353,12 @@ func ocSummaryPercentilesToMetrics(ocPercentiles []*ocmetrics.SummaryValue_Snaps } quantiles := pdata.NewValueAtQuantileSlice() - quantiles.Resize(len(ocPercentiles)) + quantiles.EnsureCapacity(len(ocPercentiles)) - for i, percentile := range ocPercentiles { - quantiles.At(i).SetQuantile(percentile.GetPercentile() / 100) - quantiles.At(i).SetValue(percentile.GetValue()) + for _, percentile := range ocPercentiles { + quantile := quantiles.AppendEmpty() + quantile.SetQuantile(percentile.GetPercentile() / 100) + quantile.SetValue(percentile.GetValue()) } quantiles.CopyTo(dp.QuantileValues()) diff --git a/internal/otel_collector/translator/internaldata/oc_to_resource.go b/internal/otel_collector/translator/internaldata/oc_to_resource.go index d98d508d784..96f814895dd 100644 --- a/internal/otel_collector/translator/internaldata/oc_to_resource.go +++ b/internal/otel_collector/translator/internaldata/oc_to_resource.go @@ -21,8 +21,8 @@ import ( ocresource "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" "go.opencensus.io/resource/resourcekeys" - "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/internal/occonventions" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/translator/conventions" ) diff --git a/internal/otel_collector/translator/internaldata/oc_to_traces.go b/internal/otel_collector/translator/internaldata/oc_to_traces.go index 760fc6ab93d..a5ebd39d6cf 100644 --- a/internal/otel_collector/translator/internaldata/oc_to_traces.go +++ b/internal/otel_collector/translator/internaldata/oc_to_traces.go @@ -23,8 +23,8 @@ import ( "go.opencensus.io/trace" "google.golang.org/protobuf/types/known/wrapperspb" - "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/internal/occonventions" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/translator/conventions" tracetranslator "go.opentelemetry.io/collector/translator/trace" ) @@ -81,22 +81,18 @@ func OCToTraces(node *occommon.Node, resource *ocresource.Resource, spans []*oct // 1 (for all spans with nil resource) + numSpansWithResource (distinctResourceCount). rss := traceData.ResourceSpans() - rss.Resize(distinctResourceCount + 1) - rs0 := rss.At(0) + rss.EnsureCapacity(distinctResourceCount + 1) + rs0 := rss.AppendEmpty() ocNodeResourceToInternal(node, resource, rs0.Resource()) // Allocate a slice for spans that need to be combined into first ResourceSpans. ils0 := rs0.InstrumentationLibrarySpans().AppendEmpty() combinedSpans := ils0.Spans() - combinedSpans.Resize(combinedSpanCount) + combinedSpans.EnsureCapacity(combinedSpanCount) // Now do the span translation and place them in appropriate ResourceSpans // instances. - // Index to next available slot in "combinedSpans" slice. - combinedSpanIdx := 0 - // First ResourceSpans is used for the default resource, so start with 1. - resourceSpanIdx := 1 for _, ocSpan := range spans { if ocSpan == nil { // Skip nil spans. @@ -107,13 +103,11 @@ func OCToTraces(node *occommon.Node, resource *ocresource.Resource, spans []*oct // Add the span to the "combinedSpans". combinedSpans length is equal // to combinedSpanCount. The loop above that calculates combinedSpanCount // has exact same conditions as we have here in this loop. 
- ocSpanToInternal(ocSpan, combinedSpans.At(combinedSpanIdx)) - combinedSpanIdx++ + ocSpanToInternal(ocSpan, combinedSpans.AppendEmpty()) } else { // This span has a different Resource and must be placed in a different // ResourceSpans instance. Create a separate ResourceSpans item just for this span. - ocSpanToResourceSpans(ocSpan, node, traceData.ResourceSpans().At(resourceSpanIdx)) - resourceSpanIdx++ + ocSpanToResourceSpans(ocSpan, node, traceData.ResourceSpans().AppendEmpty()) } } @@ -294,18 +288,15 @@ func ocEventsToInternal(ocEvents *octrace.Span_TimeEvents, dest pdata.Span) { } events := dest.Events() - events.Resize(len(ocEvents.TimeEvent)) + events.EnsureCapacity(len(ocEvents.TimeEvent)) - i := 0 for _, ocEvent := range ocEvents.TimeEvent { if ocEvent == nil { // Skip nil source events. continue } - event := events.At(i) - i++ - + event := events.AppendEmpty() event.SetTimestamp(pdata.TimestampFromTime(ocEvent.Time.AsTime())) switch teValue := ocEvent.Value.(type) { @@ -326,9 +317,6 @@ func ocEventsToInternal(ocEvents *octrace.Span_TimeEvents, dest pdata.Span) { event.SetName("An unknown OpenCensus TimeEvent type was detected when translating") } } - - // Truncate the slice to only include populated items. - events.Resize(i) } func ocLinksToInternal(ocLinks *octrace.Span_Links, dest pdata.Span) { @@ -343,26 +331,20 @@ func ocLinksToInternal(ocLinks *octrace.Span_Links, dest pdata.Span) { } links := dest.Links() - links.Resize(len(ocLinks.Link)) + links.EnsureCapacity(len(ocLinks.Link)) - i := 0 for _, ocLink := range ocLinks.Link { if ocLink == nil { continue } - link := links.At(i) - i++ - + link := links.AppendEmpty() link.SetTraceID(traceIDToInternal(ocLink.TraceId)) link.SetSpanID(spanIDToInternal(ocLink.SpanId)) link.SetTraceState(ocTraceStateToInternal(ocLink.Tracestate)) initAttributeMapFromOC(ocLink.Attributes, link.Attributes()) link.SetDroppedAttributesCount(ocAttrsToDroppedAttributes(ocLink.Attributes)) } - - // Truncate the slice to only include populated items. - links.Resize(i) } func ocMessageEventToInternalAttrs(msgEvent *octrace.Span_TimeEvent_MessageEvent, dest pdata.AttributeMap) { diff --git a/internal/otel_collector/translator/internaldata/resource_to_oc.go b/internal/otel_collector/translator/internaldata/resource_to_oc.go index 91d1f9394dc..aa5ac5b7b7a 100644 --- a/internal/otel_collector/translator/internaldata/resource_to_oc.go +++ b/internal/otel_collector/translator/internaldata/resource_to_oc.go @@ -23,8 +23,8 @@ import ( "go.opencensus.io/resource/resourcekeys" "google.golang.org/protobuf/types/known/timestamppb" - "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/internal/occonventions" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/translator/conventions" tracetranslator "go.opentelemetry.io/collector/translator/trace" ) diff --git a/internal/otel_collector/translator/internaldata/timestamp.go b/internal/otel_collector/translator/internaldata/timestamp.go index f2a221fe2ba..48dc8325f0f 100644 --- a/internal/otel_collector/translator/internaldata/timestamp.go +++ b/internal/otel_collector/translator/internaldata/timestamp.go @@ -17,7 +17,7 @@ package internaldata import ( "google.golang.org/protobuf/types/known/timestamppb" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) // timestampAsTimestampPb converts a pdata.Timestamp to a protobuf known type Timestamp. 
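The translator changes above repeat one migration throughout oc_to_metrics.go and oc_to_traces.go: pre-sizing a pdata slice with Resize(n) and filling it by index gives way to EnsureCapacity(n) plus AppendEmpty(), so skipped inputs never allocate an entry and the trailing Resize(pos) trim disappears. A short sketch of the new pattern, assuming the go.opentelemetry.io/collector/model/pdata package vendored by this change:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/model/pdata"
)

func buildTraces(names []string) pdata.Traces {
	td := pdata.NewTraces()
	spans := td.ResourceSpans().AppendEmpty().
		InstrumentationLibrarySpans().AppendEmpty().
		Spans()

	// Reserve capacity once; only inputs that survive the filter get a slot,
	// so no trailing Resize is needed to trim unused entries.
	spans.EnsureCapacity(len(names))
	for _, name := range names {
		if name == "" {
			continue // a skipped input simply never appends a span
		}
		span := spans.AppendEmpty()
		span.SetName(name)
	}
	return td
}

func main() {
	td := buildTraces([]string{"GET /a", "", "GET /b"})
	fmt.Println("spans:", td.SpanCount()) // prints: spans: 2
}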
diff --git a/internal/otel_collector/translator/internaldata/traces_to_oc.go b/internal/otel_collector/translator/internaldata/traces_to_oc.go index ed4acb77feb..846bb6c6d13 100644 --- a/internal/otel_collector/translator/internaldata/traces_to_oc.go +++ b/internal/otel_collector/translator/internaldata/traces_to_oc.go @@ -24,8 +24,8 @@ import ( "go.opencensus.io/trace" "google.golang.org/protobuf/types/known/wrapperspb" - "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/internal/occonventions" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/translator/conventions" tracetranslator "go.opentelemetry.io/collector/translator/trace" ) diff --git a/internal/otel_collector/translator/trace/zipkin/attributes.go b/internal/otel_collector/translator/trace/internal/zipkin/attributes.go similarity index 53% rename from internal/otel_collector/translator/trace/zipkin/attributes.go rename to internal/otel_collector/translator/trace/internal/zipkin/attributes.go index 513b404ce3d..9269d866ab8 100644 --- a/internal/otel_collector/translator/trace/zipkin/attributes.go +++ b/internal/otel_collector/translator/trace/internal/zipkin/attributes.go @@ -17,27 +17,23 @@ package zipkin import ( "regexp" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) // These constants are the attribute keys used when translating from zipkin // format to the internal collector data format. const ( - startTimeAbsent = "otel.zipkin.absentField.startTime" - tagServiceNameSource = "otlp.service.name.source" + StartTimeAbsent = "otel.zipkin.absentField.startTime" + TagServiceNameSource = "otlp.service.name.source" ) -var attrValDescriptions = getAttrValDescripts() - -func getAttrValDescripts() []*attrValDescript { - descriptions := make([]*attrValDescript, 0, 5) - descriptions = append(descriptions, constructAttrValDescript("^$", pdata.AttributeValueTypeNull)) - descriptions = append(descriptions, constructAttrValDescript(`^-?\d+$`, pdata.AttributeValueTypeInt)) - descriptions = append(descriptions, constructAttrValDescript(`^-?\d+\.\d+$`, pdata.AttributeValueTypeDouble)) - descriptions = append(descriptions, constructAttrValDescript(`^(true|false)$`, pdata.AttributeValueTypeBool)) - descriptions = append(descriptions, constructAttrValDescript(`^\{"\w+":.+\}$`, pdata.AttributeValueTypeMap)) - descriptions = append(descriptions, constructAttrValDescript(`^\[.*\]$`, pdata.AttributeValueTypeArray)) - return descriptions +var attrValDescriptions = []*attrValDescript{ + constructAttrValDescript("^$", pdata.AttributeValueTypeNull), + constructAttrValDescript(`^-?\d+$`, pdata.AttributeValueTypeInt), + constructAttrValDescript(`^-?\d+\.\d+$`, pdata.AttributeValueTypeDouble), + constructAttrValDescript(`^(true|false)$`, pdata.AttributeValueTypeBool), + constructAttrValDescript(`^\{"\w+":.+\}$`, pdata.AttributeValueTypeMap), + constructAttrValDescript(`^\[.*\]$`, pdata.AttributeValueTypeArray), } type attrValDescript struct { @@ -53,8 +49,8 @@ func constructAttrValDescript(regex string, attrType pdata.AttributeValueType) * } } -// determineValueType returns the native OTLP attribute type the string translates to. -func determineValueType(value string) pdata.AttributeValueType { +// DetermineValueType returns the native OTLP attribute type the string translates to. 
+func DetermineValueType(value string) pdata.AttributeValueType { for _, desc := range attrValDescriptions { if desc.regex.MatchString(value) { return desc.attrType diff --git a/internal/otel_collector/translator/trace/jaeger/constants.go b/internal/otel_collector/translator/trace/jaeger/constants.go index 48925f21e41..9228f254629 100644 --- a/internal/otel_collector/translator/trace/jaeger/constants.go +++ b/internal/otel_collector/translator/trace/jaeger/constants.go @@ -19,6 +19,6 @@ import ( ) var ( - errZeroTraceID = errors.New("OC span has an all zeros trace ID") - errZeroSpanID = errors.New("OC span has an all zeros span ID") + errZeroTraceID = errors.New("span has an all zeros trace ID") + errZeroSpanID = errors.New("span has an all zeros span ID") ) diff --git a/internal/otel_collector/translator/trace/jaeger/jaegerproto_to_traces.go b/internal/otel_collector/translator/trace/jaeger/jaegerproto_to_traces.go index 1a465347443..7f76afb3213 100644 --- a/internal/otel_collector/translator/trace/jaeger/jaegerproto_to_traces.go +++ b/internal/otel_collector/translator/trace/jaeger/jaegerproto_to_traces.go @@ -24,9 +24,9 @@ import ( "github.com/jaegertracing/jaeger/model" "github.com/jaegertracing/jaeger/thrift-gen/jaeger" - "go.opentelemetry.io/collector/consumer/pdata" - idutils "go.opentelemetry.io/collector/internal/idutils" + "go.opentelemetry.io/collector/internal/idutils" "go.opentelemetry.io/collector/internal/occonventions" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/translator/conventions" tracetranslator "go.opentelemetry.io/collector/translator/trace" ) @@ -41,21 +41,14 @@ func ProtoBatchesToInternalTraces(batches []*model.Batch) pdata.Traces { } rss := traceData.ResourceSpans() - rss.Resize(len(batches)) + rss.EnsureCapacity(len(batches)) - i := 0 for _, batch := range batches { if batch.GetProcess() == nil && len(batch.GetSpans()) == 0 { continue } - protoBatchToResourceSpans(*batch, rss.At(i)) - i++ - } - - // reduce traceData.ResourceSpans slice if some batched were skipped - if i < len(batches) { - rss.Resize(i) + protoBatchToResourceSpans(*batch, rss.AppendEmpty()) } return traceData @@ -148,13 +141,7 @@ func jSpansToInternal(spans []*model.Span) map[instrumentationLibrary]pdata.Span if span == nil || reflect.DeepEqual(span, blankJaegerProtoSpan) { continue } - pSpan, library := jSpanToInternal(span) - ss, found := spansByLibrary[library] - if !found { - ss = pdata.NewSpanSlice() - spansByLibrary[library] = ss - } - ss.Append(pSpan) + jSpanToInternal(span, spansByLibrary) } return spansByLibrary } @@ -163,8 +150,15 @@ type instrumentationLibrary struct { name, version string } -func jSpanToInternal(span *model.Span) (pdata.Span, instrumentationLibrary) { - dest := pdata.NewSpan() +func jSpanToInternal(span *model.Span, spansByLibrary map[instrumentationLibrary]pdata.SpanSlice) { + il := getInstrumentationLibrary(span) + ss, found := spansByLibrary[il] + if !found { + ss = pdata.NewSpanSlice() + spansByLibrary[il] = ss + } + + dest := ss.AppendEmpty() dest.SetTraceID(idutils.UInt64ToTraceID(span.TraceID.High, span.TraceID.Low)) dest.SetSpanID(idutils.UInt64ToSpanID(uint64(span.SpanID))) dest.SetName(span.OperationName) @@ -186,16 +180,6 @@ func jSpanToInternal(span *model.Span) (pdata.Span, instrumentationLibrary) { attrs.Delete(tracetranslator.TagSpanKind) } - il := instrumentationLibrary{} - if libraryName, ok := attrs.Get(conventions.InstrumentationLibraryName); ok { - il.name = libraryName.StringVal() - 
attrs.Delete(conventions.InstrumentationLibraryName) - if libraryVersion, ok := attrs.Get(conventions.InstrumentationLibraryVersion); ok { - il.version = libraryVersion.StringVal() - attrs.Delete(conventions.InstrumentationLibraryVersion) - } - } - dest.SetTraceState(getTraceStateFromAttrs(attrs)) // drop the attributes slice if all of them were replaced during translation @@ -205,8 +189,6 @@ func jSpanToInternal(span *model.Span) (pdata.Span, instrumentationLibrary) { jLogsToSpanEvents(span.Logs, dest.Events()) jReferencesToSpanLinks(span.References, parentSpanID, dest.Links()) - - return dest, il } func jTagsToInternalAttributes(tags []model.KeyValue, dest pdata.AttributeMap) { @@ -323,10 +305,15 @@ func jLogsToSpanEvents(logs []model.Log, dest pdata.SpanEventSlice) { return } - dest.Resize(len(logs)) + dest.EnsureCapacity(len(logs)) for i, log := range logs { - event := dest.At(i) + var event pdata.SpanEvent + if dest.Len() > i { + event = dest.At(i) + } else { + event = dest.AppendEmpty() + } event.SetTimestamp(pdata.TimestampFromTime(log.Timestamp)) if len(log.Fields) == 0 { @@ -350,22 +337,15 @@ func jReferencesToSpanLinks(refs []model.SpanRef, excludeParentID model.SpanID, return } - dest.Resize(len(refs)) - i := 0 + dest.EnsureCapacity(len(refs)) for _, ref := range refs { - link := dest.At(i) if ref.SpanID == excludeParentID && ref.RefType == model.ChildOf { continue } + link := dest.AppendEmpty() link.SetTraceID(idutils.UInt64ToTraceID(ref.TraceID.High, ref.TraceID.Low)) link.SetSpanID(idutils.UInt64ToSpanID(uint64(ref.SpanID))) - i++ - } - - // Reduce slice size in case if excludeParentID was skipped - if i < len(refs) { - dest.Resize(i) } } @@ -378,3 +358,25 @@ func getTraceStateFromAttrs(attrs pdata.AttributeMap) pdata.TraceState { } return traceState } + +func getInstrumentationLibrary(span *model.Span) instrumentationLibrary { + il := instrumentationLibrary{} + if libraryName, ok := getAndDeleteTag(span, conventions.InstrumentationLibraryName); ok { + il.name = libraryName + if libraryVersion, ok := getAndDeleteTag(span, conventions.InstrumentationLibraryVersion); ok { + il.version = libraryVersion + } + } + return il +} + +func getAndDeleteTag(span *model.Span, key string) (string, bool) { + for i := range span.Tags { + if span.Tags[i].Key == key { + value := span.Tags[i].GetVStr() + span.Tags = append(span.Tags[:i], span.Tags[i+1:]...) 
+ return value, true + } + } + return "", false +} diff --git a/internal/otel_collector/translator/trace/jaeger/jaegerthrift_to_traces.go b/internal/otel_collector/translator/trace/jaeger/jaegerthrift_to_traces.go index 2367541ba6c..bf27a746aef 100644 --- a/internal/otel_collector/translator/trace/jaeger/jaegerthrift_to_traces.go +++ b/internal/otel_collector/translator/trace/jaeger/jaegerthrift_to_traces.go @@ -21,8 +21,8 @@ import ( "github.com/jaegertracing/jaeger/thrift-gen/jaeger" - "go.opentelemetry.io/collector/consumer/pdata" - idutils "go.opentelemetry.io/collector/internal/idutils" + "go.opentelemetry.io/collector/internal/idutils" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/translator/conventions" tracetranslator "go.opentelemetry.io/collector/translator/trace" ) @@ -80,18 +80,12 @@ func jThriftSpansToInternal(spans []*jaeger.Span, dest pdata.SpanSlice) { return } - dest.Resize(len(spans)) - i := 0 + dest.EnsureCapacity(len(spans)) for _, span := range spans { if span == nil || reflect.DeepEqual(span, blankJaegerProtoSpan) { continue } - jThriftSpanToInternal(span, dest.At(i)) - i++ - } - - if i < len(spans) { - dest.Resize(i) + jThriftSpanToInternal(span, dest.AppendEmpty()) } } @@ -151,10 +145,10 @@ func jThriftLogsToSpanEvents(logs []*jaeger.Log, dest pdata.SpanEventSlice) { return } - dest.Resize(len(logs)) + dest.EnsureCapacity(len(logs)) - for i, log := range logs { - event := dest.At(i) + for _, log := range logs { + event := dest.AppendEmpty() event.SetTimestamp(microsecondsToUnixNano(log.Timestamp)) if len(log.Fields) == 0 { @@ -177,22 +171,15 @@ func jThriftReferencesToSpanLinks(refs []*jaeger.SpanRef, excludeParentID int64, return } - dest.Resize(len(refs)) - i := 0 + dest.EnsureCapacity(len(refs)) for _, ref := range refs { - link := dest.At(i) if ref.SpanId == excludeParentID && ref.RefType == jaeger.SpanRefType_CHILD_OF { continue } + link := dest.AppendEmpty() link.SetTraceID(idutils.UInt64ToTraceID(uint64(ref.TraceIdHigh), uint64(ref.TraceIdLow))) link.SetSpanID(idutils.UInt64ToSpanID(uint64(ref.SpanId))) - i++ - } - - // Reduce slice size in case if excludeParentID was skipped - if i < len(refs) { - dest.Resize(i) } } diff --git a/internal/otel_collector/translator/trace/jaeger/traces_to_jaegerproto.go b/internal/otel_collector/translator/trace/jaeger/traces_to_jaegerproto.go index 11350e106ef..cc8ec2d1815 100644 --- a/internal/otel_collector/translator/trace/jaeger/traces_to_jaegerproto.go +++ b/internal/otel_collector/translator/trace/jaeger/traces_to_jaegerproto.go @@ -19,8 +19,8 @@ import ( "github.com/jaegertracing/jaeger/model" - "go.opentelemetry.io/collector/consumer/pdata" idutils "go.opentelemetry.io/collector/internal/idutils" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/translator/conventions" tracetranslator "go.opentelemetry.io/collector/translator/trace" ) diff --git a/internal/otel_collector/translator/trace/protospan_translation.go b/internal/otel_collector/translator/trace/protospan_translation.go index 3dcb6cdac3c..9d88fe7502b 100644 --- a/internal/otel_collector/translator/trace/protospan_translation.go +++ b/internal/otel_collector/translator/trace/protospan_translation.go @@ -19,7 +19,7 @@ import ( "fmt" "strconv" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) // Some of the keys used to represent OTLP constructs as tags or annotations in other formats. 
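The Jaeger proto translator above also changes shape: jSpanToInternal now receives the spansByLibrary map, resolves the span's instrumentation library through the new getInstrumentationLibrary/getAndDeleteTag helpers, and writes the translated span in place with AppendEmpty instead of building it standalone and appending afterwards. A condensed sketch of that grouping pattern; the Jaeger-side type below is a simplified stand-in for *model.Span, and only the pdata calls mirror the API used in the diff:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/model/pdata"
)

type instrumentationLibrary struct{ name, version string }

// jaegerSpan is a hypothetical stand-in for *model.Span from the Jaeger model.
type jaegerSpan struct {
	operationName string
	libName       string
	libVersion    string
}

func groupByLibrary(spans []jaegerSpan) map[instrumentationLibrary]pdata.SpanSlice {
	byLib := make(map[instrumentationLibrary]pdata.SpanSlice)
	for _, s := range spans {
		il := instrumentationLibrary{name: s.libName, version: s.libVersion}
		ss, ok := byLib[il]
		if !ok {
			// First span for this library: start a fresh slice for it.
			ss = pdata.NewSpanSlice()
			byLib[il] = ss
		}
		// Append directly into the per-library slice and translate in place.
		dest := ss.AppendEmpty()
		dest.SetName(s.operationName)
	}
	return byLib
}

func main() {
	byLib := groupByLibrary([]jaegerSpan{
		{operationName: "query", libName: "otel-go", libVersion: "1.0"},
		{operationName: "render", libName: "otel-go", libVersion: "1.0"},
	})
	for il, ss := range byLib {
		fmt.Printf("%s@%s: %d spans\n", il.name, il.version, ss.Len())
	}
}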
diff --git a/internal/otel_collector/translator/trace/zipkin/zipkinv1_thrift_to_traces.go b/internal/otel_collector/translator/trace/zipkin/zipkinv1_thrift_to_traces.go deleted file mode 100644 index 0eee057a9df..00000000000 --- a/internal/otel_collector/translator/trace/zipkin/zipkinv1_thrift_to_traces.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zipkin - -import ( - "github.com/jaegertracing/jaeger/thrift-gen/zipkincore" - - "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/translator/internaldata" -) - -// V1ThriftBatchToInternalTraces transforms Zipkin v1 spans into pdata.Traces. -func V1ThriftBatchToInternalTraces(zSpans []*zipkincore.Span) (pdata.Traces, error) { - traces := pdata.NewTraces() - ocTraces, _ := v1ThriftBatchToOCProto(zSpans) - - for _, td := range ocTraces { - tmp := internaldata.OCToTraces(td.Node, td.Resource, td.Spans) - tmp.ResourceSpans().MoveAndAppendTo(traces.ResourceSpans()) - } - return traces, nil -} diff --git a/internal/otel_collector/translator/trace/zipkin/zipkinv1_to_traces.go b/internal/otel_collector/translator/trace/zipkin/zipkinv1_to_traces.go deleted file mode 100644 index 997edb16d8d..00000000000 --- a/internal/otel_collector/translator/trace/zipkin/zipkinv1_to_traces.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zipkin - -import ( - "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/translator/internaldata" -) - -// V1JSONBatchToInternalTraces transforms a JSON blob with a list of Zipkin v1 spans into pdata.Traces. 
-func V1JSONBatchToInternalTraces(blob []byte, parseStringTags bool) (pdata.Traces, error) { - traces := pdata.NewTraces() - - ocTraces, err := v1JSONBatchToOCProto(blob, parseStringTags) - if err != nil { - return traces, err - } - - for _, td := range ocTraces { - tmp := internaldata.OCToTraces(td.Node, td.Resource, td.Spans) - tmp.ResourceSpans().MoveAndAppendTo(traces.ResourceSpans()) - } - return traces, nil -} diff --git a/internal/otel_collector/translator/trace/zipkin/consumerdata.go b/internal/otel_collector/translator/trace/zipkinv1/consumerdata.go similarity index 98% rename from internal/otel_collector/translator/trace/zipkin/consumerdata.go rename to internal/otel_collector/translator/trace/zipkinv1/consumerdata.go index 6495ca93f28..65fdd62d770 100644 --- a/internal/otel_collector/translator/trace/zipkin/consumerdata.go +++ b/internal/otel_collector/translator/trace/zipkinv1/consumerdata.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package zipkin +package zipkinv1 import ( commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" diff --git a/internal/otel_collector/translator/trace/zipkin/grpc_http_mapper.go b/internal/otel_collector/translator/trace/zipkinv1/grpc_http_mapper.go similarity index 99% rename from internal/otel_collector/translator/trace/zipkin/grpc_http_mapper.go rename to internal/otel_collector/translator/trace/zipkinv1/grpc_http_mapper.go index 52d75b9f3ed..e3a1beb6428 100644 --- a/internal/otel_collector/translator/trace/zipkin/grpc_http_mapper.go +++ b/internal/otel_collector/translator/trace/zipkinv1/grpc_http_mapper.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package zipkin +package zipkinv1 // https://github.com/googleapis/googleapis/blob/bee79fbe03254a35db125dc6d2f1e9b752b390fe/google/rpc/code.proto#L33-L186 const ( diff --git a/internal/otel_collector/translator/trace/zipkin/zipkinv1_to_protospan.go b/internal/otel_collector/translator/trace/zipkinv1/json.go similarity index 94% rename from internal/otel_collector/translator/trace/zipkin/zipkinv1_to_protospan.go rename to internal/otel_collector/translator/trace/zipkinv1/json.go index 38baae5db9c..0f4f5f0492b 100644 --- a/internal/otel_collector/translator/trace/zipkin/zipkinv1_to_protospan.go +++ b/internal/otel_collector/translator/trace/zipkinv1/json.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package zipkin +package zipkinv1 import ( "encoding/json" @@ -27,9 +27,10 @@ import ( "github.com/jaegertracing/jaeger/thrift-gen/zipkincore" "google.golang.org/protobuf/types/known/timestamppb" - "go.opentelemetry.io/collector/consumer/pdata" - idutils "go.opentelemetry.io/collector/internal/idutils" + "go.opentelemetry.io/collector/internal/idutils" + "go.opentelemetry.io/collector/model/pdata" tracetranslator "go.opentelemetry.io/collector/translator/trace" + "go.opentelemetry.io/collector/translator/trace/internal/zipkin" ) var ( @@ -47,6 +48,25 @@ var ( errHexIDZero = errors.New("ID is zero") ) +type jsonUnmarshaler struct { + // ParseStringTags should be set to true if tags should be converted to numbers when possible. + ParseStringTags bool +} + +// UnmarshalTraces from JSON bytes. 
+func (j jsonUnmarshaler) UnmarshalTraces(buf []byte) (pdata.Traces, error) { + tds, err := v1JSONBatchToOCProto(buf, j.ParseStringTags) + if err != nil { + return pdata.Traces{}, err + } + return toTraces(tds) +} + +// NewJSONTracesUnmarshaler returns an unmarshaler for Zipkin JSON. +func NewJSONTracesUnmarshaler(parseStringTags bool) pdata.TracesUnmarshaler { + return jsonUnmarshaler{ParseStringTags: parseStringTags} +} + // Trace translation from Zipkin V1 is a bit of special case since there is no model // defined in golang for Zipkin V1 spans and there is no need to define one here, given // that the zipkinV1Span defined below is as defined at: @@ -254,7 +274,7 @@ func parseAnnotationValue(value string, parseStringTags bool) *tracepb.Attribute pbAttrib := &tracepb.AttributeValue{} if parseStringTags { - switch determineValueType(value) { + switch zipkin.DetermineValueType(value) { case pdata.AttributeValueTypeInt: iValue, _ := strconv.ParseInt(value, 10, 64) pbAttrib.Value = &tracepb.AttributeValue_IntValue{IntValue: iValue} @@ -509,7 +529,7 @@ func setTimestampsIfUnset(span *tracepb.Span) { if span.Attributes.AttributeMap == nil { span.Attributes.AttributeMap = make(map[string]*tracepb.AttributeValue, 1) } - span.Attributes.AttributeMap[startTimeAbsent] = &tracepb.AttributeValue{ + span.Attributes.AttributeMap[zipkin.StartTimeAbsent] = &tracepb.AttributeValue{ Value: &tracepb.AttributeValue_BoolValue{ BoolValue: true, }} diff --git a/internal/otel_collector/translator/trace/zipkin/status_code.go b/internal/otel_collector/translator/trace/zipkinv1/status_code.go similarity index 99% rename from internal/otel_collector/translator/trace/zipkin/status_code.go rename to internal/otel_collector/translator/trace/zipkinv1/status_code.go index 62f0fff9bb5..9cb611a6424 100644 --- a/internal/otel_collector/translator/trace/zipkin/status_code.go +++ b/internal/otel_collector/translator/trace/zipkinv1/status_code.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package zipkin +package zipkinv1 import ( "fmt" diff --git a/internal/otel_collector/translator/trace/zipkin/testdata/zipkin_v1_error_batch.json b/internal/otel_collector/translator/trace/zipkinv1/testdata/zipkin_v1_error_batch.json similarity index 100% rename from internal/otel_collector/translator/trace/zipkin/testdata/zipkin_v1_error_batch.json rename to internal/otel_collector/translator/trace/zipkinv1/testdata/zipkin_v1_error_batch.json diff --git a/internal/otel_collector/translator/trace/zipkin/testdata/zipkin_v1_local_component.json b/internal/otel_collector/translator/trace/zipkinv1/testdata/zipkin_v1_local_component.json similarity index 100% rename from internal/otel_collector/translator/trace/zipkin/testdata/zipkin_v1_local_component.json rename to internal/otel_collector/translator/trace/zipkinv1/testdata/zipkin_v1_local_component.json diff --git a/internal/otel_collector/translator/trace/zipkin/testdata/zipkin_v1_multiple_batches.json b/internal/otel_collector/translator/trace/zipkinv1/testdata/zipkin_v1_multiple_batches.json similarity index 100% rename from internal/otel_collector/translator/trace/zipkin/testdata/zipkin_v1_multiple_batches.json rename to internal/otel_collector/translator/trace/zipkinv1/testdata/zipkin_v1_multiple_batches.json diff --git a/internal/otel_collector/translator/trace/zipkin/testdata/zipkin_v1_single_batch.json b/internal/otel_collector/translator/trace/zipkinv1/testdata/zipkin_v1_single_batch.json similarity index 100% rename from internal/otel_collector/translator/trace/zipkin/testdata/zipkin_v1_single_batch.json rename to internal/otel_collector/translator/trace/zipkinv1/testdata/zipkin_v1_single_batch.json diff --git a/internal/otel_collector/translator/trace/zipkin/testdata/zipkin_v1_thrift_local_component.json b/internal/otel_collector/translator/trace/zipkinv1/testdata/zipkin_v1_thrift_local_component.json similarity index 100% rename from internal/otel_collector/translator/trace/zipkin/testdata/zipkin_v1_thrift_local_component.json rename to internal/otel_collector/translator/trace/zipkinv1/testdata/zipkin_v1_thrift_local_component.json diff --git a/internal/otel_collector/translator/trace/zipkin/testdata/zipkin_v1_thrift_single_batch.json b/internal/otel_collector/translator/trace/zipkinv1/testdata/zipkin_v1_thrift_single_batch.json similarity index 100% rename from internal/otel_collector/translator/trace/zipkin/testdata/zipkin_v1_thrift_single_batch.json rename to internal/otel_collector/translator/trace/zipkinv1/testdata/zipkin_v1_thrift_single_batch.json diff --git a/internal/otel_collector/translator/trace/zipkin/zipkinv1_thrift_to_protospan.go b/internal/otel_collector/translator/trace/zipkinv1/thrift.go similarity index 92% rename from internal/otel_collector/translator/trace/zipkin/zipkinv1_thrift_to_protospan.go rename to internal/otel_collector/translator/trace/zipkinv1/thrift.go index 2e588560bbd..fd9613b6ba9 100644 --- a/internal/otel_collector/translator/trace/zipkin/zipkinv1_thrift_to_protospan.go +++ b/internal/otel_collector/translator/trace/zipkinv1/thrift.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package zipkin +package zipkinv1 import ( "bytes" @@ -24,12 +24,34 @@ import ( "net" tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" + jaegerzipkin "github.com/jaegertracing/jaeger/model/converter/thrift/zipkin" "github.com/jaegertracing/jaeger/thrift-gen/zipkincore" "google.golang.org/protobuf/types/known/timestamppb" - idutils "go.opentelemetry.io/collector/internal/idutils" + "go.opentelemetry.io/collector/internal/idutils" + "go.opentelemetry.io/collector/model/pdata" ) +type thriftUnmarshaler struct{} + +// UnmarshalTraces from Thrift bytes. +func (t thriftUnmarshaler) UnmarshalTraces(buf []byte) (pdata.Traces, error) { + spans, err := jaegerzipkin.DeserializeThrift(buf) + if err != nil { + return pdata.Traces{}, err + } + tds, err := v1ThriftBatchToOCProto(spans) + if err != nil { + return pdata.Traces{}, err + } + return toTraces(tds) +} + +// NewThriftTracesUnmarshaler returns an unmarshaler for Zipkin Thrift. +func NewThriftTracesUnmarshaler() pdata.TracesUnmarshaler { + return thriftUnmarshaler{} +} + // v1ThriftBatchToOCProto converts Zipkin v1 spans to OC Proto. func v1ThriftBatchToOCProto(zSpans []*zipkincore.Span) ([]traceData, error) { ocSpansAndParsedAnnotations := make([]ocSpanAndParsedAnnotations, 0, len(zSpans)) diff --git a/internal/otel_collector/translator/trace/zipkinv1/to_translator.go b/internal/otel_collector/translator/trace/zipkinv1/to_translator.go new file mode 100644 index 00000000000..78876dac08b --- /dev/null +++ b/internal/otel_collector/translator/trace/zipkinv1/to_translator.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zipkinv1 + +import ( + "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/translator/internaldata" +) + +func toTraces(ocTraces []traceData) (pdata.Traces, error) { + td := pdata.NewTraces() + + for _, trace := range ocTraces { + tmp := internaldata.OCToTraces(trace.Node, trace.Resource, trace.Spans) + tmp.ResourceSpans().MoveAndAppendTo(td.ResourceSpans()) + } + + return td, nil +} diff --git a/internal/otel_collector/translator/trace/zipkin/traces_to_zipkinv2.go b/internal/otel_collector/translator/trace/zipkinv2/from_translator.go similarity index 93% rename from internal/otel_collector/translator/trace/zipkin/traces_to_zipkinv2.go rename to internal/otel_collector/translator/trace/zipkinv2/from_translator.go index 02f57ac9a70..34185943d22 100644 --- a/internal/otel_collector/translator/trace/zipkin/traces_to_zipkinv2.go +++ b/internal/otel_collector/translator/trace/zipkinv2/from_translator.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package zipkin +package zipkinv2 import ( "encoding/json" @@ -24,10 +24,11 @@ import ( zipkinmodel "github.com/openzipkin/zipkin-go/model" - "go.opentelemetry.io/collector/consumer/pdata" - idutils "go.opentelemetry.io/collector/internal/idutils" + "go.opentelemetry.io/collector/internal/idutils" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/translator/conventions" tracetranslator "go.opentelemetry.io/collector/translator/trace" + "go.opentelemetry.io/collector/translator/trace/internal/zipkin" ) const ( @@ -35,11 +36,16 @@ const ( spanLinkDataFormat = "%s|%s|%s|%s|%d" ) -var sampled = true +var ( + sampled = true +) + +// FromTranslator converts from pdata to Zipkin data model. +type FromTranslator struct{} -// InternalTracesToZipkinSpans translates internal trace data into Zipkin v2 spans. +// FromTraces translates internal trace data into Zipkin v2 spans. // Returns a slice of Zipkin SpanModel's. -func InternalTracesToZipkinSpans(td pdata.Traces) ([]*zipkinmodel.SpanModel, error) { +func (t FromTranslator) FromTraces(td pdata.Traces) ([]*zipkinmodel.SpanModel, error) { resourceSpans := td.ResourceSpans() if resourceSpans.Len() == 0 { return nil, nil @@ -268,15 +274,15 @@ func extractZipkinServiceName(zTags map[string]string) string { } else if fn, ok := zTags[conventions.AttributeFaasName]; ok { serviceName = fn delete(zTags, conventions.AttributeFaasName) - zTags[tagServiceNameSource] = conventions.AttributeFaasName + zTags[zipkin.TagServiceNameSource] = conventions.AttributeFaasName } else if fn, ok := zTags[conventions.AttributeK8sDeployment]; ok { serviceName = fn delete(zTags, conventions.AttributeK8sDeployment) - zTags[tagServiceNameSource] = conventions.AttributeK8sDeployment + zTags[zipkin.TagServiceNameSource] = conventions.AttributeK8sDeployment } else if fn, ok := zTags[conventions.AttributeProcessExecutableName]; ok { serviceName = fn delete(zTags, conventions.AttributeProcessExecutableName) - zTags[tagServiceNameSource] = conventions.AttributeProcessExecutableName + zTags[zipkin.TagServiceNameSource] = conventions.AttributeProcessExecutableName } else { serviceName = tracetranslator.ResourceNoServiceName } diff --git a/internal/otel_collector/translator/trace/zipkinv2/json.go b/internal/otel_collector/translator/trace/zipkinv2/json.go new file mode 100644 index 00000000000..3b9cbc38a83 --- /dev/null +++ b/internal/otel_collector/translator/trace/zipkinv2/json.go @@ -0,0 +1,61 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zipkinv2 + +import ( + "encoding/json" + + zipkinmodel "github.com/openzipkin/zipkin-go/model" + zipkinreporter "github.com/openzipkin/zipkin-go/reporter" + + "go.opentelemetry.io/collector/model/pdata" +) + +type jsonUnmarshaler struct { + toTranslator ToTranslator +} + +// UnmarshalTraces from JSON bytes. 
+func (j jsonUnmarshaler) UnmarshalTraces(buf []byte) (pdata.Traces, error) { + var spans []*zipkinmodel.SpanModel + if err := json.Unmarshal(buf, &spans); err != nil { + return pdata.Traces{}, err + } + return j.toTranslator.ToTraces(spans) +} + +type jsonMarshaler struct { + serializer zipkinreporter.JSONSerializer + fromTranslator FromTranslator +} + +// MarshalTraces to JSON bytes. +func (j jsonMarshaler) MarshalTraces(td pdata.Traces) ([]byte, error) { + spans, err := j.fromTranslator.FromTraces(td) + if err != nil { + return nil, err + } + return j.serializer.Serialize(spans) +} + +// NewJSONTracesUnmarshaler returns an unmarshaler for JSON bytes. +func NewJSONTracesUnmarshaler(parseStringTags bool) pdata.TracesUnmarshaler { + return jsonUnmarshaler{toTranslator: ToTranslator{ParseStringTags: parseStringTags}} +} + +// NewJSONTracesMarshaler returns a marshaler to JSON bytes. +func NewJSONTracesMarshaler() pdata.TracesMarshaler { + return jsonMarshaler{} +} diff --git a/internal/otel_collector/translator/trace/zipkinv2/protobuf.go b/internal/otel_collector/translator/trace/zipkinv2/protobuf.go new file mode 100644 index 00000000000..a554bc73395 --- /dev/null +++ b/internal/otel_collector/translator/trace/zipkinv2/protobuf.go @@ -0,0 +1,65 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zipkinv2 + +import ( + "github.com/openzipkin/zipkin-go/proto/zipkin_proto3" + + "go.opentelemetry.io/collector/model/pdata" +) + +type protobufUnmarshaler struct { + // debugWasSet toggles the Debug field of each Span. It is usually set to true if + // the "X-B3-Flags" header is set to 1 on the request. + debugWasSet bool + + toTranslator ToTranslator +} + +// UnmarshalTraces from protobuf bytes. +func (p protobufUnmarshaler) UnmarshalTraces(buf []byte) (pdata.Traces, error) { + spans, err := zipkin_proto3.ParseSpans(buf, p.debugWasSet) + if err != nil { + return pdata.Traces{}, err + } + return p.toTranslator.ToTraces(spans) +} + +type protobufMarshaler struct { + serializer zipkin_proto3.SpanSerializer + fromTranslator FromTranslator +} + +// MarshalTraces to protobuf bytes. +func (p protobufMarshaler) MarshalTraces(td pdata.Traces) ([]byte, error) { + spans, err := p.fromTranslator.FromTraces(td) + if err != nil { + return nil, err + } + return p.serializer.Serialize(spans) +} + +// NewProtobufTracesUnmarshaler returns an pdata.TracesUnmarshaler of protobuf bytes. +func NewProtobufTracesUnmarshaler(debugWasSet, parseStringTags bool) pdata.TracesUnmarshaler { + return protobufUnmarshaler{ + debugWasSet: debugWasSet, + toTranslator: ToTranslator{ParseStringTags: parseStringTags}, + } +} + +// NewProtobufTracesMarshaler returns a new pdata.TracesMarshaler to protobuf bytes. 
+func NewProtobufTracesMarshaler() pdata.TracesMarshaler { + return protobufMarshaler{} +} diff --git a/internal/otel_collector/translator/trace/zipkin/testdata/zipkin_v2_notimestamp.json b/internal/otel_collector/translator/trace/zipkinv2/testdata/zipkin_v2_notimestamp.json similarity index 100% rename from internal/otel_collector/translator/trace/zipkin/testdata/zipkin_v2_notimestamp.json rename to internal/otel_collector/translator/trace/zipkinv2/testdata/zipkin_v2_notimestamp.json diff --git a/internal/otel_collector/translator/trace/zipkin/testdata/zipkin_v2_single.json b/internal/otel_collector/translator/trace/zipkinv2/testdata/zipkin_v2_single.json similarity index 100% rename from internal/otel_collector/translator/trace/zipkin/testdata/zipkin_v2_single.json rename to internal/otel_collector/translator/trace/zipkinv2/testdata/zipkin_v2_single.json diff --git a/internal/otel_collector/translator/trace/zipkin/zipkinv2_to_traces.go b/internal/otel_collector/translator/trace/zipkinv2/to_translator.go similarity index 83% rename from internal/otel_collector/translator/trace/zipkin/zipkinv2_to_traces.go rename to internal/otel_collector/translator/trace/zipkinv2/to_translator.go index 7ee7414a9e0..a2f9b981ea4 100644 --- a/internal/otel_collector/translator/trace/zipkin/zipkinv2_to_traces.go +++ b/internal/otel_collector/translator/trace/zipkinv2/to_translator.go @@ -12,10 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -package zipkin +package zipkinv2 import ( + "encoding/hex" "encoding/json" + "errors" "fmt" "math" "sort" @@ -25,52 +27,22 @@ import ( zipkinmodel "github.com/openzipkin/zipkin-go/model" - "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/data" - otlptrace "go.opentelemetry.io/collector/internal/data/protogen/trace/v1" - idutils "go.opentelemetry.io/collector/internal/idutils" + "go.opentelemetry.io/collector/internal/idutils" "go.opentelemetry.io/collector/internal/occonventions" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/translator/conventions" tracetranslator "go.opentelemetry.io/collector/translator/trace" + "go.opentelemetry.io/collector/translator/trace/internal/zipkin" ) -var nonSpanAttributes = func() map[string]struct{} { - attrs := make(map[string]struct{}) - for _, key := range conventions.GetResourceSemanticConventionAttributeNames() { - attrs[key] = struct{}{} - } - attrs[tagServiceNameSource] = struct{}{} - attrs[conventions.InstrumentationLibraryName] = struct{}{} - attrs[conventions.InstrumentationLibraryVersion] = struct{}{} - attrs[occonventions.AttributeProcessStartTime] = struct{}{} - attrs[occonventions.AttributeExporterVersion] = struct{}{} - attrs[conventions.AttributeProcessID] = struct{}{} - attrs[occonventions.AttributeResourceType] = struct{}{} - return attrs -}() - -// Custom Sort on -type byOTLPTypes []*zipkinmodel.SpanModel - -func (b byOTLPTypes) Len() int { - return len(b) +// ToTranslator converts from Zipkin data model to pdata. +type ToTranslator struct { + // ParseStringTags should be set to true if tags should be converted to numbers when possible. 
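// Illustrative sketch only (assumed behaviour, based on the zipkin.DetermineValueType
// switch in tagsToAttributeMap below): with ParseStringTags enabled, a Zipkin string
// tag whose value looks numeric or boolean is stored as a typed attribute rather than
// a string, e.g.
//
//	t := zipkinv2.ToTranslator{ParseStringTags: true}
//	// a span tag {"retry_count": "3"} becomes the integer attribute retry_count=3
//	traces, err := t.ToTraces(zipkinSpans)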
+ ParseStringTags bool } -func (b byOTLPTypes) Less(i, j int) bool { - diff := strings.Compare(extractLocalServiceName(b[i]), extractLocalServiceName(b[j])) - if diff != 0 { - return diff <= 0 - } - diff = strings.Compare(extractInstrumentationLibrary(b[i]), extractInstrumentationLibrary(b[j])) - return diff <= 0 -} - -func (b byOTLPTypes) Swap(i, j int) { - b[i], b[j] = b[j], b[i] -} - -// V2SpansToInternalTraces translates Zipkin v2 spans into internal trace data. -func V2SpansToInternalTraces(zipkinSpans []*zipkinmodel.SpanModel, parseStringTags bool) (pdata.Traces, error) { +// ToTraces translates Zipkin v2 spans into pdata.Traces. +func (t ToTranslator) ToTraces(zipkinSpans []*zipkinmodel.SpanModel) (pdata.Traces, error) { traceData := pdata.NewTraces() if len(zipkinSpans) == 0 { return traceData, nil @@ -81,9 +53,7 @@ func V2SpansToInternalTraces(zipkinSpans []*zipkinmodel.SpanModel, parseStringTa rss := traceData.ResourceSpans() prevServiceName := "" prevInstrLibName := "" - rsCount := rss.Len() - ilsCount := 0 - spanCount := 0 + ilsIsNew := true var curRscSpans pdata.ResourceSpans var curILSpans pdata.InstrumentationLibrarySpans var curSpans pdata.SpanSlice @@ -95,34 +65,63 @@ func V2SpansToInternalTraces(zipkinSpans []*zipkinmodel.SpanModel, parseStringTa localServiceName := extractLocalServiceName(zspan) if localServiceName != prevServiceName { prevServiceName = localServiceName - rss.Resize(rsCount + 1) - curRscSpans = rss.At(rsCount) - rsCount++ + curRscSpans = rss.AppendEmpty() populateResourceFromZipkinSpan(tags, localServiceName, curRscSpans.Resource()) prevInstrLibName = "" - ilsCount = 0 + ilsIsNew = true } instrLibName := extractInstrumentationLibrary(zspan) - if instrLibName != prevInstrLibName || ilsCount == 0 { + if instrLibName != prevInstrLibName || ilsIsNew { prevInstrLibName = instrLibName - curRscSpans.InstrumentationLibrarySpans().Resize(ilsCount + 1) - curILSpans = curRscSpans.InstrumentationLibrarySpans().At(ilsCount) - ilsCount++ + curILSpans = curRscSpans.InstrumentationLibrarySpans().AppendEmpty() + ilsIsNew = false populateILFromZipkinSpan(tags, instrLibName, curILSpans.InstrumentationLibrary()) - spanCount = 0 curSpans = curILSpans.Spans() } - curSpans.Resize(spanCount + 1) - err := zSpanToInternal(zspan, tags, curSpans.At(spanCount), parseStringTags) + err := zSpanToInternal(zspan, tags, curSpans.AppendEmpty(), t.ParseStringTags) if err != nil { return traceData, err } - spanCount++ } return traceData, nil } +var nonSpanAttributes = func() map[string]struct{} { + attrs := make(map[string]struct{}) + for _, key := range conventions.GetResourceSemanticConventionAttributeNames() { + attrs[key] = struct{}{} + } + attrs[zipkin.TagServiceNameSource] = struct{}{} + attrs[conventions.InstrumentationLibraryName] = struct{}{} + attrs[conventions.InstrumentationLibraryVersion] = struct{}{} + attrs[occonventions.AttributeProcessStartTime] = struct{}{} + attrs[occonventions.AttributeExporterVersion] = struct{}{} + attrs[conventions.AttributeProcessID] = struct{}{} + attrs[occonventions.AttributeResourceType] = struct{}{} + return attrs +}() + +// Custom Sort on +type byOTLPTypes []*zipkinmodel.SpanModel + +func (b byOTLPTypes) Len() int { + return len(b) +} + +func (b byOTLPTypes) Less(i, j int) bool { + diff := strings.Compare(extractLocalServiceName(b[i]), extractLocalServiceName(b[j])) + if diff != 0 { + return diff <= 0 + } + diff = strings.Compare(extractInstrumentationLibrary(b[i]), extractInstrumentationLibrary(b[j])) + return diff <= 0 +} + +func (b 
byOTLPTypes) Swap(i, j int) { + b[i], b[j] = b[j], b[i] +} + func zSpanToInternal(zspan *zipkinmodel.SpanModel, tags map[string]string, dest pdata.Span, parseStringTags bool) error { dest.SetTraceID(idutils.UInt64ToTraceID(zspan.TraceID.High, zspan.TraceID.Low)) dest.SetSpanID(idutils.UInt64ToSpanID(uint64(zspan.ID))) @@ -158,7 +157,7 @@ func zSpanToInternal(zspan *zipkinmodel.SpanModel, tags map[string]string, dest func populateSpanStatus(tags map[string]string, status pdata.SpanStatus) { if value, ok := tags[tracetranslator.TagStatusCode]; ok { - status.SetCode(pdata.StatusCode(otlptrace.Status_StatusCode_value[value])) + status.SetCode(pdata.StatusCode(statusCodeValue[value])) delete(tags, tracetranslator.TagStatusCode) if value, ok := tags[tracetranslator.TagStatusMsg]; ok { status.SetMessage(value) @@ -196,7 +195,6 @@ func zipkinKindToSpanKind(kind zipkinmodel.Kind, tags map[string]string) pdata.S } func zTagsToSpanLinks(tags map[string]string, dest pdata.SpanLinkSlice) error { - index := 0 for i := 0; i < 128; i++ { key := fmt.Sprintf("otlp.link.%d", i) val, ok := tags[key] @@ -210,25 +208,23 @@ func zTagsToSpanLinks(tags map[string]string, dest pdata.SpanLinkSlice) error { if partCnt < 5 { continue } - dest.Resize(index + 1) - link := dest.At(index) - index++ + link := dest.AppendEmpty() // Convert trace id. - rawTrace := data.TraceID{} - errTrace := rawTrace.UnmarshalJSON([]byte(parts[0])) + rawTrace := [16]byte{} + errTrace := unmarshalJSON(rawTrace[:], []byte(parts[0])) if errTrace != nil { return errTrace } - link.SetTraceID(pdata.NewTraceID(rawTrace.Bytes())) + link.SetTraceID(pdata.NewTraceID(rawTrace)) // Convert span id. - rawSpan := data.SpanID{} - errSpan := rawSpan.UnmarshalJSON([]byte(parts[1])) + rawSpan := [8]byte{} + errSpan := unmarshalJSON(rawSpan[:], []byte(parts[1])) if errSpan != nil { return errSpan } - link.SetSpanID(pdata.NewSpanID(rawSpan.Bytes())) + link.SetSpanID(pdata.NewSpanID(rawSpan)) link.SetTraceState(pdata.TraceState(parts[2])) @@ -257,9 +253,9 @@ func zTagsToSpanLinks(tags map[string]string, dest pdata.SpanLinkSlice) error { } func populateSpanEvents(zspan *zipkinmodel.SpanModel, events pdata.SpanEventSlice) error { - events.Resize(len(zspan.Annotations)) - for ix, anno := range zspan.Annotations { - event := events.At(ix) + events.EnsureCapacity(len(zspan.Annotations)) + for _, anno := range zspan.Annotations { + event := events.AppendEmpty() event.SetTimestamp(pdata.TimestampFromTime(anno.Timestamp)) parts := strings.Split(anno.Value, "|") @@ -348,7 +344,7 @@ func tagsToAttributeMap(tags map[string]string, dest pdata.AttributeMap, parseSt } if parseStringTags { - switch determineValueType(val) { + switch zipkin.DetermineValueType(val) { case pdata.AttributeValueTypeInt: iValue, _ := strconv.ParseInt(val, 10, 64) dest.UpsertInt(key, iValue) @@ -378,13 +374,13 @@ func populateResourceFromZipkinSpan(tags map[string]string, localServiceName str return } - snSource := tags[tagServiceNameSource] + snSource := tags[zipkin.TagServiceNameSource] if snSource == "" { resource.Attributes().InsertString(conventions.AttributeServiceName, localServiceName) } else { resource.Attributes().InsertString(snSource, localServiceName) } - delete(tags, tagServiceNameSource) + delete(tags, zipkin.TagServiceNameSource) for key := range nonSpanAttributes { if key == conventions.InstrumentationLibraryName || key == conventions.InstrumentationLibraryVersion { @@ -445,9 +441,38 @@ func setTimestampsV2(zspan *zipkinmodel.SpanModel, dest pdata.Span, destAttrs pd 
dest.SetStartTimestamp(unixTimeZero) dest.SetEndTimestamp(zeroPlusDuration) - destAttrs.InsertBool(startTimeAbsent, true) + destAttrs.InsertBool(zipkin.StartTimeAbsent, true) } else { dest.SetStartTimestamp(pdata.TimestampFromTime(zspan.Timestamp)) dest.SetEndTimestamp(pdata.TimestampFromTime(zspan.Timestamp.Add(zspan.Duration))) } } + +// unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes. +// TODO: Find a way to avoid this duplicate code. Consider to expose this in model/pdata. +func unmarshalJSON(dst []byte, src []byte) error { + if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' { + src = src[1 : l-1] + } + nLen := len(src) + if nLen == 0 { + return nil + } + + if len(dst) != hex.DecodedLen(nLen) { + return errors.New("invalid length for ID") + } + + _, err := hex.Decode(dst, src) + if err != nil { + return fmt.Errorf("cannot unmarshal ID from string '%s': %w", string(src), err) + } + return nil +} + +// TODO: Find a way to avoid this duplicate code. Consider to expose this in model/pdata. +var statusCodeValue = map[string]int32{ + "STATUS_CODE_UNSET": 0, + "STATUS_CODE_OK": 1, + "STATUS_CODE_ERROR": 2, +} diff --git a/processor/otel/consumer.go b/processor/otel/consumer.go index 32c9728c9b2..9d16dfbb602 100644 --- a/processor/otel/consumer.go +++ b/processor/otel/consumer.go @@ -45,8 +45,8 @@ import ( "time" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/otlptext" + "go.opentelemetry.io/collector/model/otlp" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/translator/conventions" "google.golang.org/grpc/codes" @@ -76,6 +76,11 @@ const ( AttributeNetworkICC = "net.host.carrier.icc" ) +var ( + jsonTracesMarshaler = otlp.NewJSONTracesMarshaler() + jsonMetricsMarshaler = otlp.NewJSONMetricsMarshaler() +) + // Consumer transforms open-telemetry data to be compatible with elastic APM data type Consumer struct { stats consumerStats @@ -116,7 +121,12 @@ func (c *Consumer) ConsumeTraces(ctx context.Context, traces pdata.Traces) error receiveTimestamp := time.Now() logger := logp.NewLogger(logs.Otel) if logger.IsDebug() { - logger.Debug(otlptext.Traces(traces)) + data, err := jsonTracesMarshaler.MarshalTraces(traces) + if err != nil { + logger.Debug(err) + } else { + logger.Debug(data) + } } batch := c.convert(traces, receiveTimestamp, logger) return c.Processor.ProcessBatch(ctx, batch) diff --git a/processor/otel/consumer_test.go b/processor/otel/consumer_test.go index 942fcac86af..b7a3646f990 100644 --- a/processor/otel/consumer_test.go +++ b/processor/otel/consumer_test.go @@ -45,7 +45,7 @@ import ( jaegermodel "github.com/jaegertracing/jaeger/model" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/translator/conventions" jaegertranslator "go.opentelemetry.io/collector/translator/trace/jaeger" "google.golang.org/grpc/codes" @@ -76,18 +76,16 @@ func TestOutcome(t *testing.T) { t.Helper() traces, spans := newTracesSpans() - otelSpan1 := pdata.NewSpan() + otelSpan1 := spans.Spans().AppendEmpty() otelSpan1.SetTraceID(pdata.NewTraceID([16]byte{1})) otelSpan1.SetSpanID(pdata.NewSpanID([8]byte{2})) otelSpan1.Status().SetCode(statusCode) - otelSpan2 := pdata.NewSpan() + otelSpan2 := spans.Spans().AppendEmpty() otelSpan2.SetTraceID(pdata.NewTraceID([16]byte{1})) otelSpan2.SetSpanID(pdata.NewSpanID([8]byte{2})) 
otelSpan2.SetParentSpanID(pdata.NewSpanID([8]byte{3})) otelSpan2.Status().SetCode(statusCode) - spans.Spans().Append(otelSpan1) - spans.Spans().Append(otelSpan2) batch := transformTraces(t, traces) require.Len(t, batch, 2) @@ -103,16 +101,14 @@ func TestOutcome(t *testing.T) { func TestRepresentativeCount(t *testing.T) { traces, spans := newTracesSpans() - otelSpan1 := pdata.NewSpan() + otelSpan1 := spans.Spans().AppendEmpty() otelSpan1.SetTraceID(pdata.NewTraceID([16]byte{1})) otelSpan1.SetSpanID(pdata.NewSpanID([8]byte{2})) - otelSpan2 := pdata.NewSpan() + otelSpan2 := spans.Spans().AppendEmpty() otelSpan2.SetTraceID(pdata.NewTraceID([16]byte{1})) otelSpan2.SetSpanID(pdata.NewSpanID([8]byte{2})) otelSpan2.SetParentSpanID(pdata.NewSpanID([8]byte{3})) - spans.Spans().Append(otelSpan1) - spans.Spans().Append(otelSpan2) batch := transformTraces(t, traces) require.Len(t, batch, 2) @@ -460,10 +456,9 @@ func TestInstrumentationLibrary(t *testing.T) { traces, spans := newTracesSpans() spans.InstrumentationLibrary().SetName("library-name") spans.InstrumentationLibrary().SetVersion("1.2.3") - otelSpan := pdata.NewSpan() + otelSpan := spans.Spans().AppendEmpty() otelSpan.SetTraceID(pdata.NewTraceID([16]byte{1})) otelSpan.SetSpanID(pdata.NewSpanID([8]byte{2})) - spans.Spans().Append(otelSpan) events := transformTraces(t, traces) tx := events[0].Transaction @@ -581,12 +576,12 @@ func TestSpanNetworkAttributes(t *testing.T) { func TestArrayLabels(t *testing.T) { stringArray := pdata.NewAttributeValueArray() - stringArray.ArrayVal().Append(pdata.NewAttributeValueString("string1")) - stringArray.ArrayVal().Append(pdata.NewAttributeValueString("string2")) + stringArray.ArrayVal().AppendEmpty().SetStringVal("string1") + stringArray.ArrayVal().AppendEmpty().SetStringVal("string2") boolArray := pdata.NewAttributeValueArray() - boolArray.ArrayVal().Append(pdata.NewAttributeValueBool(false)) - boolArray.ArrayVal().Append(pdata.NewAttributeValueBool(true)) + boolArray.ArrayVal().AppendEmpty().SetBoolVal(false) + boolArray.ArrayVal().AppendEmpty().SetBoolVal(true) tx := transformTransactionWithAttributes(t, map[string]pdata.AttributeValue{ "string_array": stringArray, @@ -635,22 +630,20 @@ func TestConsumeTracesExportTimestamp(t *testing.T) { exportedSpanTimestamp := exportTimestamp.Add(spanOffset) exportedExceptionTimestamp := exportTimestamp.Add(exceptionOffset) - otelSpan1 := pdata.NewSpan() + otelSpan1 := otelSpans.Spans().AppendEmpty() otelSpan1.SetTraceID(pdata.NewTraceID([16]byte{1})) otelSpan1.SetSpanID(pdata.NewSpanID([8]byte{2})) otelSpan1.SetStartTimestamp(pdata.TimestampFromTime(exportedTransactionTimestamp)) otelSpan1.SetEndTimestamp(pdata.TimestampFromTime(exportedTransactionTimestamp.Add(transactionDuration))) - otelSpans.Spans().Append(otelSpan1) - otelSpan2 := pdata.NewSpan() + otelSpan2 := otelSpans.Spans().AppendEmpty() otelSpan2.SetTraceID(pdata.NewTraceID([16]byte{1})) otelSpan2.SetSpanID(pdata.NewSpanID([8]byte{2})) otelSpan2.SetParentSpanID(pdata.NewSpanID([8]byte{3})) otelSpan2.SetStartTimestamp(pdata.TimestampFromTime(exportedSpanTimestamp)) otelSpan2.SetEndTimestamp(pdata.TimestampFromTime(exportedSpanTimestamp.Add(spanDuration))) - otelSpans.Spans().Append(otelSpan2) - otelSpanEvent := pdata.NewSpanEvent() + otelSpanEvent := otelSpan2.Events().AppendEmpty() otelSpanEvent.SetTimestamp(pdata.TimestampFromTime(exportedExceptionTimestamp)) otelSpanEvent.SetName("exception") otelSpanEvent.Attributes().InitFromMap(map[string]pdata.AttributeValue{ @@ -658,7 +651,6 @@ func 
TestConsumeTracesExportTimestamp(t *testing.T) { "exception.message": pdata.NewAttributeValueString("the_message"), "exception.stacktrace": pdata.NewAttributeValueString("the_stacktrace"), }) - otelSpan2.Events().Append(otelSpanEvent) batch := transformTraces(t, traces) require.Len(t, batch, 3) @@ -1210,21 +1202,20 @@ func jaegerKeyValue(k string, v interface{}) jaegermodel.KeyValue { func transformTransactionWithAttributes(t *testing.T, attrs map[string]pdata.AttributeValue, configFns ...func(pdata.Span)) *model.Transaction { traces, spans := newTracesSpans() - otelSpan := pdata.NewSpan() + otelSpan := spans.Spans().AppendEmpty() otelSpan.SetTraceID(pdata.NewTraceID([16]byte{1})) otelSpan.SetSpanID(pdata.NewSpanID([8]byte{2})) for _, fn := range configFns { fn(otelSpan) } otelSpan.Attributes().InitFromMap(attrs) - spans.Spans().Append(otelSpan) events := transformTraces(t, traces) return events[0].Transaction } func transformSpanWithAttributes(t *testing.T, attrs map[string]pdata.AttributeValue, configFns ...func(pdata.Span)) *model.Span { traces, spans := newTracesSpans() - otelSpan := pdata.NewSpan() + otelSpan := spans.Spans().AppendEmpty() otelSpan.SetTraceID(pdata.NewTraceID([16]byte{1})) otelSpan.SetSpanID(pdata.NewSpanID([8]byte{2})) otelSpan.SetParentSpanID(pdata.NewSpanID([8]byte{3})) @@ -1232,7 +1223,6 @@ func transformSpanWithAttributes(t *testing.T, attrs map[string]pdata.AttributeV fn(otelSpan) } otelSpan.Attributes().InitFromMap(attrs) - spans.Spans().Append(otelSpan) events := transformTraces(t, traces) return events[0].Span } @@ -1242,13 +1232,12 @@ func transformTransactionSpanEvents(t *testing.T, language string, spanEvents .. traces.ResourceSpans().At(0).Resource().Attributes().InitFromMap(map[string]pdata.AttributeValue{ conventions.AttributeTelemetrySDKLanguage: pdata.NewAttributeValueString(language), }) - otelSpan := pdata.NewSpan() + otelSpan := spans.Spans().AppendEmpty() otelSpan.SetTraceID(pdata.NewTraceID([16]byte{1})) otelSpan.SetSpanID(pdata.NewSpanID([8]byte{2})) for _, spanEvent := range spanEvents { - otelSpan.Events().Append(spanEvent) + spanEvent.CopyTo(otelSpan.Events().AppendEmpty()) } - spans.Spans().Append(otelSpan) events := transformTraces(t, traces) require.NotEmpty(t, events) @@ -1274,10 +1263,8 @@ func transformTraces(t *testing.T, traces pdata.Traces) model.Batch { func newTracesSpans() (pdata.Traces, pdata.InstrumentationLibrarySpans) { traces := pdata.NewTraces() - resourceSpans := pdata.NewResourceSpans() - librarySpans := pdata.NewInstrumentationLibrarySpans() - resourceSpans.InstrumentationLibrarySpans().Append(librarySpans) - traces.ResourceSpans().Append(resourceSpans) + resourceSpans := traces.ResourceSpans().AppendEmpty() + librarySpans := resourceSpans.InstrumentationLibrarySpans().AppendEmpty() return traces, librarySpans } diff --git a/processor/otel/exceptions_test.go b/processor/otel/exceptions_test.go index e83f26ae16f..556692af423 100644 --- a/processor/otel/exceptions_test.go +++ b/processor/otel/exceptions_test.go @@ -40,7 +40,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/translator/conventions" "github.com/elastic/apm-server/model" diff --git a/processor/otel/metadata.go b/processor/otel/metadata.go index 9ea6350703a..b7730baa587 100644 --- a/processor/otel/metadata.go +++ b/processor/otel/metadata.go @@ -23,7 +23,7 @@ import ( "regexp" "strings" - 
"go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/translator/conventions" "github.com/elastic/apm-server/model" diff --git a/processor/otel/metadata_test.go b/processor/otel/metadata_test.go index 6fd126fca99..d0dbad01679 100644 --- a/processor/otel/metadata_test.go +++ b/processor/otel/metadata_test.go @@ -21,7 +21,7 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "github.com/elastic/apm-server/model" "github.com/elastic/beats/v7/libbeat/common" @@ -208,12 +208,12 @@ func TestResourceConventions(t *testing.T) { func TestResourceLabels(t *testing.T) { stringArray := pdata.NewAttributeValueArray() - stringArray.ArrayVal().Append(pdata.NewAttributeValueString("abc")) - stringArray.ArrayVal().Append(pdata.NewAttributeValueString("def")) + stringArray.ArrayVal().AppendEmpty().SetStringVal("abc") + stringArray.ArrayVal().AppendEmpty().SetStringVal("def") intArray := pdata.NewAttributeValueArray() - intArray.ArrayVal().Append(pdata.NewAttributeValueInt(123)) - intArray.ArrayVal().Append(pdata.NewAttributeValueInt(456)) + intArray.ArrayVal().AppendEmpty().SetIntVal(123) + intArray.ArrayVal().AppendEmpty().SetIntVal(456) metadata := transformResourceMetadata(t, map[string]pdata.AttributeValue{ "string_array": stringArray, @@ -228,10 +228,9 @@ func TestResourceLabels(t *testing.T) { func transformResourceMetadata(t *testing.T, resourceAttrs map[string]pdata.AttributeValue) model.Metadata { traces, spans := newTracesSpans() traces.ResourceSpans().At(0).Resource().Attributes().InitFromMap(resourceAttrs) - otelSpan := pdata.NewSpan() + otelSpan := spans.Spans().AppendEmpty() otelSpan.SetTraceID(pdata.NewTraceID([16]byte{1})) otelSpan.SetSpanID(pdata.NewSpanID([8]byte{2})) - spans.Spans().Append(otelSpan) events := transformTraces(t, traces) return events[0].Transaction.Metadata } diff --git a/processor/otel/metrics.go b/processor/otel/metrics.go index 77f9a9fc126..ca0af9e8ec3 100644 --- a/processor/otel/metrics.go +++ b/processor/otel/metrics.go @@ -42,8 +42,7 @@ import ( "sync/atomic" "time" - "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/otlptext" + "go.opentelemetry.io/collector/model/pdata" logs "github.com/elastic/apm-server/log" "github.com/elastic/apm-server/model" @@ -57,7 +56,12 @@ func (c *Consumer) ConsumeMetrics(ctx context.Context, metrics pdata.Metrics) er receiveTimestamp := time.Now() logger := logp.NewLogger(logs.Otel) if logger.IsDebug() { - logger.Debug(otlptext.Metrics(metrics)) + data, err := jsonMetricsMarshaler.MarshalMetrics(metrics) + if err != nil { + logger.Debug(err) + } else { + logger.Debug(data) + } } batch := c.convertMetrics(metrics, receiveTimestamp) return c.Processor.ProcessBatch(ctx, batch) @@ -129,8 +133,8 @@ func (c *Consumer) addMetric(metric pdata.Metric, ms *metricsets) bool { ) } return true - case pdata.MetricDataTypeDoubleGauge: - dps := metric.DoubleGauge().DataPoints() + case pdata.MetricDataTypeGauge: + dps := metric.Gauge().DataPoints() for i := 0; i < dps.Len(); i++ { dp := dps.At(i) ms.upsert( @@ -159,8 +163,8 @@ func (c *Consumer) addMetric(metric pdata.Metric, ms *metricsets) bool { ) } return true - case pdata.MetricDataTypeDoubleSum: - dps := metric.DoubleSum().DataPoints() + case pdata.MetricDataTypeSum: + dps := metric.Sum().DataPoints() for i := 0; i < dps.Len(); i++ { dp := dps.At(i) ms.upsert( diff --git 
a/processor/otel/metrics_test.go b/processor/otel/metrics_test.go index 9548a101776..e6ba139f2c7 100644 --- a/processor/otel/metrics_test.go +++ b/processor/otel/metrics_test.go @@ -41,7 +41,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" "github.com/elastic/apm-server/model" "github.com/elastic/apm-server/processor/otel" @@ -51,15 +51,11 @@ import ( func TestConsumeMetrics(t *testing.T) { metrics := pdata.NewMetrics() - resourceMetrics := pdata.NewResourceMetrics() - metrics.ResourceMetrics().Append(resourceMetrics) - instrumentationLibraryMetrics := pdata.NewInstrumentationLibraryMetrics() - resourceMetrics.InstrumentationLibraryMetrics().Append(instrumentationLibraryMetrics) + resourceMetrics := metrics.ResourceMetrics().AppendEmpty() + instrumentationLibraryMetrics := resourceMetrics.InstrumentationLibraryMetrics().AppendEmpty() metricSlice := instrumentationLibraryMetrics.Metrics() appendMetric := func(name string, dataType pdata.MetricDataType) pdata.Metric { - n := metricSlice.Len() - metricSlice.Resize(n + 1) - metric := metricSlice.At(n) + metric := metricSlice.AppendEmpty() metric.SetName(name) metric.SetDataType(dataType) return metric @@ -72,81 +68,91 @@ func TestConsumeMetrics(t *testing.T) { metric := appendMetric("int_gauge_metric", pdata.MetricDataTypeIntGauge) intGauge := metric.IntGauge() - intGauge.DataPoints().Resize(4) - intGauge.DataPoints().At(0).SetTimestamp(pdata.TimestampFromTime(timestamp0)) - intGauge.DataPoints().At(0).SetValue(1) - intGauge.DataPoints().At(1).SetTimestamp(pdata.TimestampFromTime(timestamp1)) - intGauge.DataPoints().At(1).SetValue(2) - intGauge.DataPoints().At(1).LabelsMap().InitFromMap(map[string]string{"k": "v"}) - intGauge.DataPoints().At(2).SetTimestamp(pdata.TimestampFromTime(timestamp1)) - intGauge.DataPoints().At(2).SetValue(3) - intGauge.DataPoints().At(3).SetTimestamp(pdata.TimestampFromTime(timestamp1)) - intGauge.DataPoints().At(3).SetValue(4) - intGauge.DataPoints().At(3).LabelsMap().InitFromMap(map[string]string{"k": "v2"}) - - metric = appendMetric("double_gauge_metric", pdata.MetricDataTypeDoubleGauge) - doubleGauge := metric.DoubleGauge() - doubleGauge.DataPoints().Resize(4) - doubleGauge.DataPoints().At(0).SetTimestamp(pdata.TimestampFromTime(timestamp0)) - doubleGauge.DataPoints().At(0).SetValue(5) - doubleGauge.DataPoints().At(1).SetTimestamp(pdata.TimestampFromTime(timestamp1)) - doubleGauge.DataPoints().At(1).SetValue(6) - doubleGauge.DataPoints().At(1).LabelsMap().InitFromMap(map[string]string{"k": "v"}) - doubleGauge.DataPoints().At(2).SetTimestamp(pdata.TimestampFromTime(timestamp1)) - doubleGauge.DataPoints().At(2).SetValue(7) - doubleGauge.DataPoints().At(3).SetTimestamp(pdata.TimestampFromTime(timestamp1)) - doubleGauge.DataPoints().At(3).SetValue(8) - doubleGauge.DataPoints().At(3).LabelsMap().InitFromMap(map[string]string{"k": "v2"}) + intGaugeDP0 := intGauge.DataPoints().AppendEmpty() + intGaugeDP0.SetTimestamp(pdata.TimestampFromTime(timestamp0)) + intGaugeDP0.SetValue(1) + intGaugeDP1 := intGauge.DataPoints().AppendEmpty() + intGaugeDP1.SetTimestamp(pdata.TimestampFromTime(timestamp1)) + intGaugeDP1.SetValue(2) + intGaugeDP1.LabelsMap().InitFromMap(map[string]string{"k": "v"}) + intGaugeDP2 := intGauge.DataPoints().AppendEmpty() + intGaugeDP2.SetTimestamp(pdata.TimestampFromTime(timestamp1)) + intGaugeDP2.SetValue(3) + intGaugeDP3 := intGauge.DataPoints().AppendEmpty() + 
intGaugeDP3.SetTimestamp(pdata.TimestampFromTime(timestamp1)) + intGaugeDP3.SetValue(4) + intGaugeDP3.LabelsMap().InitFromMap(map[string]string{"k": "v2"}) + + metric = appendMetric("gauge_metric", pdata.MetricDataTypeGauge) + gauge := metric.Gauge() + gaugeDP0 := gauge.DataPoints().AppendEmpty() + gaugeDP0.SetTimestamp(pdata.TimestampFromTime(timestamp0)) + gaugeDP0.SetValue(5) + gaugeDP1 := gauge.DataPoints().AppendEmpty() + gaugeDP1.SetTimestamp(pdata.TimestampFromTime(timestamp1)) + gaugeDP1.SetValue(6) + gaugeDP1.LabelsMap().InitFromMap(map[string]string{"k": "v"}) + gaugeDP2 := gauge.DataPoints().AppendEmpty() + gaugeDP2.SetTimestamp(pdata.TimestampFromTime(timestamp1)) + gaugeDP2.SetValue(7) + gaugeDP3 := gauge.DataPoints().AppendEmpty() + gaugeDP3.SetTimestamp(pdata.TimestampFromTime(timestamp1)) + gaugeDP3.SetValue(8) + gaugeDP3.LabelsMap().InitFromMap(map[string]string{"k": "v2"}) metric = appendMetric("int_sum_metric", pdata.MetricDataTypeIntSum) intSum := metric.IntSum() - intSum.DataPoints().Resize(3) - intSum.DataPoints().At(0).SetTimestamp(pdata.TimestampFromTime(timestamp0)) - intSum.DataPoints().At(0).SetValue(9) - intSum.DataPoints().At(1).SetTimestamp(pdata.TimestampFromTime(timestamp1)) - intSum.DataPoints().At(1).SetValue(10) - intSum.DataPoints().At(1).LabelsMap().InitFromMap(map[string]string{"k": "v"}) - intSum.DataPoints().At(2).SetTimestamp(pdata.TimestampFromTime(timestamp1)) - intSum.DataPoints().At(2).SetValue(11) - intSum.DataPoints().At(2).LabelsMap().InitFromMap(map[string]string{"k2": "v"}) - - metric = appendMetric("double_sum_metric", pdata.MetricDataTypeDoubleSum) - doubleSum := metric.DoubleSum() - doubleSum.DataPoints().Resize(3) - doubleSum.DataPoints().At(0).SetTimestamp(pdata.TimestampFromTime(timestamp0)) - doubleSum.DataPoints().At(0).SetValue(12) - doubleSum.DataPoints().At(1).SetTimestamp(pdata.TimestampFromTime(timestamp1)) - doubleSum.DataPoints().At(1).SetValue(13) - doubleSum.DataPoints().At(1).LabelsMap().InitFromMap(map[string]string{"k": "v"}) - doubleSum.DataPoints().At(2).SetTimestamp(pdata.TimestampFromTime(timestamp1)) - doubleSum.DataPoints().At(2).SetValue(14) - doubleSum.DataPoints().At(2).LabelsMap().InitFromMap(map[string]string{"k2": "v"}) + intSumDP0 := intSum.DataPoints().AppendEmpty() + intSumDP0.SetTimestamp(pdata.TimestampFromTime(timestamp0)) + intSumDP0.SetValue(9) + intSumDP1 := intSum.DataPoints().AppendEmpty() + intSumDP1.SetTimestamp(pdata.TimestampFromTime(timestamp1)) + intSumDP1.SetValue(10) + intSumDP1.LabelsMap().InitFromMap(map[string]string{"k": "v"}) + intSumDP2 := intSum.DataPoints().AppendEmpty() + intSumDP2.SetTimestamp(pdata.TimestampFromTime(timestamp1)) + intSumDP2.SetValue(11) + intSumDP2.LabelsMap().InitFromMap(map[string]string{"k2": "v"}) + + metric = appendMetric("sum_metric", pdata.MetricDataTypeSum) + sum := metric.Sum() + sumDP0 := sum.DataPoints().AppendEmpty() + sumDP0.SetTimestamp(pdata.TimestampFromTime(timestamp0)) + sumDP0.SetValue(12) + sumDP1 := sum.DataPoints().AppendEmpty() + sumDP1.SetTimestamp(pdata.TimestampFromTime(timestamp1)) + sumDP1.SetValue(13) + sumDP1.LabelsMap().InitFromMap(map[string]string{"k": "v"}) + sumDP2 := sum.DataPoints().AppendEmpty() + sumDP2.SetTimestamp(pdata.TimestampFromTime(timestamp1)) + sumDP2.SetValue(14) + sumDP2.LabelsMap().InitFromMap(map[string]string{"k2": "v"}) metric = appendMetric("histogram_metric", pdata.MetricDataTypeHistogram) doubleHistogram := metric.Histogram() - doubleHistogram.DataPoints().Resize(1) - 
doubleHistogram.DataPoints().At(0).SetTimestamp(pdata.TimestampFromTime(timestamp0)) - doubleHistogram.DataPoints().At(0).SetBucketCounts([]uint64{1, 1, 2, 3}) - doubleHistogram.DataPoints().At(0).SetExplicitBounds([]float64{-1.0, 2.0, 3.5}) + doubleHistogramDP := doubleHistogram.DataPoints().AppendEmpty() + doubleHistogramDP.SetTimestamp(pdata.TimestampFromTime(timestamp0)) + doubleHistogramDP.SetBucketCounts([]uint64{1, 1, 2, 3}) + doubleHistogramDP.SetExplicitBounds([]float64{-1.0, 2.0, 3.5}) metric = appendMetric("int_histogram_metric", pdata.MetricDataTypeIntHistogram) intHistogram := metric.IntHistogram() - intHistogram.DataPoints().Resize(1) - intHistogram.DataPoints().At(0).SetTimestamp(pdata.TimestampFromTime(timestamp0)) - intHistogram.DataPoints().At(0).SetBucketCounts([]uint64{0, 1, 2, 3}) - intHistogram.DataPoints().At(0).SetExplicitBounds([]float64{1.0, 2.0, 3.0}) + intHistogramDP := intHistogram.DataPoints().AppendEmpty() + intHistogramDP.SetTimestamp(pdata.TimestampFromTime(timestamp0)) + intHistogramDP.SetBucketCounts([]uint64{0, 1, 2, 3}) + intHistogramDP.SetExplicitBounds([]float64{1.0, 2.0, 3.0}) metric = appendMetric("invalid_histogram_metric", pdata.MetricDataTypeHistogram) invalidHistogram := metric.Histogram() - invalidHistogram.DataPoints().Resize(1) - invalidHistogram.DataPoints().At(0).SetTimestamp(pdata.TimestampFromTime(timestamp0)) - invalidHistogram.DataPoints().At(0).SetBucketCounts([]uint64{1, 2, 3}) // should be one more bucket count than bounds - invalidHistogram.DataPoints().At(0).SetExplicitBounds([]float64{1, 2, 3}) + invalidHistogramDP := invalidHistogram.DataPoints().AppendEmpty() + invalidHistogramDP.SetTimestamp(pdata.TimestampFromTime(timestamp0)) + invalidHistogramDP.SetBucketCounts([]uint64{1, 2, 3}) // should be one more bucket count than bounds + invalidHistogramDP.SetExplicitBounds([]float64{1, 2, 3}) expectDropped++ // Summary metrics are not yet supported, and will be dropped. 
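// Illustrative sketch only (hypothetical dps variable, not part of the patched files):
// the pattern applied throughout these tests is the collector's slice-API change, where
// pre-sizing plus index access is replaced by appending an empty element in place:
//
//	// before this update
//	dps.Resize(1)
//	dp := dps.At(0)
//	dp.SetValue(1)
//
//	// after this update
//	dp := dps.AppendEmpty()
//	dp.SetValue(1)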
- metric = appendMetric("double_summary_metric", pdata.MetricDataTypeSummary) - metric.Summary().DataPoints().Resize(1) + metric = appendMetric("summary_metric", pdata.MetricDataTypeSummary) + metric.Summary().DataPoints().AppendEmpty() expectDropped++ metadata := model.Metadata{ @@ -169,10 +175,10 @@ func TestConsumeMetrics(t *testing.T) { Metadata: metadata, Timestamp: timestamp0, Samples: map[string]model.MetricsetSample{ - "int_gauge_metric": {Value: 1, Type: "gauge"}, - "double_gauge_metric": {Value: 5, Type: "gauge"}, - "int_sum_metric": {Value: 9, Type: "counter"}, - "double_sum_metric": {Value: 12, Type: "counter"}, + "int_gauge_metric": {Value: 1, Type: "gauge"}, + "gauge_metric": {Value: 5, Type: "gauge"}, + "int_sum_metric": {Value: 9, Type: "counter"}, + "sum_metric": {Value: 12, Type: "counter"}, "histogram_metric": { Type: "histogram", Counts: []int64{1, 1, 2, 3}, @@ -188,49 +194,45 @@ func TestConsumeMetrics(t *testing.T) { Metadata: metadata, Timestamp: timestamp1, Samples: map[string]model.MetricsetSample{ - "int_gauge_metric": {Value: 3, Type: "gauge"}, - "double_gauge_metric": {Value: 7, Type: "gauge"}, + "int_gauge_metric": {Value: 3, Type: "gauge"}, + "gauge_metric": {Value: 7, Type: "gauge"}, }, }, { Metadata: metadata, Timestamp: timestamp1, Labels: common.MapStr{"k": "v"}, Samples: map[string]model.MetricsetSample{ - "int_gauge_metric": {Value: 2, Type: "gauge"}, - "double_gauge_metric": {Value: 6, Type: "gauge"}, - "int_sum_metric": {Value: 10, Type: "counter"}, - "double_sum_metric": {Value: 13, Type: "counter"}, + "int_gauge_metric": {Value: 2, Type: "gauge"}, + "gauge_metric": {Value: 6, Type: "gauge"}, + "int_sum_metric": {Value: 10, Type: "counter"}, + "sum_metric": {Value: 13, Type: "counter"}, }, }, { Metadata: metadata, Timestamp: timestamp1, Labels: common.MapStr{"k": "v2"}, Samples: map[string]model.MetricsetSample{ - "int_gauge_metric": {Value: 4, Type: "gauge"}, - "double_gauge_metric": {Value: 8, Type: "gauge"}, + "int_gauge_metric": {Value: 4, Type: "gauge"}, + "gauge_metric": {Value: 8, Type: "gauge"}, }, }, { Metadata: metadata, Timestamp: timestamp1, Labels: common.MapStr{"k2": "v"}, Samples: map[string]model.MetricsetSample{ - "int_sum_metric": {Value: 11, Type: "counter"}, - "double_sum_metric": {Value: 14, Type: "counter"}, + "int_sum_metric": {Value: 11, Type: "counter"}, + "sum_metric": {Value: 14, Type: "counter"}, }, }}, metricsets) } func TestConsumeMetrics_JVM(t *testing.T) { metrics := pdata.NewMetrics() - resourceMetrics := pdata.NewResourceMetrics() - metrics.ResourceMetrics().Append(resourceMetrics) - instrumentationLibraryMetrics := pdata.NewInstrumentationLibraryMetrics() - resourceMetrics.InstrumentationLibraryMetrics().Append(instrumentationLibraryMetrics) + resourceMetrics := metrics.ResourceMetrics().AppendEmpty() + instrumentationLibraryMetrics := resourceMetrics.InstrumentationLibraryMetrics().AppendEmpty() metricSlice := instrumentationLibraryMetrics.Metrics() appendMetric := func(name string, dataType pdata.MetricDataType) pdata.Metric { - n := metricSlice.Len() - metricSlice.Resize(n + 1) - metric := metricSlice.At(n) + metric := metricSlice.AppendEmpty() metric.SetName(name) metric.SetDataType(dataType) return metric @@ -240,18 +242,18 @@ func TestConsumeMetrics_JVM(t *testing.T) { addInt64Sum := func(name string, value int64, labels map[string]string) { metric := appendMetric(name, pdata.MetricDataTypeIntSum) intSum := metric.IntSum() - intSum.DataPoints().Resize(1) - 
intSum.DataPoints().At(0).SetTimestamp(pdata.TimestampFromTime(timestamp)) - intSum.DataPoints().At(0).SetValue(value) - intSum.DataPoints().At(0).LabelsMap().InitFromMap(labels) + dp := intSum.DataPoints().AppendEmpty() + dp.SetTimestamp(pdata.TimestampFromTime(timestamp)) + dp.SetValue(value) + dp.LabelsMap().InitFromMap(labels) } addInt64Gauge := func(name string, value int64, labels map[string]string) { metric := appendMetric(name, pdata.MetricDataTypeIntGauge) intSum := metric.IntGauge() - intSum.DataPoints().Resize(1) - intSum.DataPoints().At(0).SetTimestamp(pdata.TimestampFromTime(timestamp)) - intSum.DataPoints().At(0).SetValue(value) - intSum.DataPoints().At(0).LabelsMap().InitFromMap(labels) + dp := intSum.DataPoints().AppendEmpty() + dp.SetTimestamp(pdata.TimestampFromTime(timestamp)) + dp.SetValue(value) + dp.LabelsMap().InitFromMap(labels) } addInt64Sum("runtime.jvm.gc.time", 9, map[string]string{"gc": "G1 Young Generation"}) addInt64Sum("runtime.jvm.gc.count", 2, map[string]string{"gc": "G1 Young Generation"}) @@ -322,9 +324,8 @@ func TestConsumeMetrics_JVM(t *testing.T) { } func TestConsumeMetricsExportTimestamp(t *testing.T) { - resourceMetrics := pdata.NewResourceMetrics() metrics := pdata.NewMetrics() - metrics.ResourceMetrics().Append(resourceMetrics) + resourceMetrics := metrics.ResourceMetrics().AppendEmpty() // The actual timestamps will be non-deterministic, as they are adjusted // based on the server's clock. @@ -344,17 +345,14 @@ func TestConsumeMetricsExportTimestamp(t *testing.T) { dataPointOffset := -time.Second exportedDataPointTimestamp := exportTimestamp.Add(dataPointOffset) - instrumentationLibraryMetrics := pdata.NewInstrumentationLibraryMetrics() - resourceMetrics.InstrumentationLibraryMetrics().Append(instrumentationLibraryMetrics) - - metric := pdata.NewMetric() + instrumentationLibraryMetrics := resourceMetrics.InstrumentationLibraryMetrics().AppendEmpty() + metric := instrumentationLibraryMetrics.Metrics().AppendEmpty() metric.SetName("int_gauge") metric.SetDataType(pdata.MetricDataTypeIntGauge) intGauge := metric.IntGauge() - intGauge.DataPoints().Resize(1) - intGauge.DataPoints().At(0).SetTimestamp(pdata.TimestampFromTime(exportedDataPointTimestamp)) - intGauge.DataPoints().At(0).SetValue(1) - instrumentationLibraryMetrics.Metrics().Append(metric) + dp := intGauge.DataPoints().AppendEmpty() + dp.SetTimestamp(pdata.TimestampFromTime(exportedDataPointTimestamp)) + dp.SetValue(1) metricsets, _ := transformMetrics(t, metrics) require.Len(t, metricsets, 1) diff --git a/processor/otel/timestamps.go b/processor/otel/timestamps.go index 81f52fb0981..17f0403f329 100644 --- a/processor/otel/timestamps.go +++ b/processor/otel/timestamps.go @@ -20,7 +20,7 @@ package otel import ( "time" - "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/model/pdata" ) // exportTimestamp extracts the `telemetry.sdk.elastic_export_timestamp`