+component: exporter/exportertest
+note: Put exportertest into its own module + +# One or more tracking issues or pull requests related to the change +issues: [11461] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/.chloggen/merge-function-as-requet-method.yaml b/.chloggen/merge-function-as-requet-method.yaml new file mode 100644 index 00000000000..fae049f3f02 --- /dev/null +++ b/.chloggen/merge-function-as-requet-method.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: breaking + +# The name of the component, or a single word describing the area of concern, (e.g. otlpreceiver) +component: exporter + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Made mergeFunc and mergeSplitFunc required methods of exporter.Request + +# One or more tracking issues or pull requests related to the change +issues: [10368] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: | + mergeFunc and mergeSplitFunc used to be part of the configuration passed to the exporter. Now they are + methods on the request.
'[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [api] diff --git a/.github/ISSUE_TEMPLATE/stabilization.md b/.github/ISSUE_TEMPLATE/stabilization.md index aea8102517b..62e9b8f1018 100644 --- a/.github/ISSUE_TEMPLATE/stabilization.md +++ b/.github/ISSUE_TEMPLATE/stabilization.md @@ -12,7 +12,7 @@ Before stabilizing a module, an approver or maintainer must make sure that the f - [ ] No TODOs in the module code that would require breaking changes - [ ] No deprecated symbols in the module - [ ] No symbols marked as experimental in the module -- [ ] The module follows the [Coding guidelines](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CONTRIBUTING.md) +- [ ] The module follows the [Coding guidelines](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/coding-guidelines.md) Please also make sure to publicly announce our intent to stabilize the module on: diff --git a/Makefile b/Makefile index 1a4357bd5b9..cab7722c87a 100644 --- a/Makefile +++ b/Makefile @@ -294,6 +294,7 @@ check-contrib: -replace go.opentelemetry.io/collector/exporter=$(CURDIR)/exporter \ -replace go.opentelemetry.io/collector/exporter/debugexporter=$(CURDIR)/exporter/debugexporter \ -replace go.opentelemetry.io/collector/exporter/exporterprofiles=$(CURDIR)/exporter/exporterprofiles \ + -replace go.opentelemetry.io/collector/exporter/exportertest=$(CURDIR)/exporter/exportertest \ -replace go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles=$(CURDIR)/exporter/exporterhelper/exporterhelperprofiles \ -replace go.opentelemetry.io/collector/exporter/nopexporter=$(CURDIR)/exporter/nopexporter \ -replace go.opentelemetry.io/collector/exporter/otlpexporter=$(CURDIR)/exporter/otlpexporter \ @@ -368,6 +369,7 @@ restore-contrib: -dropreplace go.opentelemetry.io/collector/consumer/consumertest \ -dropreplace 
go.opentelemetry.io/collector/exporter \ -dropreplace go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles \ + -dropreplace go.opentelemetry.io/collector/exporter/exportertest \ -dropreplace go.opentelemetry.io/collector/exporter/debugexporter \ -dropreplace go.opentelemetry.io/collector/exporter/nopexporter \ -dropreplace go.opentelemetry.io/collector/exporter/otlpexporter \ diff --git a/client/go.mod b/client/go.mod index 6322ea7912d..5fd9e460dd9 100644 --- a/client/go.mod +++ b/client/go.mod @@ -4,7 +4,7 @@ go 1.22.0 require ( github.com/stretchr/testify v1.9.0 - go.opentelemetry.io/collector/consumer v0.111.0 + go.opentelemetry.io/collector/consumer v0.111.1-0.20241021181817-007f06b7c4a8 go.opentelemetry.io/collector/pdata v1.17.0 go.uber.org/goleak v1.3.0 ) diff --git a/cmd/builder/README.md b/cmd/builder/README.md index 159ac167ae7..8d26c3b5816 100644 --- a/cmd/builder/README.md +++ b/cmd/builder/README.md @@ -57,11 +57,34 @@ $ /tmp/dist/otelcol-custom --config=/tmp/otelcol.yaml ## Installation -There are two supported ways to install the builder: via the official releases (recommended) and through `go install`. +There are three supported ways to install the builder: +1. Via official release Docker images (recommended) +2. Via official release binaries (recommended) +3. Through `go install` (not recommended) -### Official releases +### Official release Docker image -This is the recommended installation method. Download the binary for your respective platform from the ["Releases"](https://github.com/open-telemetry/opentelemetry-collector-releases/releases?q=cmd/builder) page. +You will find the official docker images at [DockerHub](https://hub.docker.com/r/otel/opentelemetry-collector-builder). + +Pull the image via tagged version number (e.g. v0.110.0) or 'latest'. You may also specify platform, although Docker will handle this automatically as it is a multi-platform build. 
If you mount a file at a different location inside the container, your `builder-config.yaml` must be specified as a command line argument to ocb.
### `go install` diff --git a/cmd/builder/internal/builder/main_test.go b/cmd/builder/internal/builder/main_test.go index 8152cee79a9..787f472762b 100644 --- a/cmd/builder/internal/builder/main_test.go +++ b/cmd/builder/internal/builder/main_test.go @@ -70,6 +70,7 @@ var ( "/exporter", "/exporter/debugexporter", "/exporter/exporterprofiles", + "/exporter/exportertest", "/exporter/exporterhelper/exporterhelperprofiles", "/exporter/nopexporter", "/exporter/otlpexporter", diff --git a/cmd/builder/test/core.builder.yaml b/cmd/builder/test/core.builder.yaml index af5d1981932..459e892d0bb 100644 --- a/cmd/builder/test/core.builder.yaml +++ b/cmd/builder/test/core.builder.yaml @@ -42,6 +42,7 @@ replaces: - go.opentelemetry.io/collector/exporter => ${WORKSPACE_DIR}/exporter - go.opentelemetry.io/collector/exporter/debugexporter => ${WORKSPACE_DIR}/exporter/debugexporter - go.opentelemetry.io/collector/exporter/exporterprofiles => ${WORKSPACE_DIR}/exporter/exporterprofiles + - go.opentelemetry.io/collector/exporter/exportertest => ${WORKSPACE_DIR}/exporter/exportertest - go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles => ${WORKSPACE_DIR}/exporter/exporterhelper/exporterhelperprofiles - go.opentelemetry.io/collector/extension => ${WORKSPACE_DIR}/extension - go.opentelemetry.io/collector/extension/auth => ${WORKSPACE_DIR}/extension/auth diff --git a/cmd/mdatagen/go.mod b/cmd/mdatagen/go.mod index 3717552bb7f..5581ec9b7b3 100644 --- a/cmd/mdatagen/go.mod +++ b/cmd/mdatagen/go.mod @@ -10,12 +10,12 @@ require ( go.opentelemetry.io/collector/config/configtelemetry v0.111.0 go.opentelemetry.io/collector/confmap v1.17.0 go.opentelemetry.io/collector/confmap/provider/fileprovider v1.17.0 - go.opentelemetry.io/collector/consumer v0.111.0 + go.opentelemetry.io/collector/consumer v0.111.1-0.20241021181817-007f06b7c4a8 go.opentelemetry.io/collector/consumer/consumertest v0.111.0 go.opentelemetry.io/collector/filter v0.111.0 
go.opentelemetry.io/collector/pdata v1.17.0 - go.opentelemetry.io/collector/processor v0.111.0 - go.opentelemetry.io/collector/processor/processortest v0.111.0 + go.opentelemetry.io/collector/processor v0.111.1-0.20241021181817-007f06b7c4a8 + go.opentelemetry.io/collector/processor/processortest v0.0.0-20241021181817-007f06b7c4a8 go.opentelemetry.io/collector/receiver v0.111.0 go.opentelemetry.io/collector/semconv v0.111.0 go.opentelemetry.io/otel/metric v1.31.0 @@ -45,7 +45,7 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/spf13/pflag v1.0.5 // indirect go.opentelemetry.io/collector/component/componentstatus v0.111.0 // indirect - go.opentelemetry.io/collector/consumer/consumererror v0.111.0 // indirect + go.opentelemetry.io/collector/consumer/consumererror v0.0.0-20241021181817-007f06b7c4a8 // indirect go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 // indirect go.opentelemetry.io/collector/pdata/pprofile v0.111.0 // indirect go.opentelemetry.io/collector/pdata/testdata v0.111.0 // indirect diff --git a/cmd/otelcorecol/builder-config.yaml b/cmd/otelcorecol/builder-config.yaml index 0c4b06f4af0..09ba29cec66 100644 --- a/cmd/otelcorecol/builder-config.yaml +++ b/cmd/otelcorecol/builder-config.yaml @@ -69,6 +69,7 @@ replaces: - go.opentelemetry.io/collector/connector/forwardconnector => ../../connector/forwardconnector - go.opentelemetry.io/collector/exporter => ../../exporter - go.opentelemetry.io/collector/exporter/debugexporter => ../../exporter/debugexporter + - go.opentelemetry.io/collector/exporter/exportertest => ../../exporter/exportertest - go.opentelemetry.io/collector/exporter/exporterprofiles => ../../exporter/exporterprofiles - go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles => ../../exporter/exporterhelper/exporterhelperprofiles - go.opentelemetry.io/collector/exporter/nopexporter => ../../exporter/nopexporter diff --git a/cmd/otelcorecol/go.mod b/cmd/otelcorecol/go.mod index 
f8bc7e30897..d58a71fb035 100644 --- a/cmd/otelcorecol/go.mod +++ b/cmd/otelcorecol/go.mod @@ -25,7 +25,7 @@ require ( go.opentelemetry.io/collector/extension/memorylimiterextension v0.111.0 go.opentelemetry.io/collector/extension/zpagesextension v0.111.0 go.opentelemetry.io/collector/otelcol v0.111.0 - go.opentelemetry.io/collector/processor v0.111.0 + go.opentelemetry.io/collector/processor v0.111.1-0.20241021181817-007f06b7c4a8 go.opentelemetry.io/collector/processor/batchprocessor v0.111.0 go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.111.0 go.opentelemetry.io/collector/receiver v0.111.0 @@ -93,14 +93,15 @@ require ( go.opentelemetry.io/collector/config/configtls v1.17.0 // indirect go.opentelemetry.io/collector/config/internal v0.111.0 // indirect go.opentelemetry.io/collector/connector/connectorprofiles v0.111.0 // indirect - go.opentelemetry.io/collector/connector/connectortest v0.111.0 // indirect - go.opentelemetry.io/collector/consumer v0.111.0 // indirect - go.opentelemetry.io/collector/consumer/consumererror v0.111.0 // indirect - go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.0.0-00010101000000-000000000000 // indirect + go.opentelemetry.io/collector/connector/connectortest v0.0.0-20241021181817-007f06b7c4a8 // indirect + go.opentelemetry.io/collector/consumer v0.111.1-0.20241021181817-007f06b7c4a8 // indirect + go.opentelemetry.io/collector/consumer/consumererror v0.0.0-20241021181817-007f06b7c4a8 // indirect + go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.0.0-20241021181817-007f06b7c4a8 // indirect go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 // indirect go.opentelemetry.io/collector/consumer/consumertest v0.111.0 // indirect - go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles v0.0.0-00010101000000-000000000000 // indirect + go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles 
v0.0.0-20241021181817-007f06b7c4a8 // indirect go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0 // indirect + go.opentelemetry.io/collector/exporter/exportertest v0.111.0 // indirect go.opentelemetry.io/collector/extension/auth v0.111.0 // indirect go.opentelemetry.io/collector/extension/experimental/storage v0.111.0 // indirect go.opentelemetry.io/collector/extension/extensioncapabilities v0.111.0 // indirect @@ -110,9 +111,9 @@ require ( go.opentelemetry.io/collector/pdata/pprofile v0.111.0 // indirect go.opentelemetry.io/collector/pdata/testdata v0.111.0 // indirect go.opentelemetry.io/collector/pipeline v0.111.0 // indirect - go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.111.0 // indirect + go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.0.0-20241021181817-007f06b7c4a8 // indirect go.opentelemetry.io/collector/processor/processorprofiles v0.111.0 // indirect - go.opentelemetry.io/collector/processor/processortest v0.111.0 // indirect + go.opentelemetry.io/collector/processor/processortest v0.0.0-20241021181817-007f06b7c4a8 // indirect go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0 // indirect go.opentelemetry.io/collector/semconv v0.111.0 // indirect go.opentelemetry.io/collector/service v0.111.0 // indirect @@ -214,6 +215,8 @@ replace go.opentelemetry.io/collector/exporter => ../../exporter replace go.opentelemetry.io/collector/exporter/debugexporter => ../../exporter/debugexporter +replace go.opentelemetry.io/collector/exporter/exportertest => ../../exporter/exportertest + replace go.opentelemetry.io/collector/exporter/exporterprofiles => ../../exporter/exporterprofiles replace go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles => ../../exporter/exporterhelper/exporterhelperprofiles diff --git a/connector/connectorprofiles/go.mod b/connector/connectorprofiles/go.mod index 655f8219d5d..5f1004d10d9 100644 --- a/connector/connectorprofiles/go.mod +++ 
b/connector/connectorprofiles/go.mod @@ -7,13 +7,13 @@ require ( go.opentelemetry.io/collector v0.111.0 go.opentelemetry.io/collector/component v0.111.0 go.opentelemetry.io/collector/connector v0.111.0 - go.opentelemetry.io/collector/consumer v0.111.0 + go.opentelemetry.io/collector/consumer v0.111.1-0.20241021181817-007f06b7c4a8 go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 go.opentelemetry.io/collector/consumer/consumertest v0.111.0 go.opentelemetry.io/collector/pdata/pprofile v0.111.0 go.opentelemetry.io/collector/pdata/testdata v0.111.0 go.opentelemetry.io/collector/pipeline v0.111.0 - go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.111.0 + go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.0.0-20241021181817-007f06b7c4a8 ) require ( diff --git a/connector/connectortest/go.mod b/connector/connectortest/go.mod index 23755de583e..9554fdb6a4f 100644 --- a/connector/connectortest/go.mod +++ b/connector/connectortest/go.mod @@ -8,7 +8,7 @@ require ( go.opentelemetry.io/collector/component v0.111.0 go.opentelemetry.io/collector/connector v0.111.0 go.opentelemetry.io/collector/connector/connectorprofiles v0.111.0 - go.opentelemetry.io/collector/consumer v0.111.0 + go.opentelemetry.io/collector/consumer v0.111.1-0.20241021181817-007f06b7c4a8 go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 go.opentelemetry.io/collector/consumer/consumertest v0.111.0 go.opentelemetry.io/collector/pdata v1.17.0 @@ -28,7 +28,7 @@ require ( go.opentelemetry.io/collector v0.111.0 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.111.0 // indirect go.opentelemetry.io/collector/pipeline v0.111.0 // indirect - go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.111.0 // indirect + go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.0.0-20241021181817-007f06b7c4a8 // indirect go.opentelemetry.io/otel v1.31.0 // indirect go.opentelemetry.io/otel/metric v1.31.0 // indirect go.opentelemetry.io/otel/sdk v1.31.0 // 
indirect diff --git a/connector/forwardconnector/go.mod b/connector/forwardconnector/go.mod index 5fdd198cc6d..574905764b8 100644 --- a/connector/forwardconnector/go.mod +++ b/connector/forwardconnector/go.mod @@ -7,8 +7,8 @@ require ( go.opentelemetry.io/collector/component v0.111.0 go.opentelemetry.io/collector/confmap v1.17.0 go.opentelemetry.io/collector/connector v0.111.0 - go.opentelemetry.io/collector/connector/connectortest v0.111.0 - go.opentelemetry.io/collector/consumer v0.111.0 + go.opentelemetry.io/collector/connector/connectortest v0.0.0-20241021181817-007f06b7c4a8 + go.opentelemetry.io/collector/consumer v0.111.1-0.20241021181817-007f06b7c4a8 go.opentelemetry.io/collector/consumer/consumertest v0.111.0 go.opentelemetry.io/collector/pdata v1.17.0 go.opentelemetry.io/collector/pipeline v0.111.0 @@ -36,7 +36,7 @@ require ( go.opentelemetry.io/collector/connector/connectorprofiles v0.111.0 // indirect go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 // indirect go.opentelemetry.io/collector/pdata/pprofile v0.111.0 // indirect - go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.111.0 // indirect + go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.0.0-20241021181817-007f06b7c4a8 // indirect go.opentelemetry.io/otel v1.31.0 // indirect go.opentelemetry.io/otel/metric v1.31.0 // indirect go.opentelemetry.io/otel/sdk v1.31.0 // indirect diff --git a/connector/go.mod b/connector/go.mod index 89628737b57..f4ca58de6c6 100644 --- a/connector/go.mod +++ b/connector/go.mod @@ -6,7 +6,7 @@ require ( github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector v0.111.0 go.opentelemetry.io/collector/component v0.111.0 - go.opentelemetry.io/collector/consumer v0.111.0 + go.opentelemetry.io/collector/consumer v0.111.1-0.20241021181817-007f06b7c4a8 go.opentelemetry.io/collector/consumer/consumertest v0.111.0 go.opentelemetry.io/collector/pdata v1.17.0 go.opentelemetry.io/collector/pdata/testdata v0.111.0 diff --git 
a/consumer/consumererror/consumererrorprofiles/go.mod b/consumer/consumererror/consumererrorprofiles/go.mod index 04f55f4aa33..92c89f9f992 100644 --- a/consumer/consumererror/consumererrorprofiles/go.mod +++ b/consumer/consumererror/consumererrorprofiles/go.mod @@ -4,7 +4,7 @@ go 1.22.0 require ( github.com/stretchr/testify v1.9.0 - go.opentelemetry.io/collector/consumer/consumererror v0.111.0 + go.opentelemetry.io/collector/consumer/consumererror v0.0.0-20241021181817-007f06b7c4a8 go.opentelemetry.io/collector/pdata/pprofile v0.111.0 go.opentelemetry.io/collector/pdata/testdata v0.111.0 ) diff --git a/consumer/consumerprofiles/go.mod b/consumer/consumerprofiles/go.mod index fe83e9a165e..a78c7f30091 100644 --- a/consumer/consumerprofiles/go.mod +++ b/consumer/consumerprofiles/go.mod @@ -10,7 +10,7 @@ replace go.opentelemetry.io/collector/consumer => ../ require ( github.com/stretchr/testify v1.9.0 - go.opentelemetry.io/collector/consumer v0.111.0 + go.opentelemetry.io/collector/consumer v0.111.1-0.20241021181817-007f06b7c4a8 go.opentelemetry.io/collector/pdata/pprofile v0.111.0 ) diff --git a/consumer/consumertest/go.mod b/consumer/consumertest/go.mod index 0feda3e38ae..ac5cbe984d0 100644 --- a/consumer/consumertest/go.mod +++ b/consumer/consumertest/go.mod @@ -6,7 +6,7 @@ replace go.opentelemetry.io/collector/consumer => ../ require ( github.com/stretchr/testify v1.9.0 - go.opentelemetry.io/collector/consumer v0.111.0 + go.opentelemetry.io/collector/consumer v0.111.1-0.20241021181817-007f06b7c4a8 go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 go.opentelemetry.io/collector/pdata v1.17.0 go.opentelemetry.io/collector/pdata/pprofile v0.111.0 diff --git a/docs/release.md b/docs/release.md index 0497f9bc7e5..f93870afcbe 100644 --- a/docs/release.md +++ b/docs/release.md @@ -91,6 +91,8 @@ The last step of the release process creates artifacts for the new version of th 3. build and release ocb binaries under a separate tagged Github release, e.g. 
`cmd/builder/v0.85.0` + 4. build and push ocb Docker images to `https://hub.docker.com/r/otel/opentelemetry-collector-builder` and the GitHub Container Registry within the releases repository + ## Troubleshooting 1. `unknown revision internal/coreinternal/v0.85.0` -- This is typically an indication that there's a dependency on a new module. You can fix it by adding a new `replaces` entry to the `go.mod` for the affected module. diff --git a/exporter/debugexporter/go.mod b/exporter/debugexporter/go.mod index 323767d0129..931c72ec3d9 100644 --- a/exporter/debugexporter/go.mod +++ b/exporter/debugexporter/go.mod @@ -7,8 +7,9 @@ require ( go.opentelemetry.io/collector/component v0.111.0 go.opentelemetry.io/collector/config/configtelemetry v0.111.0 go.opentelemetry.io/collector/confmap v1.17.0 - go.opentelemetry.io/collector/consumer v0.111.0 + go.opentelemetry.io/collector/consumer v0.111.1-0.20241021181817-007f06b7c4a8 go.opentelemetry.io/collector/exporter v0.111.0 + go.opentelemetry.io/collector/exporter/exportertest v0.111.0 go.opentelemetry.io/collector/pdata v1.17.0 go.opentelemetry.io/collector/pdata/pprofile v0.111.0 go.opentelemetry.io/collector/pdata/testdata v0.111.0 @@ -35,7 +36,7 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/collector/config/configretry v1.17.0 // indirect - go.opentelemetry.io/collector/consumer/consumererror v0.111.0 // indirect + go.opentelemetry.io/collector/consumer/consumererror v0.0.0-20241021181817-007f06b7c4a8 // indirect go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 // indirect go.opentelemetry.io/collector/consumer/consumertest v0.111.0 // indirect go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0 // indirect @@ -92,4 +93,6 @@ replace go.opentelemetry.io/collector/exporter/exporterprofiles => ../exporterpr replace go.opentelemetry.io/collector/pipeline => ../../pipeline +replace 
go.opentelemetry.io/collector/exporter/exportertest => ../exportertest + replace go.opentelemetry.io/collector/consumer/consumererror => ../../consumer/consumererror diff --git a/exporter/exporterbatcher/batch_func.go b/exporter/exporterbatcher/batch_func.go deleted file mode 100644 index 0298276ba7b..00000000000 --- a/exporter/exporterbatcher/batch_func.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package exporterbatcher // import "go.opentelemetry.io/collector/exporter/exporterbatcher" - -import "context" - -// BatchMergeFunc is a function that merges two requests into a single request. -// Do not mutate the requests passed to the function if error can be returned after mutation or if the exporter is -// marked as not mutable. -// Experimental: This API is at the early stage of development and may change without backward compatibility -// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. -type BatchMergeFunc[T any] func(context.Context, T, T) (T, error) - -// BatchMergeSplitFunc is a function that merge and/or splits one or two requests into multiple requests based on the -// configured limit provided in MaxSizeConfig. -// All the returned requests MUST have a number of items that does not exceed the maximum number of items. -// Size of the last returned request MUST be less or equal than the size of any other returned request. -// The original request MUST not be mutated if error is returned after mutation or if the exporter is -// marked as not mutable. The length of the returned slice MUST not be 0. The optionalReq argument can be nil, -// make sure to check it before using. -// Experimental: This API is at the early stage of development and may change without backward compatibility -// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. 
-type BatchMergeSplitFunc[T any] func(ctx context.Context, cfg MaxSizeConfig, optionalReq T, req T) ([]T, error) diff --git a/exporter/exporterhelper/common.go b/exporter/exporterhelper/common.go index 7f396f40776..ab1f0db4e0b 100644 --- a/exporter/exporterhelper/common.go +++ b/exporter/exporterhelper/common.go @@ -69,11 +69,3 @@ func WithCapabilities(capabilities consumer.Capabilities) Option { func WithBatcher(cfg exporterbatcher.Config) Option { return internal.WithBatcher(cfg) } - -// WithBatchFuncs enables setting custom batch merge functions. -// This API is at the early stage of development and may change without backward compatibility -// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. -func WithBatchFuncs(mf exporterbatcher.BatchMergeFunc[Request], - msf exporterbatcher.BatchMergeSplitFunc[Request]) Option { - return internal.WithBatchFuncs(mf, msf) -} diff --git a/exporter/exporterhelper/exporterhelperprofiles/go.mod b/exporter/exporterhelper/exporterhelperprofiles/go.mod index 1057dfd35b7..dea9f1e2626 100644 --- a/exporter/exporterhelper/exporterhelperprofiles/go.mod +++ b/exporter/exporterhelper/exporterhelperprofiles/go.mod @@ -6,17 +6,17 @@ require ( github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/component v0.111.0 go.opentelemetry.io/collector/config/configretry v1.17.0 - go.opentelemetry.io/collector/consumer v0.111.0 - go.opentelemetry.io/collector/consumer/consumererror v0.111.0 - go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.0.0-00010101000000-000000000000 + go.opentelemetry.io/collector/consumer v0.111.1-0.20241021181817-007f06b7c4a8 + go.opentelemetry.io/collector/consumer/consumererror v0.0.0-20241021181817-007f06b7c4a8 + go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.0.0-20241021181817-007f06b7c4a8 go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 go.opentelemetry.io/collector/consumer/consumertest v0.111.0 
go.opentelemetry.io/collector/exporter v0.111.0 go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0 - go.opentelemetry.io/collector/pdata v1.17.0 + go.opentelemetry.io/collector/exporter/exportertest v0.111.0 go.opentelemetry.io/collector/pdata/pprofile v0.111.0 go.opentelemetry.io/collector/pdata/testdata v0.111.0 - go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.0.0-00010101000000-000000000000 + go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.0.0-20241021181817-007f06b7c4a8 go.opentelemetry.io/otel v1.31.0 go.opentelemetry.io/otel/sdk v1.31.0 go.opentelemetry.io/otel/trace v1.31.0 @@ -37,6 +37,7 @@ require ( go.opentelemetry.io/collector/config/configtelemetry v0.111.0 // indirect go.opentelemetry.io/collector/extension v0.111.0 // indirect go.opentelemetry.io/collector/extension/experimental/storage v0.111.0 // indirect + go.opentelemetry.io/collector/pdata v1.17.0 // indirect go.opentelemetry.io/collector/pipeline v0.111.0 // indirect go.opentelemetry.io/collector/receiver v0.111.0 // indirect go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0 // indirect @@ -88,4 +89,6 @@ replace go.opentelemetry.io/collector/extension/experimental/storage => ../../.. 
replace go.opentelemetry.io/collector/pipeline => ../../../pipeline +replace go.opentelemetry.io/collector/exporter/exportertest => ../../exportertest + replace go.opentelemetry.io/collector/consumer/consumererror => ../../../consumer/consumererror diff --git a/exporter/exporterhelper/exporterhelperprofiles/profiles.go b/exporter/exporterhelper/exporterhelperprofiles/profiles.go index 89410f22892..069613dbed3 100644 --- a/exporter/exporterhelper/exporterhelperprofiles/profiles.go +++ b/exporter/exporterhelper/exporterhelperprofiles/profiles.go @@ -88,7 +88,6 @@ func NewProfilesExporter( } profilesOpts := []exporterhelper.Option{ internal.WithMarshaler(profilesRequestMarshaler), internal.WithUnmarshaler(newProfileRequestUnmarshalerFunc(pusher)), - internal.WithBatchFuncs(mergeProfiles, mergeSplitProfiles), } return NewProfilesRequestExporter(ctx, set, requestFromProfiles(pusher), append(profilesOpts, options...)...) } diff --git a/exporter/exporterhelper/exporterhelperprofiles/profiles_batch.go b/exporter/exporterhelper/exporterhelperprofiles/profiles_batch.go index 0db7d879e20..fc725666438 100644 --- a/exporter/exporterhelper/exporterhelperprofiles/profiles_batch.go +++ b/exporter/exporterhelper/exporterhelperprofiles/profiles_batch.go @@ -12,29 +12,28 @@ import ( "go.opentelemetry.io/collector/pdata/pprofile" ) -// mergeProfiles merges two profiles requests into one. -func mergeProfiles(_ context.Context, r1 exporterhelper.Request, r2 exporterhelper.Request) (exporterhelper.Request, error) { - tr1, ok1 := r1.(*profilesRequest) +// Merge merges two profiles requests into one. 
+func (req *profilesRequest) Merge(_ context.Context, r2 exporterhelper.Request) (exporterhelper.Request, error) { tr2, ok2 := r2.(*profilesRequest) - if !ok1 || !ok2 { + if !ok2 { return nil, errors.New("invalid input type") } - tr2.pd.ResourceProfiles().MoveAndAppendTo(tr1.pd.ResourceProfiles()) - return tr1, nil + tr2.pd.ResourceProfiles().MoveAndAppendTo(req.pd.ResourceProfiles()) + return req, nil } -// mergeSplitProfiles splits and/or merges the profiles into multiple requests based on the MaxSizeConfig. -func mergeSplitProfiles(_ context.Context, cfg exporterbatcher.MaxSizeConfig, r1 exporterhelper.Request, r2 exporterhelper.Request) ([]exporterhelper.Request, error) { +// MergeSplit splits and/or merges the profiles into multiple requests based on the MaxSizeConfig. +func (req *profilesRequest) MergeSplit(_ context.Context, cfg exporterbatcher.MaxSizeConfig, r2 exporterhelper.Request) ([]exporterhelper.Request, error) { var ( res []exporterhelper.Request destReq *profilesRequest capacityLeft = cfg.MaxSizeItems ) - for _, req := range []exporterhelper.Request{r1, r2} { - if req == nil { + for _, r := range []exporterhelper.Request{req, r2} { + if r == nil { continue } - srcReq, ok := req.(*profilesRequest) + srcReq, ok := r.(*profilesRequest) if !ok { return nil, errors.New("invalid input type") } diff --git a/exporter/exporterhelper/exporterhelperprofiles/profiles_batch_test.go b/exporter/exporterhelper/exporterhelperprofiles/profiles_batch_test.go index 0272d8126b1..9674e2d3fd4 100644 --- a/exporter/exporterhelper/exporterhelperprofiles/profiles_batch_test.go +++ b/exporter/exporterhelper/exporterhelperprofiles/profiles_batch_test.go @@ -12,27 +12,25 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/exporter/exporterbatcher" "go.opentelemetry.io/collector/exporter/exporterhelper" "go.opentelemetry.io/collector/pdata/pprofile" - 
"go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/collector/pdata/testdata" ) func TestMergeProfiles(t *testing.T) { pr1 := &profilesRequest{pd: testdata.GenerateProfiles(2)} pr2 := &profilesRequest{pd: testdata.GenerateProfiles(3)} - res, err := mergeProfiles(context.Background(), pr1, pr2) + res, err := pr1.Merge(context.Background(), pr2) require.NoError(t, err) fmt.Fprintf(os.Stdout, "%#v\n", res.(*profilesRequest).pd) assert.Equal(t, 5, res.(*profilesRequest).pd.SampleCount()) } func TestMergeProfilesInvalidInput(t *testing.T) { - pr1 := &tracesRequest{td: testdata.GenerateTraces(2)} + pr1 := &dummyRequest{} pr2 := &profilesRequest{pd: testdata.GenerateProfiles(3)} - _, err := mergeProfiles(context.Background(), pr1, pr2) + _, err := pr2.Merge(context.Background(), pr1) assert.Error(t, err) } @@ -51,13 +49,6 @@ func TestMergeSplitProfiles(t *testing.T) { pr2: &profilesRequest{pd: pprofile.NewProfiles()}, expected: []*profilesRequest{{pd: pprofile.NewProfiles()}}, }, - { - name: "both_requests_nil", - cfg: exporterbatcher.MaxSizeConfig{MaxSizeItems: 10}, - pr1: nil, - pr2: nil, - expected: []*profilesRequest{}, - }, { name: "first_request_empty", cfg: exporterbatcher.MaxSizeConfig{MaxSizeItems: 10}, @@ -66,17 +57,10 @@ func TestMergeSplitProfiles(t *testing.T) { expected: []*profilesRequest{{pd: testdata.GenerateProfiles(5)}}, }, { - name: "first_requests_nil", - cfg: exporterbatcher.MaxSizeConfig{MaxSizeItems: 10}, - pr1: nil, - pr2: &profilesRequest{pd: testdata.GenerateProfiles(5)}, - expected: []*profilesRequest{{pd: testdata.GenerateProfiles(5)}}, - }, - { - name: "first_nil_second_empty", + name: "first_empty_second_nil", cfg: exporterbatcher.MaxSizeConfig{MaxSizeItems: 10}, - pr1: nil, - pr2: &profilesRequest{pd: pprofile.NewProfiles()}, + pr1: &profilesRequest{pd: pprofile.NewProfiles()}, + pr2: nil, expected: []*profilesRequest{{pd: pprofile.NewProfiles()}}, }, { @@ -93,8 +77,8 @@ func TestMergeSplitProfiles(t *testing.T) { { name: 
"split_only", cfg: exporterbatcher.MaxSizeConfig{MaxSizeItems: 4}, - pr1: nil, - pr2: &profilesRequest{pd: testdata.GenerateProfiles(10)}, + pr1: &profilesRequest{pd: testdata.GenerateProfiles(10)}, + pr2: nil, expected: []*profilesRequest{ {pd: testdata.GenerateProfiles(4)}, {pd: testdata.GenerateProfiles(4)}, @@ -133,7 +117,7 @@ func TestMergeSplitProfiles(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - res, err := mergeSplitProfiles(context.Background(), tt.cfg, tt.pr1, tt.pr2) + res, err := tt.pr1.MergeSplit(context.Background(), tt.cfg, tt.pr2) require.NoError(t, err) assert.Equal(t, len(tt.expected), len(res)) for i, r := range res { @@ -145,9 +129,9 @@ func TestMergeSplitProfiles(t *testing.T) { } func TestMergeSplitProfilesInvalidInput(t *testing.T) { - r1 := &tracesRequest{td: testdata.GenerateTraces(2)} + r1 := &dummyRequest{} r2 := &profilesRequest{pd: testdata.GenerateProfiles(3)} - _, err := mergeSplitProfiles(context.Background(), exporterbatcher.MaxSizeConfig{}, r1, r2) + _, err := r2.MergeSplit(context.Background(), exporterbatcher.MaxSizeConfig{}, r1) assert.Error(t, err) } @@ -160,15 +144,23 @@ func TestExtractProfiles(t *testing.T) { } } -type tracesRequest struct { - td ptrace.Traces - pusher consumer.ConsumeTracesFunc +// dummyRequest implements Request. 
It is for checking that merging two request types would fail +type dummyRequest struct { +} + +func (req *dummyRequest) Export(_ context.Context) error { + return nil +} + +func (req *dummyRequest) ItemsCount() int { + return 1 } -func (req *tracesRequest) Export(ctx context.Context) error { - return req.pusher(ctx, req.td) +func (req *dummyRequest) Merge(_ context.Context, _ exporterhelper.Request) (exporterhelper.Request, error) { + return nil, nil } -func (req *tracesRequest) ItemsCount() int { - return req.td.SpanCount() +func (req *dummyRequest) MergeSplit(_ context.Context, _ exporterbatcher.MaxSizeConfig, _ exporterhelper.Request) ( + []exporterhelper.Request, error) { + return nil, nil } diff --git a/exporter/exporterhelper/internal/base_exporter.go b/exporter/exporterhelper/internal/base_exporter.go index 1aebb318c8f..763972e793e 100644 --- a/exporter/exporterhelper/internal/base_exporter.go +++ b/exporter/exporterhelper/internal/base_exporter.go @@ -35,9 +35,6 @@ type BaseExporter struct { Signal pipeline.Signal - BatchMergeFunc exporterbatcher.BatchMergeFunc[internal.Request] - BatchMergeSplitfunc exporterbatcher.BatchMergeSplitFunc[internal.Request] - Marshaler exporterqueue.Marshaler[internal.Request] Unmarshaler exporterqueue.Unmarshaler[internal.Request] @@ -104,10 +101,7 @@ func NewBaseExporter(set exporter.Settings, signal pipeline.Signal, osf ObsrepSe } if be.BatcherCfg.Enabled { - bs := NewBatchSender(be.BatcherCfg, be.Set, be.BatchMergeFunc, be.BatchMergeSplitfunc) - if bs.mergeFunc == nil || bs.mergeSplitFunc == nil { - err = multierr.Append(err, fmt.Errorf("WithRequestBatchFuncs must be provided for the batcher applied to the request-based exporters")) - } + bs := NewBatchSender(be.BatcherCfg, be.Set) be.BatchSender = bs } @@ -298,16 +292,6 @@ func WithUnmarshaler(unmarshaler exporterqueue.Unmarshaler[internal.Request]) Op } } -// withBatchFuncs is used to set the functions for merging and splitting batches for OLTP-based exporters. 
-// It must be provided as the first option when creating a new exporter helper. -func WithBatchFuncs(mf exporterbatcher.BatchMergeFunc[internal.Request], msf exporterbatcher.BatchMergeSplitFunc[internal.Request]) Option { - return func(o *BaseExporter) error { - o.BatchMergeFunc = mf - o.BatchMergeSplitfunc = msf - return nil - } -} - func CheckStatus(t *testing.T, sd sdktrace.ReadOnlySpan, err error) { if err != nil { require.Equal(t, codes.Error, sd.Status().Code, "SpanData %v", sd) diff --git a/exporter/exporterhelper/internal/batch_sender.go b/exporter/exporterhelper/internal/batch_sender.go index 65d7e0965f7..21eed2c91d8 100644 --- a/exporter/exporterhelper/internal/batch_sender.go +++ b/exporter/exporterhelper/internal/batch_sender.go @@ -24,9 +24,7 @@ import ( // - concurrencyLimit is reached. type BatchSender struct { BaseRequestSender - cfg exporterbatcher.Config - mergeFunc exporterbatcher.BatchMergeFunc[internal.Request] - mergeSplitFunc exporterbatcher.BatchMergeSplitFunc[internal.Request] + cfg exporterbatcher.Config // concurrencyLimit is the maximum number of goroutines that can be blocked by the batcher. // If this number is reached and all the goroutines are busy, the batch will be sent right away. @@ -46,14 +44,11 @@ type BatchSender struct { } // newBatchSender returns a new batch consumer component. 
-func NewBatchSender(cfg exporterbatcher.Config, set exporter.Settings, - mf exporterbatcher.BatchMergeFunc[internal.Request], msf exporterbatcher.BatchMergeSplitFunc[internal.Request]) *BatchSender { +func NewBatchSender(cfg exporterbatcher.Config, set exporter.Settings) *BatchSender { bs := &BatchSender{ activeBatch: newEmptyBatch(), cfg: cfg, logger: set.Logger, - mergeFunc: mf, - mergeSplitFunc: msf, shutdownCh: nil, shutdownCompleteCh: make(chan struct{}), stopped: &atomic.Bool{}, @@ -156,10 +151,17 @@ func (bs *BatchSender) Send(ctx context.Context, req internal.Request) error { func (bs *BatchSender) sendMergeSplitBatch(ctx context.Context, req internal.Request) error { bs.mu.Lock() - reqs, err := bs.mergeSplitFunc(ctx, bs.cfg.MaxSizeConfig, bs.activeBatch.request, req) - if err != nil || len(reqs) == 0 { + var reqs []internal.Request + var mergeSplitErr error + if bs.activeBatch.request == nil { + reqs, mergeSplitErr = req.MergeSplit(ctx, bs.cfg.MaxSizeConfig, nil) + } else { + reqs, mergeSplitErr = bs.activeBatch.request.MergeSplit(ctx, bs.cfg.MaxSizeConfig, req) + } + + if mergeSplitErr != nil || len(reqs) == 0 { bs.mu.Unlock() - return err + return mergeSplitErr } bs.activeRequests.Add(1) @@ -201,7 +203,7 @@ func (bs *BatchSender) sendMergeBatch(ctx context.Context, req internal.Request) if bs.activeBatch.request != nil { var err error - req, err = bs.mergeFunc(ctx, bs.activeBatch.request, req) + req, err = bs.activeBatch.request.Merge(ctx, req) if err != nil { bs.mu.Unlock() return err diff --git a/exporter/exporterhelper/internal/batch_sender_test.go b/exporter/exporterhelper/internal/batch_sender_test.go index f6d53bca0e0..f75febca205 100644 --- a/exporter/exporterhelper/internal/batch_sender_test.go +++ b/exporter/exporterhelper/internal/batch_sender_test.go @@ -47,7 +47,7 @@ func TestBatchSender_Merge(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - be := queueBatchExporter(t, tt.batcherOption, 
WithBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc)) + be := queueBatchExporter(t, tt.batcherOption) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) t.Cleanup(func() { @@ -117,7 +117,7 @@ func TestBatchSender_BatchExportError(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - be := queueBatchExporter(t, tt.batcherOption, WithBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc)) + be := queueBatchExporter(t, tt.batcherOption) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) t.Cleanup(func() { @@ -153,7 +153,7 @@ func TestBatchSender_MergeOrSplit(t *testing.T) { cfg.MinSizeItems = 5 cfg.MaxSizeItems = 10 cfg.FlushTimeout = 100 * time.Millisecond - be := queueBatchExporter(t, WithBatcher(cfg), WithBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc)) + be := queueBatchExporter(t, WithBatcher(cfg)) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) t.Cleanup(func() { @@ -170,7 +170,6 @@ func TestBatchSender_MergeOrSplit(t *testing.T) { // big request should be broken down into two requests, both are sent right away. 
require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 17, sink: sink})) - assert.Eventually(t, func() bool { return sink.requestsCount.Load() == 3 && sink.itemsCount.Load() == 25 }, 50*time.Millisecond, 10*time.Millisecond) @@ -190,7 +189,7 @@ func TestBatchSender_MergeOrSplit(t *testing.T) { func TestBatchSender_Shutdown(t *testing.T) { batchCfg := exporterbatcher.NewDefaultConfig() batchCfg.MinSizeItems = 10 - be := queueBatchExporter(t, WithBatcher(batchCfg), WithBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc)) + be := queueBatchExporter(t, WithBatcher(batchCfg)) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) @@ -212,7 +211,6 @@ func TestBatchSender_Disabled(t *testing.T) { cfg.Enabled = false cfg.MaxSizeItems = 5 be, err := NewBaseExporter(defaultSettings, defaultSignal, newNoopObsrepSender, - WithBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc), WithBatcher(cfg)) require.NotNil(t, be) require.NoError(t, err) @@ -229,39 +227,38 @@ func TestBatchSender_Disabled(t *testing.T) { assert.Equal(t, int64(8), sink.itemsCount.Load()) } -func TestBatchSender_InvalidMergeSplitFunc(t *testing.T) { - invalidMergeSplitFunc := func(_ context.Context, _ exporterbatcher.MaxSizeConfig, _ internal.Request, req2 internal.Request) ([]internal.Request, - error) { - // reply with invalid 0 length slice if req2 is more than 20 items - if req2.(*fakeRequest).items > 20 { - return []internal.Request{}, nil - } - // otherwise reply with a single request. 
- return []internal.Request{req2}, nil - } - cfg := exporterbatcher.NewDefaultConfig() - cfg.FlushTimeout = 50 * time.Millisecond - cfg.MaxSizeItems = 20 - be := queueBatchExporter(t, WithBatcher(cfg), WithBatchFuncs(fakeBatchMergeFunc, invalidMergeSplitFunc)) - - require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) - t.Cleanup(func() { - require.NoError(t, be.Shutdown(context.Background())) - }) - - sink := newFakeRequestSink() - // first request should be ignored due to invalid merge/split function. - require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 30, sink: sink})) - // second request should be sent after reaching the timeout. - require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 15, sink: sink})) - assert.Eventually(t, func() bool { - return sink.requestsCount.Load() == 1 && sink.itemsCount.Load() == 15 - }, 100*time.Millisecond, 10*time.Millisecond) -} +// func TestBatchSender_InvalidMergeSplitFunc(t *testing.T) { +// invalidMergeSplitFunc := func(_ context.Context, _ exporterbatcher.MaxSizeConfig, _ internal.Request, req2 internal.Request) ([]internal.Request, +// error) { +// // reply with invalid 0 length slice if req2 is more than 20 items +// if req2.(*fakeRequest).items > 20 { +// return []internal.Request{}, nil +// } +// // otherwise reply with a single request. +// return []internal.Request{req2}, nil +// } +// cfg := exporterbatcher.NewDefaultConfig() +// cfg.FlushTimeout = 50 * time.Millisecond +// cfg.MaxSizeItems = 20 +// be := queueBatchExporter(t, WithBatcher(cfg), WithBatchFuncs(fakeBatchMergeFunc, invalidMergeSplitFunc)) + +// require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) +// t.Cleanup(func() { +// require.NoError(t, be.Shutdown(context.Background())) +// }) + +// sink := newFakeRequestSink() +// // first request should be ignored due to invalid merge/split function. 
+// require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 30, sink: sink})) +// // second request should be sent after reaching the timeout. +// require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 15, sink: sink})) +// assert.Eventually(t, func() bool { +// return sink.requestsCount.Load() == 1 && sink.itemsCount.Load() == 15 +// }, 100*time.Millisecond, 10*time.Millisecond) +// } func TestBatchSender_PostShutdown(t *testing.T) { be, err := NewBaseExporter(defaultSettings, defaultSignal, newNoopObsrepSender, - WithBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc), WithBatcher(exporterbatcher.NewDefaultConfig())) require.NotNil(t, be) require.NoError(t, err) @@ -323,7 +320,6 @@ func TestBatchSender_ConcurrencyLimitReached(t *testing.T) { qCfg := exporterqueue.NewDefaultConfig() qCfg.NumConsumers = 2 be, err := NewBaseExporter(defaultSettings, defaultSignal, newNoopObsrepSender, - WithBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc), WithBatcher(tt.batcherCfg), WithRequestQueue(qCfg, exporterqueue.NewMemoryQueueFactory[internal.Request]())) require.NotNil(t, be) @@ -379,7 +375,6 @@ func TestBatchSender_BatchBlocking(t *testing.T) { bCfg := exporterbatcher.NewDefaultConfig() bCfg.MinSizeItems = 3 be, err := NewBaseExporter(defaultSettings, defaultSignal, newNoopObsrepSender, - WithBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc), WithBatcher(bCfg)) require.NotNil(t, be) require.NoError(t, err) @@ -410,7 +405,6 @@ func TestBatchSender_BatchCancelled(t *testing.T) { bCfg := exporterbatcher.NewDefaultConfig() bCfg.MinSizeItems = 2 be, err := NewBaseExporter(defaultSettings, defaultSignal, newNoopObsrepSender, - WithBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc), WithBatcher(bCfg)) require.NotNil(t, be) require.NoError(t, err) @@ -446,7 +440,6 @@ func TestBatchSender_DrainActiveRequests(t *testing.T) { bCfg := exporterbatcher.NewDefaultConfig() bCfg.MinSizeItems = 2 be, err := NewBaseExporter(defaultSettings, 
defaultSignal, newNoopObsrepSender, - WithBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc), WithBatcher(bCfg)) require.NotNil(t, be) require.NoError(t, err) @@ -476,45 +469,8 @@ func TestBatchSender_DrainActiveRequests(t *testing.T) { assert.Equal(t, int64(3), sink.itemsCount.Load()) } -func TestBatchSender_WithBatcherOption(t *testing.T) { - tests := []struct { - name string - opts []Option - expectedErr bool - }{ - { - name: "no_funcs_set", - opts: []Option{WithBatcher(exporterbatcher.NewDefaultConfig())}, - expectedErr: true, - }, - { - name: "funcs_set_internally", - opts: []Option{WithBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc), WithBatcher(exporterbatcher.NewDefaultConfig())}, - expectedErr: false, - }, - { - name: "nil_funcs", - opts: []Option{WithBatchFuncs(nil, nil), WithBatcher(exporterbatcher.NewDefaultConfig())}, - expectedErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - be, err := NewBaseExporter(defaultSettings, defaultSignal, newNoopObsrepSender, tt.opts...) - if tt.expectedErr { - assert.Nil(t, be) - assert.Error(t, err) - } else { - assert.NotNil(t, be) - assert.NoError(t, err) - } - }) - } -} - func TestBatchSender_UnstartedShutdown(t *testing.T) { be, err := NewBaseExporter(defaultSettings, defaultSignal, newNoopObsrepSender, - WithBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc), WithBatcher(exporterbatcher.NewDefaultConfig())) require.NoError(t, err) @@ -524,51 +480,50 @@ func TestBatchSender_UnstartedShutdown(t *testing.T) { // TestBatchSender_ShutdownDeadlock tests that the exporter does not deadlock when shutting down while a batch is being // merged. 
-func TestBatchSender_ShutdownDeadlock(t *testing.T) { - blockMerge := make(chan struct{}) - waitMerge := make(chan struct{}, 10) - - // blockedBatchMergeFunc blocks until the blockMerge channel is closed - blockedBatchMergeFunc := func(_ context.Context, r1 internal.Request, r2 internal.Request) (internal.Request, error) { - waitMerge <- struct{}{} - <-blockMerge - r1.(*fakeRequest).items += r2.(*fakeRequest).items - return r1, nil - } - - bCfg := exporterbatcher.NewDefaultConfig() - bCfg.FlushTimeout = 10 * time.Minute // high timeout to avoid the timeout to trigger - be, err := NewBaseExporter(defaultSettings, defaultSignal, newNoopObsrepSender, - WithBatchFuncs(blockedBatchMergeFunc, fakeBatchMergeSplitFunc), - WithBatcher(bCfg)) - require.NoError(t, err) - require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) - - sink := newFakeRequestSink() - - // Send 2 concurrent requests - go func() { assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) }() - go func() { assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) }() - - // Wait for the requests to enter the merge function - <-waitMerge - - // Initiate the exporter shutdown, unblock the batch merge function to catch possible deadlocks, - // then wait for the exporter to finish. 
- startShutdown := make(chan struct{}) - doneShutdown := make(chan struct{}) - go func() { - close(startShutdown) - assert.NoError(t, be.Shutdown(context.Background())) - close(doneShutdown) - }() - <-startShutdown - close(blockMerge) - <-doneShutdown - - assert.EqualValues(t, 1, sink.requestsCount.Load()) - assert.EqualValues(t, 8, sink.itemsCount.Load()) -} +// func TestBatchSender_ShutdownDeadlock(t *testing.T) { +// blockMerge := make(chan struct{}) +// waitMerge := make(chan struct{}, 10) + +// // blockedBatchMergeFunc blocks until the blockMerge channel is closed +// blockedBatchMergeFunc := func(_ context.Context, r1 internal.Request, r2 internal.Request) (internal.Request, error) { +// waitMerge <- struct{}{} +// <-blockMerge +// r1.(*fakeRequest).items += r2.(*fakeRequest).items +// return r1, nil +// } + +// bCfg := exporterbatcher.NewDefaultConfig() +// bCfg.FlushTimeout = 10 * time.Minute // high timeout to avoid the timeout to trigger +// be, err := NewBaseExporter(defaultSettings, defaultSignal, newNoopObsrepSender, +// WithBatcher(bCfg)) +// require.NoError(t, err) +// require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) + +// sink := newFakeRequestSink() + +// // Send 2 concurrent requests +// go func() { assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) }() +// go func() { assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) }() + +// // Wait for the requests to enter the merge function +// <-waitMerge + +// // Initiate the exporter shutdown, unblock the batch merge function to catch possible deadlocks, +// // then wait for the exporter to finish. 
+// startShutdown := make(chan struct{}) +// doneShutdown := make(chan struct{}) +// go func() { +// close(startShutdown) +// assert.NoError(t, be.Shutdown(context.Background())) +// close(doneShutdown) +// }() +// <-startShutdown +// close(blockMerge) +// <-doneShutdown + +// assert.EqualValues(t, 1, sink.requestsCount.Load()) +// assert.EqualValues(t, 8, sink.itemsCount.Load()) +// } func TestBatchSenderWithTimeout(t *testing.T) { bCfg := exporterbatcher.NewDefaultConfig() @@ -576,7 +531,6 @@ func TestBatchSenderWithTimeout(t *testing.T) { tCfg := NewDefaultTimeoutConfig() tCfg.Timeout = 50 * time.Millisecond be, err := NewBaseExporter(defaultSettings, defaultSignal, newNoopObsrepSender, - WithBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc), WithBatcher(bCfg), WithTimeout(tCfg)) require.NoError(t, err) @@ -614,51 +568,50 @@ func TestBatchSenderWithTimeout(t *testing.T) { assert.EqualValues(t, 12, sink.itemsCount.Load()) } -func TestBatchSenderTimerResetNoConflict(t *testing.T) { - delayBatchMergeFunc := func(_ context.Context, r1 internal.Request, r2 internal.Request) (internal.Request, error) { - time.Sleep(30 * time.Millisecond) - if r1 == nil { - return r2, nil - } - fr1 := r1.(*fakeRequest) - fr2 := r2.(*fakeRequest) - if fr2.mergeErr != nil { - return nil, fr2.mergeErr - } - return &fakeRequest{ - items: fr1.items + fr2.items, - sink: fr1.sink, - exportErr: fr2.exportErr, - delay: fr1.delay + fr2.delay, - }, nil - } - bCfg := exporterbatcher.NewDefaultConfig() - bCfg.MinSizeItems = 8 - bCfg.FlushTimeout = 50 * time.Millisecond - be, err := NewBaseExporter(defaultSettings, defaultSignal, newNoopObsrepSender, - WithBatchFuncs(delayBatchMergeFunc, fakeBatchMergeSplitFunc), - WithBatcher(bCfg)) - require.NoError(t, err) - require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) - sink := newFakeRequestSink() - - // Send 2 concurrent requests that should be merged in one batch in the same interval as the flush timer - go func() { - 
assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) - }() - time.Sleep(30 * time.Millisecond) - go func() { - assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) - }() - - // The batch should be sent either with the flush interval or by reaching the minimum items size with no conflict - assert.EventuallyWithT(t, func(c *assert.CollectT) { - assert.LessOrEqual(c, int64(1), sink.requestsCount.Load()) - assert.EqualValues(c, 8, sink.itemsCount.Load()) - }, 200*time.Millisecond, 10*time.Millisecond) - - require.NoError(t, be.Shutdown(context.Background())) -} +// func TestBatchSenderTimerResetNoConflict(t *testing.T) { +// delayBatchMergeFunc := func(_ context.Context, r1 internal.Request, r2 internal.Request) (internal.Request, error) { +// time.Sleep(30 * time.Millisecond) +// if r1 == nil { +// return r2, nil +// } +// fr1 := r1.(*fakeRequest) +// fr2 := r2.(*fakeRequest) +// if fr2.mergeErr != nil { +// return nil, fr2.mergeErr +// } +// return &fakeRequest{ +// items: fr1.items + fr2.items, +// sink: fr1.sink, +// exportErr: fr2.exportErr, +// delay: fr1.delay + fr2.delay, +// }, nil +// } +// bCfg := exporterbatcher.NewDefaultConfig() +// bCfg.MinSizeItems = 8 +// bCfg.FlushTimeout = 50 * time.Millisecond +// be, err := NewBaseExporter(defaultSettings, defaultSignal, newNoopObsrepSender, +// WithBatcher(bCfg)) +// require.NoError(t, err) +// require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) +// sink := newFakeRequestSink() + +// // Send 2 concurrent requests that should be merged in one batch in the same interval as the flush timer +// go func() { +// assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) +// }() +// time.Sleep(30 * time.Millisecond) +// go func() { +// assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) +// }() + +// // The batch should be sent either with the flush interval or by reaching 
the minimum items size with no conflict +// assert.EventuallyWithT(t, func(c *assert.CollectT) { +// assert.LessOrEqual(c, int64(1), sink.requestsCount.Load()) +// assert.EqualValues(c, 8, sink.itemsCount.Load()) +// }, 200*time.Millisecond, 10*time.Millisecond) + +// require.NoError(t, be.Shutdown(context.Background())) +// } func TestBatchSenderTimerFlush(t *testing.T) { if runtime.GOOS == "windows" { @@ -668,7 +621,6 @@ func TestBatchSenderTimerFlush(t *testing.T) { bCfg.MinSizeItems = 8 bCfg.FlushTimeout = 100 * time.Millisecond be, err := NewBaseExporter(defaultSettings, defaultSignal, newNoopObsrepSender, - WithBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc), WithBatcher(bCfg)) require.NoError(t, err) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) @@ -711,65 +663,3 @@ func queueBatchExporter(t *testing.T, opts ...Option) *BaseExporter { require.NoError(t, err) return be } - -func fakeBatchMergeFunc(_ context.Context, r1 internal.Request, r2 internal.Request) (internal.Request, error) { - if r1 == nil { - return r2, nil - } - fr1 := r1.(*fakeRequest) - fr2 := r2.(*fakeRequest) - if fr2.mergeErr != nil { - return nil, fr2.mergeErr - } - return &fakeRequest{ - items: fr1.items + fr2.items, - sink: fr1.sink, - exportErr: fr2.exportErr, - delay: fr1.delay + fr2.delay, - }, nil -} - -func fakeBatchMergeSplitFunc(ctx context.Context, cfg exporterbatcher.MaxSizeConfig, r1 internal.Request, r2 internal.Request) ([]internal.Request, error) { - maxItems := cfg.MaxSizeItems - if maxItems == 0 { - r, err := fakeBatchMergeFunc(ctx, r1, r2) - return []internal.Request{r}, err - } - - if r2.(*fakeRequest).mergeErr != nil { - return nil, r2.(*fakeRequest).mergeErr - } - - fr2 := r2.(*fakeRequest) - fr2 = &fakeRequest{items: fr2.items, sink: fr2.sink, exportErr: fr2.exportErr, delay: fr2.delay} - var res []internal.Request - - // fill fr1 to maxItems if it's not nil - if r1 != nil { - fr1 := r1.(*fakeRequest) - fr1 = &fakeRequest{items: 
fr1.items, sink: fr1.sink, exportErr: fr1.exportErr, delay: fr1.delay} - if fr2.items <= maxItems-fr1.items { - fr1.items += fr2.items - if fr2.exportErr != nil { - fr1.exportErr = fr2.exportErr - } - return []internal.Request{fr1}, nil - } - // if split is needed, we don't propagate exportErr from fr2 to fr1 to test more cases - fr2.items -= maxItems - fr1.items - fr1.items = maxItems - res = append(res, fr1) - } - - // split fr2 to maxItems - for { - if fr2.items <= maxItems { - res = append(res, &fakeRequest{items: fr2.items, sink: fr2.sink, exportErr: fr2.exportErr, delay: fr2.delay}) - break - } - res = append(res, &fakeRequest{items: maxItems, sink: fr2.sink, exportErr: fr2.exportErr, delay: fr2.delay}) - fr2.items -= maxItems - } - - return res, nil -} diff --git a/exporter/exporterhelper/internal/request.go b/exporter/exporterhelper/internal/request.go index 0ae94fcf45e..7f71d7e94ea 100644 --- a/exporter/exporterhelper/internal/request.go +++ b/exporter/exporterhelper/internal/request.go @@ -8,6 +8,7 @@ import ( "sync/atomic" "time" + "go.opentelemetry.io/collector/exporter/exporterbatcher" "go.opentelemetry.io/collector/exporter/internal" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" @@ -55,6 +56,75 @@ func (r *fakeRequest) ItemsCount() int { return r.items } +func (r *fakeRequest) Merge(_ context.Context, + r2 internal.Request) (internal.Request, error) { + if r == nil { + return r2, nil + } + fr2 := r2.(*fakeRequest) + if fr2.mergeErr != nil { + return nil, fr2.mergeErr + } + return &fakeRequest{ + items: r.items + fr2.items, + sink: r.sink, + exportErr: fr2.exportErr, + delay: r.delay + fr2.delay, + }, nil +} + +func (r *fakeRequest) MergeSplit(ctx context.Context, cfg exporterbatcher.MaxSizeConfig, + r2 internal.Request) ([]internal.Request, error) { + if r.mergeErr != nil { + return nil, r.mergeErr + } + + maxItems := cfg.MaxSizeItems + if maxItems == 0 { + r, err := r.Merge(ctx, r2) + return 
[]internal.Request{r}, err + } + + var fr2 *fakeRequest + if r2 == nil { + fr2 = &fakeRequest{sink: r.sink, exportErr: r.exportErr, delay: r.delay} + } else { + if r2.(*fakeRequest).mergeErr != nil { + return nil, r2.(*fakeRequest).mergeErr + } + fr2 = r2.(*fakeRequest) + fr2 = &fakeRequest{items: fr2.items, sink: fr2.sink, exportErr: fr2.exportErr, delay: fr2.delay} + } + var res []internal.Request + + // fill fr1 to maxItems if it's not nil + + r = &fakeRequest{items: r.items, sink: r.sink, exportErr: r.exportErr, delay: r.delay} + if fr2.items <= maxItems-r.items { + r.items += fr2.items + if fr2.exportErr != nil { + r.exportErr = fr2.exportErr + } + return []internal.Request{r}, nil + } + // if split is needed, we don't propagate exportErr from fr2 to fr1 to test more cases + fr2.items -= maxItems - r.items + r.items = maxItems + res = append(res, r) + + // split fr2 to maxItems + for { + if fr2.items <= maxItems { + res = append(res, &fakeRequest{items: fr2.items, sink: fr2.sink, exportErr: fr2.exportErr, delay: fr2.delay}) + break + } + res = append(res, &fakeRequest{items: maxItems, sink: fr2.sink, exportErr: fr2.exportErr, delay: fr2.delay}) + fr2.items -= maxItems + } + + return res, nil +} + type FakeRequestConverter struct { MetricsError error TracesError error diff --git a/exporter/exporterhelper/internal/retry_sender_test.go b/exporter/exporterhelper/internal/retry_sender_test.go index bdb80d326c4..9ebf4b1f5ad 100644 --- a/exporter/exporterhelper/internal/retry_sender_test.go +++ b/exporter/exporterhelper/internal/retry_sender_test.go @@ -19,6 +19,7 @@ import ( "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config/configretry" "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/exporter/exporterbatcher" "go.opentelemetry.io/collector/exporter/exporterqueue" "go.opentelemetry.io/collector/exporter/exportertest" "go.opentelemetry.io/collector/exporter/internal" @@ -348,6 +349,14 
@@ func (mer *mockErrorRequest) ItemsCount() int { return 7 } +func (mer *mockErrorRequest) Merge(context.Context, internal.Request) (internal.Request, error) { + return nil, nil +} + +func (mer *mockErrorRequest) MergeSplit(context.Context, exporterbatcher.MaxSizeConfig, internal.Request) ([]internal.Request, error) { + return nil, nil +} + func newErrorRequest() internal.Request { return &mockErrorRequest{} } @@ -390,6 +399,14 @@ func (m *mockRequest) ItemsCount() int { return m.cnt } +func (m *mockRequest) Merge(context.Context, internal.Request) (internal.Request, error) { + return nil, nil +} + +func (m *mockRequest) MergeSplit(context.Context, exporterbatcher.MaxSizeConfig, internal.Request) ([]internal.Request, error) { + return nil, nil +} + func newMockRequest(cnt int, consumeError error) *mockRequest { return &mockRequest{ cnt: cnt, diff --git a/exporter/exporterhelper/logs.go b/exporter/exporterhelper/logs.go index 4f5b977b2e5..772a5673e24 100644 --- a/exporter/exporterhelper/logs.go +++ b/exporter/exporterhelper/logs.go @@ -86,7 +86,6 @@ func NewLogs( } logsOpts := []Option{ internal.WithMarshaler(logsRequestMarshaler), internal.WithUnmarshaler(newLogsRequestUnmarshalerFunc(pusher)), - internal.WithBatchFuncs(mergeLogs, mergeSplitLogs), } return NewLogsRequest(ctx, set, requestFromLogs(pusher), append(logsOpts, options...)...) } diff --git a/exporter/exporterhelper/logs_batch.go b/exporter/exporterhelper/logs_batch.go index 296538bc0e0..cb296fda95f 100644 --- a/exporter/exporterhelper/logs_batch.go +++ b/exporter/exporterhelper/logs_batch.go @@ -12,24 +12,23 @@ import ( ) // mergeLogs merges two logs requests into one. 
-func mergeLogs(_ context.Context, r1 Request, r2 Request) (Request, error) { - lr1, ok1 := r1.(*logsRequest) +func (req *logsRequest) Merge(_ context.Context, r2 Request) (Request, error) { lr2, ok2 := r2.(*logsRequest) - if !ok1 || !ok2 { + if !ok2 { return nil, errors.New("invalid input type") } - lr2.ld.ResourceLogs().MoveAndAppendTo(lr1.ld.ResourceLogs()) - return lr1, nil + lr2.ld.ResourceLogs().MoveAndAppendTo(req.ld.ResourceLogs()) + return req, nil } // mergeSplitLogs splits and/or merges the logs into multiple requests based on the MaxSizeConfig. -func mergeSplitLogs(_ context.Context, cfg exporterbatcher.MaxSizeConfig, r1 Request, r2 Request) ([]Request, error) { +func (req *logsRequest) MergeSplit(_ context.Context, cfg exporterbatcher.MaxSizeConfig, r2 Request) ([]Request, error) { var ( res []Request destReq *logsRequest capacityLeft = cfg.MaxSizeItems ) - for _, req := range []Request{r1, r2} { + for _, req := range []Request{req, r2} { if req == nil { continue } diff --git a/exporter/exporterhelper/logs_batch_test.go b/exporter/exporterhelper/logs_batch_test.go index f5e10b5bcc9..92a18bd864b 100644 --- a/exporter/exporterhelper/logs_batch_test.go +++ b/exporter/exporterhelper/logs_batch_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/exporter/exporterbatcher" + "go.opentelemetry.io/collector/exporter/internal" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/testdata" ) @@ -18,7 +19,7 @@ import ( func TestMergeLogs(t *testing.T) { lr1 := &logsRequest{ld: testdata.GenerateLogs(2)} lr2 := &logsRequest{ld: testdata.GenerateLogs(3)} - res, err := mergeLogs(context.Background(), lr1, lr2) + res, err := lr1.Merge(context.Background(), lr2) require.NoError(t, err) assert.Equal(t, 5, res.(*logsRequest).ld.LogRecordCount()) } @@ -26,7 +27,7 @@ func TestMergeLogs(t *testing.T) { func TestMergeLogsInvalidInput(t *testing.T) { lr1 := &tracesRequest{td: 
testdata.GenerateTraces(2)} lr2 := &logsRequest{ld: testdata.GenerateLogs(3)} - _, err := mergeLogs(context.Background(), lr1, lr2) + _, err := lr1.Merge(context.Background(), lr2) assert.Error(t, err) } @@ -34,8 +35,8 @@ func TestMergeSplitLogs(t *testing.T) { tests := []struct { name string cfg exporterbatcher.MaxSizeConfig - lr1 Request - lr2 Request + lr1 internal.Request + lr2 internal.Request expected []*logsRequest }{ { @@ -45,13 +46,6 @@ func TestMergeSplitLogs(t *testing.T) { lr2: &logsRequest{ld: plog.NewLogs()}, expected: []*logsRequest{{ld: plog.NewLogs()}}, }, - { - name: "both_requests_nil", - cfg: exporterbatcher.MaxSizeConfig{MaxSizeItems: 10}, - lr1: nil, - lr2: nil, - expected: []*logsRequest{}, - }, { name: "first_request_empty", cfg: exporterbatcher.MaxSizeConfig{MaxSizeItems: 10}, @@ -60,17 +54,10 @@ func TestMergeSplitLogs(t *testing.T) { expected: []*logsRequest{{ld: testdata.GenerateLogs(5)}}, }, { - name: "first_requests_nil", - cfg: exporterbatcher.MaxSizeConfig{MaxSizeItems: 10}, - lr1: nil, - lr2: &logsRequest{ld: testdata.GenerateLogs(5)}, - expected: []*logsRequest{{ld: testdata.GenerateLogs(5)}}, - }, - { - name: "first_nil_second_empty", + name: "first_empty_second_nil", cfg: exporterbatcher.MaxSizeConfig{MaxSizeItems: 10}, - lr1: nil, - lr2: &logsRequest{ld: plog.NewLogs()}, + lr1: &logsRequest{ld: plog.NewLogs()}, + lr2: nil, expected: []*logsRequest{{ld: plog.NewLogs()}}, }, { @@ -87,7 +74,7 @@ func TestMergeSplitLogs(t *testing.T) { { name: "split_only", cfg: exporterbatcher.MaxSizeConfig{MaxSizeItems: 4}, - lr1: nil, + lr1: &logsRequest{ld: plog.NewLogs()}, lr2: &logsRequest{ld: testdata.GenerateLogs(10)}, expected: []*logsRequest{ {ld: testdata.GenerateLogs(4)}, @@ -132,7 +119,7 @@ func TestMergeSplitLogs(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - res, err := mergeSplitLogs(context.Background(), tt.cfg, tt.lr1, tt.lr2) + res, err := tt.lr1.MergeSplit(context.Background(), tt.cfg, tt.lr2) 
require.NoError(t, err) assert.Equal(t, len(tt.expected), len(res)) for i, r := range res { @@ -146,7 +133,7 @@ func TestMergeSplitLogs(t *testing.T) { func TestMergeSplitLogsInvalidInput(t *testing.T) { r1 := &tracesRequest{td: testdata.GenerateTraces(2)} r2 := &logsRequest{ld: testdata.GenerateLogs(3)} - _, err := mergeSplitLogs(context.Background(), exporterbatcher.MaxSizeConfig{}, r1, r2) + _, err := r1.MergeSplit(context.Background(), exporterbatcher.MaxSizeConfig{}, r2) assert.Error(t, err) } diff --git a/exporter/exporterhelper/metrics.go b/exporter/exporterhelper/metrics.go index 64557029ce7..b2da8895f98 100644 --- a/exporter/exporterhelper/metrics.go +++ b/exporter/exporterhelper/metrics.go @@ -86,7 +86,6 @@ func NewMetrics( } metricsOpts := []Option{ internal.WithMarshaler(metricsRequestMarshaler), internal.WithUnmarshaler(newMetricsRequestUnmarshalerFunc(pusher)), - internal.WithBatchFuncs(mergeMetrics, mergeSplitMetrics), } return NewMetricsRequest(ctx, set, requestFromMetrics(pusher), append(metricsOpts, options...)...) } diff --git a/exporter/exporterhelper/metrics_batch.go b/exporter/exporterhelper/metrics_batch.go index 1a6448c8496..6721563eac4 100644 --- a/exporter/exporterhelper/metrics_batch.go +++ b/exporter/exporterhelper/metrics_batch.go @@ -12,24 +12,23 @@ import ( ) // mergeMetrics merges two metrics requests into one. -func mergeMetrics(_ context.Context, r1 Request, r2 Request) (Request, error) { - mr1, ok1 := r1.(*metricsRequest) +func (req *metricsRequest) Merge(_ context.Context, r2 Request) (Request, error) { mr2, ok2 := r2.(*metricsRequest) - if !ok1 || !ok2 { + if !ok2 { return nil, errors.New("invalid input type") } - mr2.md.ResourceMetrics().MoveAndAppendTo(mr1.md.ResourceMetrics()) - return mr1, nil + mr2.md.ResourceMetrics().MoveAndAppendTo(req.md.ResourceMetrics()) + return req, nil } // mergeSplitMetrics splits and/or merges the metrics into multiple requests based on the MaxSizeConfig. 
-func mergeSplitMetrics(_ context.Context, cfg exporterbatcher.MaxSizeConfig, r1 Request, r2 Request) ([]Request, error) { +func (req *metricsRequest) MergeSplit(_ context.Context, cfg exporterbatcher.MaxSizeConfig, r2 Request) ([]Request, error) { var ( res []Request destReq *metricsRequest capacityLeft = cfg.MaxSizeItems ) - for _, req := range []Request{r1, r2} { + for _, req := range []Request{req, r2} { if req == nil { continue } diff --git a/exporter/exporterhelper/metrics_batch_test.go b/exporter/exporterhelper/metrics_batch_test.go index 860a1eee9c3..854cc59db3a 100644 --- a/exporter/exporterhelper/metrics_batch_test.go +++ b/exporter/exporterhelper/metrics_batch_test.go @@ -18,7 +18,7 @@ import ( func TestMergeMetrics(t *testing.T) { mr1 := &metricsRequest{md: testdata.GenerateMetrics(2)} mr2 := &metricsRequest{md: testdata.GenerateMetrics(3)} - res, err := mergeMetrics(context.Background(), mr1, mr2) + res, err := mr1.Merge(context.Background(), mr2) require.NoError(t, err) assert.Equal(t, 5, res.(*metricsRequest).md.MetricCount()) } @@ -26,7 +26,7 @@ func TestMergeMetrics(t *testing.T) { func TestMergeMetricsInvalidInput(t *testing.T) { mr1 := &tracesRequest{td: testdata.GenerateTraces(2)} mr2 := &metricsRequest{md: testdata.GenerateMetrics(3)} - _, err := mergeMetrics(context.Background(), mr1, mr2) + _, err := mr1.Merge(context.Background(), mr2) assert.Error(t, err) } @@ -45,13 +45,6 @@ func TestMergeSplitMetrics(t *testing.T) { mr2: &metricsRequest{md: pmetric.NewMetrics()}, expected: []*metricsRequest{{md: pmetric.NewMetrics()}}, }, - { - name: "both_requests_nil", - cfg: exporterbatcher.MaxSizeConfig{MaxSizeItems: 10}, - mr1: nil, - mr2: nil, - expected: []*metricsRequest{}, - }, { name: "first_request_empty", cfg: exporterbatcher.MaxSizeConfig{MaxSizeItems: 10}, @@ -60,17 +53,10 @@ func TestMergeSplitMetrics(t *testing.T) { expected: []*metricsRequest{{md: testdata.GenerateMetrics(5)}}, }, { - name: "first_requests_nil", - cfg: 
exporterbatcher.MaxSizeConfig{MaxSizeItems: 10}, - mr1: nil, - mr2: &metricsRequest{md: testdata.GenerateMetrics(5)}, - expected: []*metricsRequest{{md: testdata.GenerateMetrics(5)}}, - }, - { - name: "first_nil_second_empty", + name: "first_empty_second_nil", cfg: exporterbatcher.MaxSizeConfig{MaxSizeItems: 10}, - mr1: nil, - mr2: &metricsRequest{md: pmetric.NewMetrics()}, + mr1: &metricsRequest{md: pmetric.NewMetrics()}, + mr2: nil, expected: []*metricsRequest{{md: pmetric.NewMetrics()}}, }, { @@ -87,7 +73,7 @@ func TestMergeSplitMetrics(t *testing.T) { { name: "split_only", cfg: exporterbatcher.MaxSizeConfig{MaxSizeItems: 14}, - mr1: nil, + mr1: &metricsRequest{md: pmetric.NewMetrics()}, mr2: &metricsRequest{md: testdata.GenerateMetrics(15)}, // 15 metrics, 30 data points expected: []*metricsRequest{ {md: testdata.GenerateMetrics(7)}, // 7 metrics, 14 data points @@ -133,7 +119,7 @@ func TestMergeSplitMetrics(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - res, err := mergeSplitMetrics(context.Background(), tt.cfg, tt.mr1, tt.mr2) + res, err := tt.mr1.MergeSplit(context.Background(), tt.cfg, tt.mr2) require.NoError(t, err) assert.Equal(t, len(tt.expected), len(res)) for i := range res { @@ -146,7 +132,7 @@ func TestMergeSplitMetrics(t *testing.T) { func TestMergeSplitMetricsInvalidInput(t *testing.T) { r1 := &tracesRequest{td: testdata.GenerateTraces(2)} r2 := &metricsRequest{md: testdata.GenerateMetrics(3)} - _, err := mergeSplitMetrics(context.Background(), exporterbatcher.MaxSizeConfig{MaxSizeItems: 10}, r1, r2) + _, err := r1.MergeSplit(context.Background(), exporterbatcher.MaxSizeConfig{MaxSizeItems: 10}, r2) assert.Error(t, err) } diff --git a/exporter/exporterhelper/traces.go b/exporter/exporterhelper/traces.go index 407af781feb..7d7bedbd289 100644 --- a/exporter/exporterhelper/traces.go +++ b/exporter/exporterhelper/traces.go @@ -86,7 +86,6 @@ func NewTraces( } tracesOpts := []Option{ 
internal.WithMarshaler(tracesRequestMarshaler), internal.WithUnmarshaler(newTraceRequestUnmarshalerFunc(pusher)), - internal.WithBatchFuncs(mergeTraces, mergeSplitTraces), } return NewTracesRequest(ctx, set, requestFromTraces(pusher), append(tracesOpts, options...)...) } diff --git a/exporter/exporterhelper/traces_batch.go b/exporter/exporterhelper/traces_batch.go index 1bdada95b7b..ec8a3954610 100644 --- a/exporter/exporterhelper/traces_batch.go +++ b/exporter/exporterhelper/traces_batch.go @@ -12,24 +12,23 @@ import ( ) // mergeTraces merges two traces requests into one. -func mergeTraces(_ context.Context, r1 Request, r2 Request) (Request, error) { - tr1, ok1 := r1.(*tracesRequest) +func (req *tracesRequest) Merge(_ context.Context, r2 Request) (Request, error) { tr2, ok2 := r2.(*tracesRequest) - if !ok1 || !ok2 { + if !ok2 { return nil, errors.New("invalid input type") } - tr2.td.ResourceSpans().MoveAndAppendTo(tr1.td.ResourceSpans()) - return tr1, nil + tr2.td.ResourceSpans().MoveAndAppendTo(req.td.ResourceSpans()) + return req, nil } // mergeSplitTraces splits and/or merges the traces into multiple requests based on the MaxSizeConfig. 
-func mergeSplitTraces(_ context.Context, cfg exporterbatcher.MaxSizeConfig, r1 Request, r2 Request) ([]Request, error) { +func (req *tracesRequest) MergeSplit(_ context.Context, cfg exporterbatcher.MaxSizeConfig, r2 Request) ([]Request, error) { var ( res []Request destReq *tracesRequest capacityLeft = cfg.MaxSizeItems ) - for _, req := range []Request{r1, r2} { + for _, req := range []Request{req, r2} { if req == nil { continue } diff --git a/exporter/exporterhelper/traces_batch_test.go b/exporter/exporterhelper/traces_batch_test.go index d88591b3091..ca83c5cfb91 100644 --- a/exporter/exporterhelper/traces_batch_test.go +++ b/exporter/exporterhelper/traces_batch_test.go @@ -18,7 +18,7 @@ import ( func TestMergeTraces(t *testing.T) { tr1 := &tracesRequest{td: testdata.GenerateTraces(2)} tr2 := &tracesRequest{td: testdata.GenerateTraces(3)} - res, err := mergeTraces(context.Background(), tr1, tr2) + res, err := tr1.Merge(context.Background(), tr2) require.NoError(t, err) assert.Equal(t, 5, res.(*tracesRequest).td.SpanCount()) } @@ -26,7 +26,7 @@ func TestMergeTraces(t *testing.T) { func TestMergeTracesInvalidInput(t *testing.T) { tr1 := &logsRequest{ld: testdata.GenerateLogs(2)} tr2 := &tracesRequest{td: testdata.GenerateTraces(3)} - _, err := mergeTraces(context.Background(), tr1, tr2) + _, err := tr1.Merge(context.Background(), tr2) assert.Error(t, err) } @@ -45,13 +45,6 @@ func TestMergeSplitTraces(t *testing.T) { tr2: &tracesRequest{td: ptrace.NewTraces()}, expected: []*tracesRequest{{td: ptrace.NewTraces()}}, }, - { - name: "both_requests_nil", - cfg: exporterbatcher.MaxSizeConfig{MaxSizeItems: 10}, - tr1: nil, - tr2: nil, - expected: []*tracesRequest{}, - }, { name: "first_request_empty", cfg: exporterbatcher.MaxSizeConfig{MaxSizeItems: 10}, @@ -67,10 +60,10 @@ func TestMergeSplitTraces(t *testing.T) { expected: []*tracesRequest{{td: testdata.GenerateTraces(5)}}, }, { - name: "first_nil_second_empty", + name: "first_empty_second_nil", cfg: 
exporterbatcher.MaxSizeConfig{MaxSizeItems: 10}, - tr1: nil, - tr2: &tracesRequest{td: ptrace.NewTraces()}, + tr1: &tracesRequest{td: ptrace.NewTraces()}, + tr2: nil, expected: []*tracesRequest{{td: ptrace.NewTraces()}}, }, { @@ -87,7 +80,7 @@ func TestMergeSplitTraces(t *testing.T) { { name: "split_only", cfg: exporterbatcher.MaxSizeConfig{MaxSizeItems: 4}, - tr1: nil, + tr1: &tracesRequest{td: ptrace.NewTraces()}, tr2: &tracesRequest{td: testdata.GenerateTraces(10)}, expected: []*tracesRequest{ {td: testdata.GenerateTraces(4)}, @@ -133,7 +126,7 @@ func TestMergeSplitTraces(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - res, err := mergeSplitTraces(context.Background(), tt.cfg, tt.tr1, tt.tr2) + res, err := tt.tr1.MergeSplit(context.Background(), tt.cfg, tt.tr2) require.NoError(t, err) assert.Equal(t, len(tt.expected), len(res)) for i := range res { @@ -146,7 +139,7 @@ func TestMergeSplitTraces(t *testing.T) { func TestMergeSplitTracesInvalidInput(t *testing.T) { r1 := &tracesRequest{td: testdata.GenerateTraces(2)} r2 := &metricsRequest{md: testdata.GenerateMetrics(3)} - _, err := mergeSplitTraces(context.Background(), exporterbatcher.MaxSizeConfig{MaxSizeItems: 10}, r1, r2) + _, err := r1.MergeSplit(context.Background(), exporterbatcher.MaxSizeConfig{MaxSizeItems: 10}, r2) assert.Error(t, err) } diff --git a/exporter/exporterprofiles/go.mod b/exporter/exporterprofiles/go.mod index b70e14af842..b6988e8fa81 100644 --- a/exporter/exporterprofiles/go.mod +++ b/exporter/exporterprofiles/go.mod @@ -19,7 +19,7 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.111.0 // indirect - go.opentelemetry.io/collector/consumer v0.111.0 // indirect + go.opentelemetry.io/collector/consumer v0.111.1-0.20241021181817-007f06b7c4a8 // indirect go.opentelemetry.io/collector/pdata v1.17.0 // indirect 
go.opentelemetry.io/collector/pdata/pprofile v0.111.0 // indirect go.opentelemetry.io/otel v1.31.0 // indirect @@ -66,4 +66,6 @@ replace go.opentelemetry.io/collector/receiver/receiverprofiles => ../../receive replace go.opentelemetry.io/collector/pipeline => ../../pipeline +replace go.opentelemetry.io/collector/exporter/exportertest => ../exportertest + replace go.opentelemetry.io/collector/consumer/consumererror => ../../consumer/consumererror diff --git a/exporter/exportertest/Makefile b/exporter/exportertest/Makefile new file mode 100644 index 00000000000..c1496226e59 --- /dev/null +++ b/exporter/exportertest/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common \ No newline at end of file diff --git a/exporter/exportertest/go.mod b/exporter/exportertest/go.mod new file mode 100644 index 00000000000..3a91bc3de50 --- /dev/null +++ b/exporter/exportertest/go.mod @@ -0,0 +1,84 @@ +module go.opentelemetry.io/collector/exporter/exportertest + +go 1.22.0 + +require ( + github.com/google/uuid v1.6.0 + github.com/stretchr/testify v1.9.0 + go.opentelemetry.io/collector/component v0.111.0 + go.opentelemetry.io/collector/config/configretry v1.17.0 + go.opentelemetry.io/collector/consumer v0.111.1-0.20241021181817-007f06b7c4a8 + go.opentelemetry.io/collector/consumer/consumererror v0.0.0-20241021181817-007f06b7c4a8 + go.opentelemetry.io/collector/consumer/consumertest v0.111.0 + go.opentelemetry.io/collector/exporter v0.111.0 + go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0 + go.opentelemetry.io/collector/pdata v1.17.0 + go.opentelemetry.io/collector/pdata/pprofile v0.111.0 + go.opentelemetry.io/collector/pipeline v0.111.0 + go.opentelemetry.io/collector/receiver v0.111.0 + google.golang.org/grpc v1.67.1 +) + +require ( + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + 
github.com/json-iterator/go v1.1.12 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.111.0 // indirect + go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 // indirect + go.opentelemetry.io/collector/extension v0.111.0 // indirect + go.opentelemetry.io/collector/extension/experimental/storage v0.111.0 // indirect + go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0 // indirect + go.opentelemetry.io/otel v1.31.0 // indirect + go.opentelemetry.io/otel/metric v1.31.0 // indirect + go.opentelemetry.io/otel/sdk v1.31.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.31.0 // indirect + go.opentelemetry.io/otel/trace v1.31.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.0 // indirect + golang.org/x/net v0.28.0 // indirect + golang.org/x/sys v0.26.0 // indirect + golang.org/x/text v0.17.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect + google.golang.org/protobuf v1.35.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) + +replace go.opentelemetry.io/collector/exporter => ../../exporter + +replace go.opentelemetry.io/collector/extension/experimental/storage => ../../extension/experimental/storage + +replace go.opentelemetry.io/collector/pdata/pprofile => ../../pdata/pprofile + +replace go.opentelemetry.io/collector/receiver => ../../receiver + +replace go.opentelemetry.io/collector/consumer/consumertest => ../../consumer/consumertest + +replace go.opentelemetry.io/collector/receiver/receiverprofiles => ../../receiver/receiverprofiles + +replace go.opentelemetry.io/collector/extension => ../../extension + +replace go.opentelemetry.io/collector/config/configtelemetry => ../../config/configtelemetry + +replace 
go.opentelemetry.io/collector/config/configretry => ../../config/configretry + +replace go.opentelemetry.io/collector/pipeline => ../../pipeline + +replace go.opentelemetry.io/collector/pdata => ../../pdata + +replace go.opentelemetry.io/collector/exporter/exporterprofiles => ../exporterprofiles + +replace go.opentelemetry.io/collector/component => ../../component + +replace go.opentelemetry.io/collector/consumer/consumerprofiles => ../../consumer/consumerprofiles + +replace go.opentelemetry.io/collector/pdata/testdata => ../../pdata/testdata + +replace go.opentelemetry.io/collector/consumer => ../../consumer + +replace go.opentelemetry.io/collector/consumer/consumererror => ../../consumer/consumererror diff --git a/exporter/exportertest/go.sum b/exporter/exportertest/go.sum new file mode 100644 index 00000000000..fe17027b364 --- /dev/null +++ b/exporter/exportertest/go.sum @@ -0,0 +1,98 @@ +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/google/go-cmp v0.6.0 
h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/stretchr/objx 
v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= +go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= +go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= +go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= +go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= +go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= +go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= +go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= +go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= +go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.17.0 
h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd h1:6TEm2ZxXoQmFWFlt1vNxvVOa1Q0dXFQD1m/rYjXmS0E= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= +google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.1 
h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/exporter/go.mod b/exporter/go.mod index cfcd58d0f34..a5296306881 100644 --- a/exporter/go.mod +++ b/exporter/go.mod @@ -4,22 +4,20 @@ go 1.22.0 require ( github.com/cenkalti/backoff/v4 v4.3.0 - github.com/google/uuid v1.6.0 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/component v0.111.0 go.opentelemetry.io/collector/config/configretry v1.17.0 go.opentelemetry.io/collector/config/configtelemetry v0.111.0 - go.opentelemetry.io/collector/consumer v0.111.0 - go.opentelemetry.io/collector/consumer/consumererror v0.111.0 + go.opentelemetry.io/collector/consumer v0.111.1-0.20241021181817-007f06b7c4a8 + go.opentelemetry.io/collector/consumer/consumererror v0.0.0-20241021181817-007f06b7c4a8 go.opentelemetry.io/collector/consumer/consumertest v0.111.0 - go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0 + go.opentelemetry.io/collector/exporter/exportertest v0.111.0 go.opentelemetry.io/collector/extension v0.111.0 go.opentelemetry.io/collector/extension/experimental/storage v0.111.0 go.opentelemetry.io/collector/pdata v1.17.0 go.opentelemetry.io/collector/pdata/pprofile v0.111.0 go.opentelemetry.io/collector/pdata/testdata v0.111.0 go.opentelemetry.io/collector/pipeline v0.111.0 - go.opentelemetry.io/collector/receiver v0.111.0 go.opentelemetry.io/otel v1.31.0 go.opentelemetry.io/otel/metric v1.31.0 go.opentelemetry.io/otel/sdk v1.31.0 @@ -28,7 +26,6 @@ require ( go.uber.org/goleak v1.3.0 go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 - google.golang.org/grpc v1.67.1 ) require ( @@ -36,16 +33,20 @@ require ( github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // 
indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 // indirect + go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0 // indirect + go.opentelemetry.io/collector/receiver v0.111.0 // indirect go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0 // indirect golang.org/x/net v0.28.0 // indirect golang.org/x/sys v0.26.0 // indirect golang.org/x/text v0.17.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect + google.golang.org/grpc v1.67.1 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) @@ -82,4 +83,6 @@ replace go.opentelemetry.io/collector/receiver/receiverprofiles => ../receiver/r replace go.opentelemetry.io/collector/exporter/exporterprofiles => ./exporterprofiles +replace go.opentelemetry.io/collector/exporter/exportertest => ./exportertest + replace go.opentelemetry.io/collector/consumer/consumererror => ../consumer/consumererror diff --git a/exporter/internal/request.go b/exporter/internal/request.go index 1b82e23504d..ed6ee39af1c 100644 --- a/exporter/internal/request.go +++ b/exporter/internal/request.go @@ -5,6 +5,8 @@ package internal // import "go.opentelemetry.io/collector/exporter/internal" import ( "context" + + "go.opentelemetry.io/collector/exporter/exporterbatcher" ) // Request represents a single request that can be sent to an external endpoint. @@ -17,6 +19,21 @@ type Request interface { // sent. For example, for OTLP exporter, this value represents the number of spans, // metric data points or log records. ItemsCount() int + // Merge() is a function that merges this request with another one into a single request. + // Do not mutate the requests passed to the function if error can be returned after mutation or if the exporter is + // marked as not mutable. 
+ // Experimental: This API is at the early stage of development and may change without backward compatibility + // until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. + Merge(context.Context, Request) (Request, error) + // MergeSplit() is a function that merge and/or splits this request with another one into multiple requests based on the + // configured limit provided in MaxSizeConfig. + // All the returned requests MUST have a number of items that does not exceed the maximum number of items. + // Size of the last returned request MUST be less or equal than the size of any other returned request. + // The original request MUST not be mutated if error is returned after mutation or if the exporter is + // marked as not mutable. The length of the returned slice MUST not be 0. + // Experimental: This API is at the early stage of development and may change without backward compatibility + // until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. 
+ MergeSplit(context.Context, exporterbatcher.MaxSizeConfig, Request) ([]Request, error) } // RequestErrorHandler is an optional interface that can be implemented by Request to provide a way handle partial diff --git a/exporter/nopexporter/go.mod b/exporter/nopexporter/go.mod index fb1a8261bc6..5566e96cb53 100644 --- a/exporter/nopexporter/go.mod +++ b/exporter/nopexporter/go.mod @@ -8,6 +8,7 @@ require ( go.opentelemetry.io/collector/confmap v1.17.0 go.opentelemetry.io/collector/consumer/consumertest v0.111.0 go.opentelemetry.io/collector/exporter v0.111.0 + go.opentelemetry.io/collector/exporter/exportertest v0.111.0 go.opentelemetry.io/collector/pdata v1.17.0 go.uber.org/goleak v1.3.0 ) @@ -29,8 +30,8 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.111.0 // indirect - go.opentelemetry.io/collector/consumer v0.111.0 // indirect - go.opentelemetry.io/collector/consumer/consumererror v0.111.0 // indirect + go.opentelemetry.io/collector/consumer v0.111.1-0.20241021181817-007f06b7c4a8 // indirect + go.opentelemetry.io/collector/consumer/consumererror v0.0.0-20241021181817-007f06b7c4a8 // indirect go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 // indirect go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0 // indirect go.opentelemetry.io/collector/pdata/pprofile v0.111.0 // indirect @@ -87,4 +88,6 @@ replace go.opentelemetry.io/collector/exporter/exporterprofiles => ../exporterpr replace go.opentelemetry.io/collector/pipeline => ../../pipeline +replace go.opentelemetry.io/collector/exporter/exportertest => ../exportertest + replace go.opentelemetry.io/collector/consumer/consumererror => ../../consumer/consumererror diff --git a/exporter/otlpexporter/go.mod b/exporter/otlpexporter/go.mod index e5ddaf628f2..1e56a02d245 100644 --- a/exporter/otlpexporter/go.mod +++ b/exporter/otlpexporter/go.mod @@ -13,11 +13,12 @@ require 
( go.opentelemetry.io/collector/config/configretry v1.17.0 go.opentelemetry.io/collector/config/configtls v1.17.0 go.opentelemetry.io/collector/confmap v1.17.0 - go.opentelemetry.io/collector/consumer v0.111.0 - go.opentelemetry.io/collector/consumer/consumererror v0.111.0 + go.opentelemetry.io/collector/consumer v0.111.1-0.20241021181817-007f06b7c4a8 + go.opentelemetry.io/collector/consumer/consumererror v0.0.0-20241021181817-007f06b7c4a8 go.opentelemetry.io/collector/exporter v0.111.0 - go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles v0.0.0-00010101000000-000000000000 + go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles v0.0.0-20241021181817-007f06b7c4a8 go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0 + go.opentelemetry.io/collector/exporter/exportertest v0.111.0 go.opentelemetry.io/collector/pdata v1.17.0 go.opentelemetry.io/collector/pdata/pprofile v0.111.0 go.opentelemetry.io/collector/pdata/testdata v0.111.0 @@ -53,14 +54,14 @@ require ( go.opentelemetry.io/collector/config/confignet v1.17.0 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.111.0 // indirect go.opentelemetry.io/collector/config/internal v0.111.0 // indirect - go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.0.0-00010101000000-000000000000 // indirect + go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.0.0-20241021181817-007f06b7c4a8 // indirect go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 // indirect go.opentelemetry.io/collector/consumer/consumertest v0.111.0 // indirect go.opentelemetry.io/collector/extension v0.111.0 // indirect go.opentelemetry.io/collector/extension/auth v0.111.0 // indirect go.opentelemetry.io/collector/extension/experimental/storage v0.111.0 // indirect go.opentelemetry.io/collector/pipeline v0.111.0 // indirect - go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.0.0-00010101000000-000000000000 
// indirect + go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.0.0-20241021181817-007f06b7c4a8 // indirect go.opentelemetry.io/collector/receiver v0.111.0 // indirect go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 // indirect @@ -144,3 +145,5 @@ retract ( v0.76.0 // Depends on retracted pdata v1.0.0-rc10 module, use v0.76.1 v0.69.0 // Release failed, use v0.69.1 ) + +replace go.opentelemetry.io/collector/exporter/exportertest => ../exportertest diff --git a/exporter/otlphttpexporter/go.mod b/exporter/otlphttpexporter/go.mod index 79136a29487..7b014ff176c 100644 --- a/exporter/otlphttpexporter/go.mod +++ b/exporter/otlphttpexporter/go.mod @@ -12,11 +12,12 @@ require ( go.opentelemetry.io/collector/config/configretry v1.17.0 go.opentelemetry.io/collector/config/configtls v1.17.0 go.opentelemetry.io/collector/confmap v1.17.0 - go.opentelemetry.io/collector/consumer v0.111.0 - go.opentelemetry.io/collector/consumer/consumererror v0.111.0 + go.opentelemetry.io/collector/consumer v0.111.1-0.20241021181817-007f06b7c4a8 + go.opentelemetry.io/collector/consumer/consumererror v0.0.0-20241021181817-007f06b7c4a8 go.opentelemetry.io/collector/exporter v0.111.0 - go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles v0.0.0-00010101000000-000000000000 + go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles v0.0.0-20241021181817-007f06b7c4a8 go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0 + go.opentelemetry.io/collector/exporter/exportertest v0.111.0 go.opentelemetry.io/collector/pdata v1.17.0 go.opentelemetry.io/collector/pdata/pprofile v0.111.0 go.uber.org/goleak v1.3.0 @@ -53,14 +54,14 @@ require ( go.opentelemetry.io/collector/config/configauth v0.111.0 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.111.0 // indirect go.opentelemetry.io/collector/config/internal v0.111.0 
// indirect - go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.0.0-00010101000000-000000000000 // indirect + go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.0.0-20241021181817-007f06b7c4a8 // indirect go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 // indirect go.opentelemetry.io/collector/consumer/consumertest v0.111.0 // indirect go.opentelemetry.io/collector/extension v0.111.0 // indirect go.opentelemetry.io/collector/extension/auth v0.111.0 // indirect go.opentelemetry.io/collector/extension/experimental/storage v0.111.0 // indirect go.opentelemetry.io/collector/pipeline v0.111.0 // indirect - go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.0.0-00010101000000-000000000000 // indirect + go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.0.0-20241021181817-007f06b7c4a8 // indirect go.opentelemetry.io/collector/receiver v0.111.0 // indirect go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 // indirect @@ -142,3 +143,5 @@ retract ( v0.76.0 // Depends on retracted pdata v1.0.0-rc10 module, use v0.76.1 v0.69.0 // Release failed, use v0.69.1 ) + +replace go.opentelemetry.io/collector/exporter/exportertest => ../exportertest diff --git a/go.mod b/go.mod index a371497d8e0..04e02e74c54 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/component v0.111.0 go.opentelemetry.io/collector/component/componentstatus v0.111.0 - go.opentelemetry.io/collector/consumer v0.111.0 + go.opentelemetry.io/collector/consumer v0.111.1-0.20241021181817-007f06b7c4a8 go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 go.opentelemetry.io/collector/consumer/consumertest v0.111.0 go.opentelemetry.io/collector/pdata v1.17.0 diff --git a/internal/e2e/go.mod b/internal/e2e/go.mod index 6bcaf0ec560..3185d7914ee 100644 --- 
a/internal/e2e/go.mod +++ b/internal/e2e/go.mod @@ -15,10 +15,11 @@ require ( go.opentelemetry.io/collector/config/configtls v1.17.0 go.opentelemetry.io/collector/confmap v1.17.0 go.opentelemetry.io/collector/connector v0.111.0 - go.opentelemetry.io/collector/connector/connectortest v0.111.0 - go.opentelemetry.io/collector/consumer v0.111.0 + go.opentelemetry.io/collector/connector/connectortest v0.0.0-20241021181817-007f06b7c4a8 + go.opentelemetry.io/collector/consumer v0.111.1-0.20241021181817-007f06b7c4a8 go.opentelemetry.io/collector/consumer/consumertest v0.111.0 go.opentelemetry.io/collector/exporter v0.111.0 + go.opentelemetry.io/collector/exporter/exportertest v0.111.0 go.opentelemetry.io/collector/exporter/otlpexporter v0.111.0 go.opentelemetry.io/collector/exporter/otlphttpexporter v0.111.0 go.opentelemetry.io/collector/extension v0.111.0 @@ -79,20 +80,20 @@ require ( go.opentelemetry.io/collector/config/confignet v1.17.0 // indirect go.opentelemetry.io/collector/config/internal v0.111.0 // indirect go.opentelemetry.io/collector/connector/connectorprofiles v0.111.0 // indirect - go.opentelemetry.io/collector/consumer/consumererror v0.111.0 // indirect - go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.0.0-00010101000000-000000000000 // indirect + go.opentelemetry.io/collector/consumer/consumererror v0.0.0-20241021181817-007f06b7c4a8 // indirect + go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.0.0-20241021181817-007f06b7c4a8 // indirect go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 // indirect - go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles v0.0.0-00010101000000-000000000000 // indirect + go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles v0.0.0-20241021181817-007f06b7c4a8 // indirect go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0 // indirect go.opentelemetry.io/collector/extension/auth v0.111.0 // 
indirect go.opentelemetry.io/collector/extension/experimental/storage v0.111.0 // indirect go.opentelemetry.io/collector/extension/extensioncapabilities v0.111.0 // indirect go.opentelemetry.io/collector/featuregate v1.17.0 // indirect go.opentelemetry.io/collector/pdata/pprofile v0.111.0 // indirect - go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.111.0 // indirect - go.opentelemetry.io/collector/processor v0.111.0 // indirect + go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.0.0-20241021181817-007f06b7c4a8 // indirect + go.opentelemetry.io/collector/processor v0.111.1-0.20241021181817-007f06b7c4a8 // indirect go.opentelemetry.io/collector/processor/processorprofiles v0.111.0 // indirect - go.opentelemetry.io/collector/processor/processortest v0.111.0 // indirect + go.opentelemetry.io/collector/processor/processortest v0.0.0-20241021181817-007f06b7c4a8 // indirect go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0 // indirect go.opentelemetry.io/collector/semconv v0.111.0 // indirect go.opentelemetry.io/contrib/config v0.10.0 // indirect @@ -215,6 +216,8 @@ replace go.opentelemetry.io/collector/pipeline => ../../pipeline replace go.opentelemetry.io/collector/pipeline/pipelineprofiles => ../../pipeline/pipelineprofiles +replace go.opentelemetry.io/collector/exporter/exportertest => ../../exporter/exportertest + replace go.opentelemetry.io/collector/processor/processortest => ../../processor/processortest replace go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles => ../../consumer/consumererror/consumererrorprofiles diff --git a/otelcol/go.mod b/otelcol/go.mod index c3f8aa42d5d..8e05fa818d6 100644 --- a/otelcol/go.mod +++ b/otelcol/go.mod @@ -10,13 +10,14 @@ require ( go.opentelemetry.io/collector/config/configtelemetry v0.111.0 go.opentelemetry.io/collector/confmap v1.17.0 go.opentelemetry.io/collector/connector v0.111.0 - go.opentelemetry.io/collector/connector/connectortest v0.111.0 + 
go.opentelemetry.io/collector/connector/connectortest v0.0.0-20241021181817-007f06b7c4a8 go.opentelemetry.io/collector/exporter v0.111.0 + go.opentelemetry.io/collector/exporter/exportertest v0.111.0 go.opentelemetry.io/collector/extension v0.111.0 go.opentelemetry.io/collector/featuregate v1.17.0 go.opentelemetry.io/collector/pipeline v0.111.0 - go.opentelemetry.io/collector/processor v0.111.0 - go.opentelemetry.io/collector/processor/processortest v0.111.0 + go.opentelemetry.io/collector/processor v0.111.1-0.20241021181817-007f06b7c4a8 + go.opentelemetry.io/collector/processor/processortest v0.0.0-20241021181817-007f06b7c4a8 go.opentelemetry.io/collector/receiver v0.111.0 go.opentelemetry.io/collector/service v0.111.0 go.opentelemetry.io/contrib/config v0.10.0 @@ -68,8 +69,8 @@ require ( github.com/yusufpapurcu/wmi v1.2.4 // indirect go.opentelemetry.io/collector v0.111.0 // indirect go.opentelemetry.io/collector/connector/connectorprofiles v0.111.0 // indirect - go.opentelemetry.io/collector/consumer v0.111.0 // indirect - go.opentelemetry.io/collector/consumer/consumererror v0.111.0 // indirect + go.opentelemetry.io/collector/consumer v0.111.1-0.20241021181817-007f06b7c4a8 // indirect + go.opentelemetry.io/collector/consumer/consumererror v0.0.0-20241021181817-007f06b7c4a8 // indirect go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 // indirect go.opentelemetry.io/collector/consumer/consumertest v0.111.0 // indirect go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0 // indirect @@ -77,7 +78,7 @@ require ( go.opentelemetry.io/collector/pdata v1.17.0 // indirect go.opentelemetry.io/collector/pdata/pprofile v0.111.0 // indirect go.opentelemetry.io/collector/pdata/testdata v0.111.0 // indirect - go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.111.0 // indirect + go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.0.0-20241021181817-007f06b7c4a8 // indirect go.opentelemetry.io/collector/processor/processorprofiles 
v0.111.0 // indirect go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0 // indirect go.opentelemetry.io/collector/semconv v0.111.0 // indirect @@ -184,6 +185,8 @@ replace go.opentelemetry.io/collector/pipeline => ../pipeline replace go.opentelemetry.io/collector/pipeline/pipelineprofiles => ../pipeline/pipelineprofiles +replace go.opentelemetry.io/collector/exporter/exportertest => ../exporter/exportertest + replace go.opentelemetry.io/collector/processor/processortest => ../processor/processortest replace go.opentelemetry.io/collector/consumer/consumererror => ../consumer/consumererror diff --git a/otelcol/otelcoltest/go.mod b/otelcol/otelcoltest/go.mod index f4a47c1555a..d2728f892d7 100644 --- a/otelcol/otelcoltest/go.mod +++ b/otelcol/otelcoltest/go.mod @@ -11,13 +11,14 @@ require ( go.opentelemetry.io/collector/confmap/provider/httpprovider v1.17.0 go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.17.0 go.opentelemetry.io/collector/connector v0.111.0 - go.opentelemetry.io/collector/connector/connectortest v0.111.0 + go.opentelemetry.io/collector/connector/connectortest v0.0.0-20241021181817-007f06b7c4a8 go.opentelemetry.io/collector/exporter v0.111.0 + go.opentelemetry.io/collector/exporter/exportertest v0.111.0 go.opentelemetry.io/collector/extension v0.111.0 go.opentelemetry.io/collector/otelcol v0.111.0 go.opentelemetry.io/collector/pipeline v0.111.0 - go.opentelemetry.io/collector/processor v0.111.0 - go.opentelemetry.io/collector/processor/processortest v0.111.0 + go.opentelemetry.io/collector/processor v0.111.1-0.20241021181817-007f06b7c4a8 + go.opentelemetry.io/collector/processor/processortest v0.0.0-20241021181817-007f06b7c4a8 go.opentelemetry.io/collector/receiver v0.111.0 go.opentelemetry.io/collector/service v0.111.0 go.uber.org/goleak v1.3.0 @@ -65,8 +66,8 @@ require ( go.opentelemetry.io/collector/component/componentstatus v0.111.0 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.111.0 // indirect 
go.opentelemetry.io/collector/connector/connectorprofiles v0.111.0 // indirect - go.opentelemetry.io/collector/consumer v0.111.0 // indirect - go.opentelemetry.io/collector/consumer/consumererror v0.111.0 // indirect + go.opentelemetry.io/collector/consumer v0.111.1-0.20241021181817-007f06b7c4a8 // indirect + go.opentelemetry.io/collector/consumer/consumererror v0.0.0-20241021181817-007f06b7c4a8 // indirect go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 // indirect go.opentelemetry.io/collector/consumer/consumertest v0.111.0 // indirect go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0 // indirect @@ -75,7 +76,7 @@ require ( go.opentelemetry.io/collector/pdata v1.17.0 // indirect go.opentelemetry.io/collector/pdata/pprofile v0.111.0 // indirect go.opentelemetry.io/collector/pdata/testdata v0.111.0 // indirect - go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.111.0 // indirect + go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.0.0-20241021181817-007f06b7c4a8 // indirect go.opentelemetry.io/collector/processor/processorprofiles v0.111.0 // indirect go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0 // indirect go.opentelemetry.io/collector/semconv v0.111.0 // indirect @@ -201,4 +202,6 @@ replace go.opentelemetry.io/collector/pipeline => ../../pipeline replace go.opentelemetry.io/collector/pipeline/pipelineprofiles => ../../pipeline/pipelineprofiles +replace go.opentelemetry.io/collector/exporter/exportertest => ../../exporter/exportertest + replace go.opentelemetry.io/collector/consumer/consumererror => ../../consumer/consumererror diff --git a/processor/batchprocessor/batch_processor.go b/processor/batchprocessor/batch_processor.go index f598d065d3b..146ee59420f 100644 --- a/processor/batchprocessor/batch_processor.go +++ b/processor/batchprocessor/batch_processor.go @@ -37,7 +37,7 @@ var errTooManyBatchers = consumererror.NewPermanent(errors.New("too many batcher // Batches are sent out with any of 
the following conditions: // - batch size reaches cfg.SendBatchSize // - cfg.Timeout is elapsed since the timestamp when the previous batch was sent out. -type batchProcessor struct { +type batchProcessor[T any] struct { logger *zap.Logger timeout time.Duration sendBatchSize int @@ -45,16 +45,7 @@ type batchProcessor struct { // batchFunc is a factory for new batch objects corresponding // with the appropriate signal. - batchFunc func() batch - - // metadataKeys is the configured list of metadata keys. When - // empty, the `singleton` batcher is used. When non-empty, - // each distinct combination of metadata keys and values - // triggers a new batcher, counted in `goroutines`. - metadataKeys []string - - // metadataLimit is the limiting size of the batchers map. - metadataLimit int + batchFunc func() batch[T] shutdownC chan struct{} goroutines sync.WaitGroup @@ -62,16 +53,16 @@ type batchProcessor struct { telemetry *batchProcessorTelemetry // batcher will be either *singletonBatcher or *multiBatcher - batcher batcher + batcher batcher[T] } // batcher is describes a *singletonBatcher or *multiBatcher. -type batcher interface { +type batcher[T any] interface { // start initializes background resources used by this batcher. start(ctx context.Context) error // consume incorporates a new item of data into the pending batch. - consume(ctx context.Context, data any) error + consume(ctx context.Context, data T) error // currentMetadataCardinality returns the number of shards. currentMetadataCardinality() int @@ -80,10 +71,10 @@ type batcher interface { // shard is a single instance of the batch logic. When metadata // keys are in use, one of these is created per distinct combination // of values. -type shard struct { +type shard[T any] struct { // processor refers to this processor, for access to common // configuration. - processor *batchProcessor + processor *batchProcessor[T] // exportCtx is a context with the metadata key-values // corresponding with this shard set. 
@@ -93,44 +84,40 @@ type shard struct { timer *time.Timer // newItem is used to receive data items from producers. - newItem chan any + newItem chan T // batch is an in-flight data item containing one of the // underlying data types. - batch batch + batch batch[T] } // batch is an interface generalizing the individual signal types. -type batch interface { +type batch[T any] interface { // export the current batch - export(ctx context.Context, req any) error + export(ctx context.Context, req T) error - // splitBatch returns a full request built from pending items. - splitBatch(ctx context.Context, sendBatchMaxSize int) (sentBatchSize int, req any) + // split returns a full request built from pending items. + split(sendBatchMaxSize int) (sentBatchSize int, req T) // itemCount returns the size of the current batch itemCount() int // add item to the current batch - add(item any) + add(item T) // sizeBytes counts the OTLP encoding size of the batch - sizeBytes(item any) int + sizeBytes(item T) int } -var _ consumer.Traces = (*batchProcessor)(nil) -var _ consumer.Metrics = (*batchProcessor)(nil) -var _ consumer.Logs = (*batchProcessor)(nil) - // newBatchProcessor returns a new batch processor component. -func newBatchProcessor(set processor.Settings, cfg *Config, batchFunc func() batch) (*batchProcessor, error) { +func newBatchProcessor[T any](set processor.Settings, cfg *Config, batchFunc func() batch[T]) (*batchProcessor[T], error) { // use lower-case, to be consistent with http/2 headers. 
mks := make([]string, len(cfg.MetadataKeys)) for i, k := range cfg.MetadataKeys { mks[i] = strings.ToLower(k) } sort.Strings(mks) - bp := &batchProcessor{ + bp := &batchProcessor[T]{ logger: set.Logger, sendBatchSize: int(cfg.SendBatchSize), @@ -138,17 +125,16 @@ func newBatchProcessor(set processor.Settings, cfg *Config, batchFunc func() bat timeout: cfg.Timeout, batchFunc: batchFunc, shutdownC: make(chan struct{}, 1), - metadataKeys: mks, - metadataLimit: int(cfg.MetadataCardinalityLimit), } - if len(bp.metadataKeys) == 0 { - bp.batcher = &singleShardBatcher{ + if len(mks) == 0 { + bp.batcher = &singleShardBatcher[T]{ processor: bp, - single: nil, // created in start } } else { - bp.batcher = &multiShardBatcher{ - processor: bp, + bp.batcher = &multiShardBatcher[T]{ + metadataKeys: mks, + metadataLimit: int(cfg.MetadataCardinalityLimit), + processor: bp, } } @@ -162,30 +148,30 @@ func newBatchProcessor(set processor.Settings, cfg *Config, batchFunc func() bat } // newShard gets or creates a batcher corresponding with attrs. -func (bp *batchProcessor) newShard(md map[string][]string) *shard { +func (bp *batchProcessor[T]) newShard(md map[string][]string) *shard[T] { exportCtx := client.NewContext(context.Background(), client.Info{ Metadata: client.NewMetadata(md), }) - b := &shard{ + b := &shard[T]{ processor: bp, - newItem: make(chan any, runtime.NumCPU()), + newItem: make(chan T, runtime.NumCPU()), exportCtx: exportCtx, batch: bp.batchFunc(), } return b } -func (bp *batchProcessor) Capabilities() consumer.Capabilities { +func (bp *batchProcessor[T]) Capabilities() consumer.Capabilities { return consumer.Capabilities{MutatesData: true} } // Start is invoked during service startup. -func (bp *batchProcessor) Start(ctx context.Context, _ component.Host) error { +func (bp *batchProcessor[T]) Start(ctx context.Context, _ component.Host) error { return bp.batcher.start(ctx) } // Shutdown is invoked during service shutdown. 
-func (bp *batchProcessor) Shutdown(context.Context) error { +func (bp *batchProcessor[T]) Shutdown(context.Context) error { close(bp.shutdownC) // Wait until all goroutines are done. @@ -193,12 +179,12 @@ func (bp *batchProcessor) Shutdown(context.Context) error { return nil } -func (b *shard) start() { +func (b *shard[T]) start() { b.processor.goroutines.Add(1) go b.startLoop() } -func (b *shard) startLoop() { +func (b *shard[T]) startLoop() { defer b.processor.goroutines.Done() // timerCh ensures we only block when there is a @@ -228,9 +214,6 @@ func (b *shard) startLoop() { } return case item := <-b.newItem: - if item == nil { - continue - } b.processItem(item) case <-timerCh: if b.batch.itemCount() > 0 { @@ -241,7 +224,7 @@ func (b *shard) startLoop() { } } -func (b *shard) processItem(item any) { +func (b *shard[T]) processItem(item T) { b.batch.add(item) sent := false for b.batch.itemCount() > 0 && (!b.hasTimer() || b.batch.itemCount() >= b.processor.sendBatchSize) { @@ -255,24 +238,24 @@ func (b *shard) processItem(item any) { } } -func (b *shard) hasTimer() bool { +func (b *shard[T]) hasTimer() bool { return b.timer != nil } -func (b *shard) stopTimer() { +func (b *shard[T]) stopTimer() { if b.hasTimer() && !b.timer.Stop() { <-b.timer.C } } -func (b *shard) resetTimer() { +func (b *shard[T]) resetTimer() { if b.hasTimer() { b.timer.Reset(b.processor.timeout) } } -func (b *shard) sendItems(trigger trigger) { - sent, req := b.batch.splitBatch(b.exportCtx, b.processor.sendBatchMaxSize) +func (b *shard[T]) sendItems(trigger trigger) { + sent, req := b.batch.split(b.processor.sendBatchMaxSize) err := b.batch.export(b.exportCtx, req) if err != nil { @@ -288,29 +271,38 @@ func (b *shard) sendItems(trigger trigger) { // singleShardBatcher is used when metadataKeys is empty, to avoid the // additional lock and map operations used in multiBatcher. 
-type singleShardBatcher struct { - processor *batchProcessor - single *shard +type singleShardBatcher[T any] struct { + processor *batchProcessor[T] + single *shard[T] } -func (sb *singleShardBatcher) start(context.Context) error { +func (sb *singleShardBatcher[T]) start(context.Context) error { sb.single = sb.processor.newShard(nil) sb.single.start() return nil } -func (sb *singleShardBatcher) consume(_ context.Context, data any) error { +func (sb *singleShardBatcher[T]) consume(_ context.Context, data T) error { sb.single.newItem <- data return nil } -func (sb *singleShardBatcher) currentMetadataCardinality() int { +func (sb *singleShardBatcher[T]) currentMetadataCardinality() int { return 1 } -// multiBatcher is used when metadataKeys is not empty. -type multiShardBatcher struct { - processor *batchProcessor +// multiShardBatcher is used when metadataKeys is not empty. +type multiShardBatcher[T any] struct { + // metadataKeys is the configured list of metadata keys. When + // empty, the `singleton` batcher is used. When non-empty, + // each distinct combination of metadata keys and values + // triggers a new batcher, counted in `goroutines`. + metadataKeys []string + + // metadataLimit is the limiting size of the batchers map. + metadataLimit int + + processor *batchProcessor[T] batchers sync.Map // Guards the size and the storing logic to ensure no more than limit items are stored. @@ -319,17 +311,17 @@ type multiShardBatcher struct { size int } -func (mb *multiShardBatcher) start(context.Context) error { +func (mb *multiShardBatcher[T]) start(context.Context) error { return nil } -func (mb *multiShardBatcher) consume(ctx context.Context, data any) error { +func (mb *multiShardBatcher[T]) consume(ctx context.Context, data T) error { // Get each metadata key value, form the corresponding // attribute set for use as a map lookup key. 
info := client.FromContext(ctx) md := map[string][]string{} var attrs []attribute.KeyValue - for _, k := range mb.processor.metadataKeys { + for _, k := range mb.metadataKeys { // Lookup the value in the incoming metadata, copy it // into the outgoing metadata, and create a unique // value for the attributeSet. @@ -346,7 +338,7 @@ func (mb *multiShardBatcher) consume(ctx context.Context, data any) error { b, ok := mb.batchers.Load(aset) if !ok { mb.lock.Lock() - if mb.processor.metadataLimit != 0 && mb.size >= mb.processor.metadataLimit { + if mb.metadataLimit != 0 && mb.size >= mb.metadataLimit { mb.lock.Unlock() return errTooManyBatchers } @@ -357,49 +349,72 @@ func (mb *multiShardBatcher) consume(ctx context.Context, data any) error { b, loaded = mb.batchers.LoadOrStore(aset, mb.processor.newShard(md)) if !loaded { // Start the goroutine only if we added the object to the map, otherwise is already started. - b.(*shard).start() + b.(*shard[T]).start() mb.size++ } mb.lock.Unlock() } - b.(*shard).newItem <- data + b.(*shard[T]).newItem <- data return nil } -func (mb *multiShardBatcher) currentMetadataCardinality() int { +func (mb *multiShardBatcher[T]) currentMetadataCardinality() int { mb.lock.Lock() defer mb.lock.Unlock() return mb.size } -// ConsumeTraces implements processor.Traces -func (bp *batchProcessor) ConsumeTraces(ctx context.Context, td ptrace.Traces) error { - return bp.batcher.consume(ctx, td) +type tracesBatchProcessor struct { + *batchProcessor[ptrace.Traces] } -// ConsumeMetrics implements processor.Metrics -func (bp *batchProcessor) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error { - return bp.batcher.consume(ctx, md) +// newTracesBatchProcessor creates a new batch processor that batches traces by size or with timeout +func newTracesBatchProcessor(set processor.Settings, next consumer.Traces, cfg *Config) (processor.Traces, error) { + bp, err := newBatchProcessor(set, cfg, func() batch[ptrace.Traces] { return newBatchTraces(next) }) 
+ if err != nil { + return nil, err + } + return &tracesBatchProcessor{batchProcessor: bp}, nil } -// ConsumeLogs implements processor.Logs -func (bp *batchProcessor) ConsumeLogs(ctx context.Context, ld plog.Logs) error { - return bp.batcher.consume(ctx, ld) +func (t *tracesBatchProcessor) ConsumeTraces(ctx context.Context, td ptrace.Traces) error { + return t.batcher.consume(ctx, td) } -// newBatchTraces creates a new batch processor that batches traces by size or with timeout -func newBatchTracesProcessor(set processor.Settings, next consumer.Traces, cfg *Config) (*batchProcessor, error) { - return newBatchProcessor(set, cfg, func() batch { return newBatchTraces(next) }) +type metricsBatchProcessor struct { + *batchProcessor[pmetric.Metrics] } -// newBatchMetricsProcessor creates a new batch processor that batches metrics by size or with timeout -func newBatchMetricsProcessor(set processor.Settings, next consumer.Metrics, cfg *Config) (*batchProcessor, error) { - return newBatchProcessor(set, cfg, func() batch { return newBatchMetrics(next) }) +// newMetricsBatchProcessor creates a new batch processor that batches metrics by size or with timeout +func newMetricsBatchProcessor(set processor.Settings, next consumer.Metrics, cfg *Config) (processor.Metrics, error) { + bp, err := newBatchProcessor(set, cfg, func() batch[pmetric.Metrics] { return newMetricsBatch(next) }) + if err != nil { + return nil, err + } + return &metricsBatchProcessor{batchProcessor: bp}, nil +} + +// ConsumeMetrics implements processor.Metrics +func (m *metricsBatchProcessor) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error { + return m.batcher.consume(ctx, md) } -// newBatchLogsProcessor creates a new batch processor that batches logs by size or with timeout -func newBatchLogsProcessor(set processor.Settings, next consumer.Logs, cfg *Config) (*batchProcessor, error) { - return newBatchProcessor(set, cfg, func() batch { return newBatchLogs(next) }) +type logsBatchProcessor struct 
{ + *batchProcessor[plog.Logs] +} + +// newLogsBatchProcessor creates a new batch processor that batches logs by size or with timeout +func newLogsBatchProcessor(set processor.Settings, next consumer.Logs, cfg *Config) (processor.Logs, error) { + bp, err := newBatchProcessor(set, cfg, func() batch[plog.Logs] { return newBatchLogs(next) }) + if err != nil { + return nil, err + } + return &logsBatchProcessor{batchProcessor: bp}, nil +} + +// ConsumeLogs implements processor.Logs +func (l *logsBatchProcessor) ConsumeLogs(ctx context.Context, ld plog.Logs) error { + return l.batcher.consume(ctx, ld) } type batchTraces struct { @@ -414,8 +429,7 @@ func newBatchTraces(nextConsumer consumer.Traces) *batchTraces { } // add updates current batchTraces by adding new TraceData object -func (bt *batchTraces) add(item any) { - td := item.(ptrace.Traces) +func (bt *batchTraces) add(td ptrace.Traces) { newSpanCount := td.SpanCount() if newSpanCount == 0 { return @@ -425,29 +439,28 @@ func (bt *batchTraces) add(item any) { td.ResourceSpans().MoveAndAppendTo(bt.traceData.ResourceSpans()) } -func (bt *batchTraces) sizeBytes(data any) int { - return bt.sizer.TracesSize(data.(ptrace.Traces)) +func (bt *batchTraces) sizeBytes(td ptrace.Traces) int { + return bt.sizer.TracesSize(td) } -func (bt *batchTraces) export(ctx context.Context, req any) error { - td := req.(ptrace.Traces) +func (bt *batchTraces) export(ctx context.Context, td ptrace.Traces) error { return bt.nextConsumer.ConsumeTraces(ctx, td) } -func (bt *batchTraces) splitBatch(_ context.Context, sendBatchMaxSize int) (int, any) { - var req ptrace.Traces +func (bt *batchTraces) split(sendBatchMaxSize int) (int, ptrace.Traces) { + var td ptrace.Traces var sent int if sendBatchMaxSize > 0 && bt.itemCount() > sendBatchMaxSize { - req = splitTraces(sendBatchMaxSize, bt.traceData) + td = splitTraces(sendBatchMaxSize, bt.traceData) bt.spanCount -= sendBatchMaxSize sent = sendBatchMaxSize } else { - req = bt.traceData + td = 
bt.traceData sent = bt.spanCount bt.traceData = ptrace.NewTraces() bt.spanCount = 0 } - return sent, req + return sent, td } func (bt *batchTraces) itemCount() int { @@ -461,43 +474,40 @@ type batchMetrics struct { sizer pmetric.Sizer } -func newBatchMetrics(nextConsumer consumer.Metrics) *batchMetrics { +func newMetricsBatch(nextConsumer consumer.Metrics) *batchMetrics { return &batchMetrics{nextConsumer: nextConsumer, metricData: pmetric.NewMetrics(), sizer: &pmetric.ProtoMarshaler{}} } -func (bm *batchMetrics) sizeBytes(data any) int { - return bm.sizer.MetricsSize(data.(pmetric.Metrics)) +func (bm *batchMetrics) sizeBytes(md pmetric.Metrics) int { + return bm.sizer.MetricsSize(md) } -func (bm *batchMetrics) export(ctx context.Context, req any) error { - md := req.(pmetric.Metrics) +func (bm *batchMetrics) export(ctx context.Context, md pmetric.Metrics) error { return bm.nextConsumer.ConsumeMetrics(ctx, md) } -func (bm *batchMetrics) splitBatch(_ context.Context, sendBatchMaxSize int) (int, any) { - var req pmetric.Metrics +func (bm *batchMetrics) split(sendBatchMaxSize int) (int, pmetric.Metrics) { + var md pmetric.Metrics var sent int if sendBatchMaxSize > 0 && bm.dataPointCount > sendBatchMaxSize { - req = splitMetrics(sendBatchMaxSize, bm.metricData) + md = splitMetrics(sendBatchMaxSize, bm.metricData) bm.dataPointCount -= sendBatchMaxSize sent = sendBatchMaxSize } else { - req = bm.metricData + md = bm.metricData sent = bm.dataPointCount bm.metricData = pmetric.NewMetrics() bm.dataPointCount = 0 } - return sent, req + return sent, md } func (bm *batchMetrics) itemCount() int { return bm.dataPointCount } -func (bm *batchMetrics) add(item any) { - md := item.(pmetric.Metrics) - +func (bm *batchMetrics) add(md pmetric.Metrics) { newDataPointCount := md.DataPointCount() if newDataPointCount == 0 { return @@ -517,39 +527,36 @@ func newBatchLogs(nextConsumer consumer.Logs) *batchLogs { return &batchLogs{nextConsumer: nextConsumer, logData: plog.NewLogs(), sizer: 
&plog.ProtoMarshaler{}} } -func (bl *batchLogs) sizeBytes(data any) int { - return bl.sizer.LogsSize(data.(plog.Logs)) +func (bl *batchLogs) sizeBytes(ld plog.Logs) int { + return bl.sizer.LogsSize(ld) } -func (bl *batchLogs) export(ctx context.Context, req any) error { - ld := req.(plog.Logs) +func (bl *batchLogs) export(ctx context.Context, ld plog.Logs) error { return bl.nextConsumer.ConsumeLogs(ctx, ld) } -func (bl *batchLogs) splitBatch(_ context.Context, sendBatchMaxSize int) (int, any) { - var req plog.Logs +func (bl *batchLogs) split(sendBatchMaxSize int) (int, plog.Logs) { + var ld plog.Logs var sent int if sendBatchMaxSize > 0 && bl.logCount > sendBatchMaxSize { - req = splitLogs(sendBatchMaxSize, bl.logData) + ld = splitLogs(sendBatchMaxSize, bl.logData) bl.logCount -= sendBatchMaxSize sent = sendBatchMaxSize } else { - req = bl.logData + ld = bl.logData sent = bl.logCount bl.logData = plog.NewLogs() bl.logCount = 0 } - return sent, req + return sent, ld } func (bl *batchLogs) itemCount() int { return bl.logCount } -func (bl *batchLogs) add(item any) { - ld := item.(plog.Logs) - +func (bl *batchLogs) add(ld plog.Logs) { newLogsCount := ld.LogRecordCount() if newLogsCount == 0 { return diff --git a/processor/batchprocessor/batch_processor_test.go b/processor/batchprocessor/batch_processor_test.go index 3d2e6571406..a054bc008bc 100644 --- a/processor/batchprocessor/batch_processor_test.go +++ b/processor/batchprocessor/batch_processor_test.go @@ -86,7 +86,7 @@ func TestBatchProcessorSpansDelivered(t *testing.T) { cfg.SendBatchSize = 128 creationSet := processortest.NewNopSettings() creationSet.MetricsLevel = configtelemetry.LevelDetailed - batcher, err := newBatchTracesProcessor(creationSet, sink, cfg) + batcher, err := newTracesBatchProcessor(creationSet, sink, cfg) require.NoError(t, err) require.NoError(t, batcher.Start(context.Background(), componenttest.NewNopHost())) @@ -129,7 +129,7 @@ func TestBatchProcessorSpansDeliveredEnforceBatchSize(t 
*testing.T) { cfg.SendBatchMaxSize = 130 creationSet := processortest.NewNopSettings() creationSet.MetricsLevel = configtelemetry.LevelDetailed - batcher, err := newBatchTracesProcessor(creationSet, sink, cfg) + batcher, err := newTracesBatchProcessor(creationSet, sink, cfg) require.NoError(t, err) require.NoError(t, batcher.Start(context.Background(), componenttest.NewNopHost())) @@ -183,7 +183,7 @@ func TestBatchProcessorSentBySize(t *testing.T) { cfg.Timeout = 500 * time.Millisecond creationSet := tel.NewSettings() creationSet.MetricsLevel = configtelemetry.LevelDetailed - batcher, err := newBatchTracesProcessor(creationSet, sink, cfg) + batcher, err := newTracesBatchProcessor(creationSet, sink, cfg) require.NoError(t, err) require.NoError(t, batcher.Start(context.Background(), componenttest.NewNopHost())) @@ -304,7 +304,7 @@ func TestBatchProcessorSentBySizeWithMaxSize(t *testing.T) { cfg.Timeout = 500 * time.Millisecond creationSet := tel.NewSettings() creationSet.MetricsLevel = configtelemetry.LevelDetailed - batcher, err := newBatchTracesProcessor(creationSet, sink, cfg) + batcher, err := newTracesBatchProcessor(creationSet, sink, cfg) require.NoError(t, err) require.NoError(t, batcher.Start(context.Background(), componenttest.NewNopHost())) @@ -442,7 +442,7 @@ func TestBatchProcessorSentByTimeout(t *testing.T) { creationSet := processortest.NewNopSettings() creationSet.MetricsLevel = configtelemetry.LevelDetailed - batcher, err := newBatchTracesProcessor(creationSet, sink, cfg) + batcher, err := newTracesBatchProcessor(creationSet, sink, cfg) require.NoError(t, err) require.NoError(t, batcher.Start(context.Background(), componenttest.NewNopHost())) @@ -489,7 +489,7 @@ func TestBatchProcessorTraceSendWhenClosing(t *testing.T) { creationSet := processortest.NewNopSettings() creationSet.MetricsLevel = configtelemetry.LevelDetailed - batcher, err := newBatchTracesProcessor(creationSet, sink, &cfg) + batcher, err := newTracesBatchProcessor(creationSet, sink, 
&cfg) require.NoError(t, err) require.NoError(t, batcher.Start(context.Background(), componenttest.NewNopHost())) @@ -520,7 +520,7 @@ func TestBatchMetricProcessor_ReceivingData(t *testing.T) { creationSet := processortest.NewNopSettings() creationSet.MetricsLevel = configtelemetry.LevelDetailed - batcher, err := newBatchMetricsProcessor(creationSet, sink, &cfg) + batcher, err := newMetricsBatchProcessor(creationSet, sink, &cfg) require.NoError(t, err) require.NoError(t, batcher.Start(context.Background(), componenttest.NewNopHost())) @@ -576,7 +576,7 @@ func TestBatchMetricProcessorBatchSize(t *testing.T) { creationSet := tel.NewSettings() creationSet.MetricsLevel = configtelemetry.LevelDetailed - batcher, err := newBatchMetricsProcessor(creationSet, sink, &cfg) + batcher, err := newMetricsBatchProcessor(creationSet, sink, &cfg) require.NoError(t, err) require.NoError(t, batcher.Start(context.Background(), componenttest.NewNopHost())) @@ -686,12 +686,12 @@ func TestBatchMetrics_UnevenBatchMaxSize(t *testing.T) { dataPointsPerMetric := 2 sendBatchMaxSize := 99 - batchMetrics := newBatchMetrics(sink) + batchMetrics := newMetricsBatch(sink) md := testdata.GenerateMetrics(metricsCount) batchMetrics.add(md) require.Equal(t, dataPointsPerMetric*metricsCount, batchMetrics.dataPointCount) - sent, req := batchMetrics.splitBatch(ctx, sendBatchMaxSize) + sent, req := batchMetrics.split(sendBatchMaxSize) sendErr := batchMetrics.export(ctx, req) require.NoError(t, sendErr) require.Equal(t, sendBatchMaxSize, sent) @@ -710,7 +710,7 @@ func TestBatchMetricsProcessor_Timeout(t *testing.T) { creationSet := processortest.NewNopSettings() creationSet.MetricsLevel = configtelemetry.LevelDetailed - batcher, err := newBatchMetricsProcessor(creationSet, sink, &cfg) + batcher, err := newMetricsBatchProcessor(creationSet, sink, &cfg) require.NoError(t, err) require.NoError(t, batcher.Start(context.Background(), componenttest.NewNopHost())) @@ -759,7 +759,7 @@ func 
TestBatchMetricProcessor_Shutdown(t *testing.T) { creationSet := processortest.NewNopSettings() creationSet.MetricsLevel = configtelemetry.LevelDetailed - batcher, err := newBatchMetricsProcessor(creationSet, sink, &cfg) + batcher, err := newMetricsBatchProcessor(creationSet, sink, &cfg) require.NoError(t, err) require.NoError(t, batcher.Start(context.Background(), componenttest.NewNopHost())) @@ -858,7 +858,7 @@ func runMetricsProcessorBenchmark(b *testing.B, cfg Config) { creationSet := processortest.NewNopSettings() creationSet.MetricsLevel = configtelemetry.LevelDetailed metricsPerRequest := 1000 - batcher, err := newBatchMetricsProcessor(creationSet, sink, &cfg) + batcher, err := newMetricsBatchProcessor(creationSet, sink, &cfg) require.NoError(b, err) require.NoError(b, batcher.Start(ctx, componenttest.NewNopHost())) @@ -905,7 +905,7 @@ func TestBatchLogProcessor_ReceivingData(t *testing.T) { creationSet := processortest.NewNopSettings() creationSet.MetricsLevel = configtelemetry.LevelDetailed - batcher, err := newBatchLogsProcessor(creationSet, sink, &cfg) + batcher, err := newLogsBatchProcessor(creationSet, sink, &cfg) require.NoError(t, err) require.NoError(t, batcher.Start(context.Background(), componenttest.NewNopHost())) @@ -959,7 +959,7 @@ func TestBatchLogProcessor_BatchSize(t *testing.T) { creationSet := tel.NewSettings() creationSet.MetricsLevel = configtelemetry.LevelDetailed - batcher, err := newBatchLogsProcessor(creationSet, sink, &cfg) + batcher, err := newLogsBatchProcessor(creationSet, sink, &cfg) require.NoError(t, err) require.NoError(t, batcher.Start(context.Background(), componenttest.NewNopHost())) @@ -1073,7 +1073,7 @@ func TestBatchLogsProcessor_Timeout(t *testing.T) { creationSet := processortest.NewNopSettings() creationSet.MetricsLevel = configtelemetry.LevelDetailed - batcher, err := newBatchLogsProcessor(creationSet, sink, &cfg) + batcher, err := newLogsBatchProcessor(creationSet, sink, &cfg) require.NoError(t, err) 
require.NoError(t, batcher.Start(context.Background(), componenttest.NewNopHost())) @@ -1122,7 +1122,7 @@ func TestBatchLogProcessor_Shutdown(t *testing.T) { creationSet := processortest.NewNopSettings() creationSet.MetricsLevel = configtelemetry.LevelDetailed - batcher, err := newBatchLogsProcessor(creationSet, sink, &cfg) + batcher, err := newLogsBatchProcessor(creationSet, sink, &cfg) require.NoError(t, err) require.NoError(t, batcher.Start(context.Background(), componenttest.NewNopHost())) @@ -1201,7 +1201,7 @@ func TestBatchProcessorSpansBatchedByMetadata(t *testing.T) { cfg.MetadataKeys = []string{"token1", "token2"} creationSet := processortest.NewNopSettings() creationSet.MetricsLevel = configtelemetry.LevelDetailed - batcher, err := newBatchTracesProcessor(creationSet, sink, cfg) + batcher, err := newTracesBatchProcessor(creationSet, sink, cfg) require.NoError(t, err) require.NoError(t, batcher.Start(context.Background(), componenttest.NewNopHost())) @@ -1293,7 +1293,7 @@ func TestBatchProcessorMetadataCardinalityLimit(t *testing.T) { cfg.MetadataKeys = []string{"token"} cfg.MetadataCardinalityLimit = cardLimit creationSet := processortest.NewNopSettings() - batcher, err := newBatchTracesProcessor(creationSet, sink, cfg) + batcher, err := newTracesBatchProcessor(creationSet, sink, cfg) require.NoError(t, err) require.NoError(t, batcher.Start(context.Background(), componenttest.NewNopHost())) @@ -1336,7 +1336,7 @@ func TestBatchZeroConfig(t *testing.T) { sink := new(consumertest.LogsSink) creationSet := processortest.NewNopSettings() creationSet.MetricsLevel = configtelemetry.LevelDetailed - batcher, err := newBatchLogsProcessor(creationSet, sink, &cfg) + batcher, err := newLogsBatchProcessor(creationSet, sink, &cfg) require.NoError(t, err) require.NoError(t, batcher.Start(context.Background(), componenttest.NewNopHost())) defer func() { require.NoError(t, batcher.Shutdown(context.Background())) }() @@ -1377,7 +1377,7 @@ func TestBatchSplitOnly(t 
*testing.T) { sink := new(consumertest.LogsSink) creationSet := processortest.NewNopSettings() creationSet.MetricsLevel = configtelemetry.LevelDetailed - batcher, err := newBatchLogsProcessor(creationSet, sink, &cfg) + batcher, err := newLogsBatchProcessor(creationSet, sink, &cfg) require.NoError(t, err) require.NoError(t, batcher.Start(context.Background(), componenttest.NewNopHost())) defer func() { require.NoError(t, batcher.Shutdown(context.Background())) }() diff --git a/processor/batchprocessor/factory.go b/processor/batchprocessor/factory.go index 12fcbb9e6ab..2cb52a12c39 100644 --- a/processor/batchprocessor/factory.go +++ b/processor/batchprocessor/factory.go @@ -49,7 +49,7 @@ func createTraces( cfg component.Config, nextConsumer consumer.Traces, ) (processor.Traces, error) { - return newBatchTracesProcessor(set, nextConsumer, cfg.(*Config)) + return newTracesBatchProcessor(set, nextConsumer, cfg.(*Config)) } func createMetrics( @@ -58,7 +58,7 @@ func createMetrics( cfg component.Config, nextConsumer consumer.Metrics, ) (processor.Metrics, error) { - return newBatchMetricsProcessor(set, nextConsumer, cfg.(*Config)) + return newMetricsBatchProcessor(set, nextConsumer, cfg.(*Config)) } func createLogs( @@ -67,5 +67,5 @@ func createLogs( cfg component.Config, nextConsumer consumer.Logs, ) (processor.Logs, error) { - return newBatchLogsProcessor(set, nextConsumer, cfg.(*Config)) + return newLogsBatchProcessor(set, nextConsumer, cfg.(*Config)) } diff --git a/processor/batchprocessor/go.mod b/processor/batchprocessor/go.mod index 5350beccf75..c5e72128cdf 100644 --- a/processor/batchprocessor/go.mod +++ b/processor/batchprocessor/go.mod @@ -8,13 +8,13 @@ require ( go.opentelemetry.io/collector/component v0.111.0 go.opentelemetry.io/collector/config/configtelemetry v0.111.0 go.opentelemetry.io/collector/confmap v1.17.0 - go.opentelemetry.io/collector/consumer v0.111.0 - go.opentelemetry.io/collector/consumer/consumererror v0.0.0-00010101000000-000000000000 + 
go.opentelemetry.io/collector/consumer v0.111.1-0.20241021181817-007f06b7c4a8 + go.opentelemetry.io/collector/consumer/consumererror v0.0.0-20241021093951-f2b31d131ae2 go.opentelemetry.io/collector/consumer/consumertest v0.111.0 go.opentelemetry.io/collector/pdata v1.17.0 go.opentelemetry.io/collector/pdata/testdata v0.111.0 - go.opentelemetry.io/collector/processor v0.111.0 - go.opentelemetry.io/collector/processor/processortest v0.111.0 + go.opentelemetry.io/collector/processor v0.111.1-0.20241021181817-007f06b7c4a8 + go.opentelemetry.io/collector/processor/processortest v0.0.0-20241021181817-007f06b7c4a8 go.opentelemetry.io/otel v1.31.0 go.opentelemetry.io/otel/metric v1.31.0 go.opentelemetry.io/otel/sdk/metric v1.31.0 diff --git a/processor/go.mod b/processor/go.mod index edd9c909436..c7adacdd7c4 100644 --- a/processor/go.mod +++ b/processor/go.mod @@ -6,11 +6,11 @@ require ( github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/component v0.111.0 go.opentelemetry.io/collector/config/configtelemetry v0.111.0 - go.opentelemetry.io/collector/consumer v0.111.0 + go.opentelemetry.io/collector/consumer v0.111.1-0.20241021181817-007f06b7c4a8 go.opentelemetry.io/collector/consumer/consumertest v0.111.0 go.opentelemetry.io/collector/pdata v1.17.0 go.opentelemetry.io/collector/pipeline v0.111.0 - go.opentelemetry.io/collector/processor/processortest v0.111.0 + go.opentelemetry.io/collector/processor/processortest v0.0.0-20241021181817-007f06b7c4a8 go.opentelemetry.io/otel v1.31.0 go.opentelemetry.io/otel/metric v1.31.0 go.opentelemetry.io/otel/sdk/metric v1.31.0 diff --git a/processor/memorylimiterprocessor/go.mod b/processor/memorylimiterprocessor/go.mod index 6f09168b4ed..9859071ef59 100644 --- a/processor/memorylimiterprocessor/go.mod +++ b/processor/memorylimiterprocessor/go.mod @@ -7,14 +7,14 @@ require ( go.opentelemetry.io/collector/component v0.111.0 go.opentelemetry.io/collector/config/configtelemetry v0.111.0 go.opentelemetry.io/collector/confmap 
v1.17.0 - go.opentelemetry.io/collector/consumer v0.111.0 - go.opentelemetry.io/collector/consumer/consumererror v0.0.0-00010101000000-000000000000 + go.opentelemetry.io/collector/consumer v0.111.1-0.20241021181817-007f06b7c4a8 + go.opentelemetry.io/collector/consumer/consumererror v0.0.0-20241021093951-f2b31d131ae2 go.opentelemetry.io/collector/consumer/consumertest v0.111.0 go.opentelemetry.io/collector/internal/memorylimiter v0.111.0 go.opentelemetry.io/collector/pdata v1.17.0 go.opentelemetry.io/collector/pipeline v0.111.0 - go.opentelemetry.io/collector/processor v0.111.0 - go.opentelemetry.io/collector/processor/processortest v0.111.0 + go.opentelemetry.io/collector/processor v0.111.1-0.20241021181817-007f06b7c4a8 + go.opentelemetry.io/collector/processor/processortest v0.0.0-20241021181817-007f06b7c4a8 go.opentelemetry.io/otel v1.31.0 go.opentelemetry.io/otel/metric v1.31.0 go.opentelemetry.io/otel/sdk/metric v1.31.0 diff --git a/processor/processorprofiles/go.mod b/processor/processorprofiles/go.mod index 5ff65fd9c8b..78a7a720bf3 100644 --- a/processor/processorprofiles/go.mod +++ b/processor/processorprofiles/go.mod @@ -8,7 +8,7 @@ require ( go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 go.opentelemetry.io/collector/consumer/consumertest v0.111.0 go.opentelemetry.io/collector/pipeline v0.111.0 - go.opentelemetry.io/collector/processor v0.111.0 + go.opentelemetry.io/collector/processor v0.111.1-0.20241021181817-007f06b7c4a8 ) require ( @@ -19,7 +19,7 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.111.0 // indirect - go.opentelemetry.io/collector/consumer v0.111.0 // indirect + go.opentelemetry.io/collector/consumer v0.111.1-0.20241021181817-007f06b7c4a8 // indirect go.opentelemetry.io/collector/pdata v1.17.0 // indirect go.opentelemetry.io/collector/pdata/pprofile v0.111.0 // indirect go.opentelemetry.io/otel v1.31.0 // 
indirect diff --git a/processor/processortest/go.mod b/processor/processortest/go.mod index 0766537bb37..da387fa8518 100644 --- a/processor/processortest/go.mod +++ b/processor/processortest/go.mod @@ -7,14 +7,14 @@ require ( github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/component v0.111.0 go.opentelemetry.io/collector/component/componentstatus v0.111.0 - go.opentelemetry.io/collector/consumer v0.111.0 + go.opentelemetry.io/collector/consumer v0.111.1-0.20241021181817-007f06b7c4a8 go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 go.opentelemetry.io/collector/consumer/consumertest v0.111.0 go.opentelemetry.io/collector/pdata v1.17.0 go.opentelemetry.io/collector/pdata/pprofile v0.111.0 go.opentelemetry.io/collector/pdata/testdata v0.111.0 go.opentelemetry.io/collector/pipeline v0.111.0 - go.opentelemetry.io/collector/processor v0.111.0 + go.opentelemetry.io/collector/processor v0.111.1-0.20241021181817-007f06b7c4a8 go.opentelemetry.io/collector/processor/processorprofiles v0.111.0 go.uber.org/goleak v1.3.0 ) diff --git a/receiver/go.mod b/receiver/go.mod index 7fe4c8e0fa3..78ddb46c5f0 100644 --- a/receiver/go.mod +++ b/receiver/go.mod @@ -7,8 +7,8 @@ require ( github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/component v0.111.0 go.opentelemetry.io/collector/config/configtelemetry v0.111.0 - go.opentelemetry.io/collector/consumer v0.111.0 - go.opentelemetry.io/collector/consumer/consumererror v0.0.0-00010101000000-000000000000 + go.opentelemetry.io/collector/consumer v0.111.1-0.20241021181817-007f06b7c4a8 + go.opentelemetry.io/collector/consumer/consumererror v0.0.0-20241021093951-f2b31d131ae2 go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 go.opentelemetry.io/collector/consumer/consumertest v0.111.0 go.opentelemetry.io/collector/pdata v1.17.0 diff --git a/receiver/nopreceiver/go.mod b/receiver/nopreceiver/go.mod index 739cedff950..825f19c1f1f 100644 --- a/receiver/nopreceiver/go.mod +++ 
b/receiver/nopreceiver/go.mod @@ -6,7 +6,7 @@ require ( github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/component v0.111.0 go.opentelemetry.io/collector/confmap v1.17.0 - go.opentelemetry.io/collector/consumer v0.111.0 + go.opentelemetry.io/collector/consumer v0.111.1-0.20241021181817-007f06b7c4a8 go.opentelemetry.io/collector/consumer/consumertest v0.111.0 go.opentelemetry.io/collector/receiver v0.111.0 go.uber.org/goleak v1.3.0 @@ -29,7 +29,7 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.111.0 // indirect - go.opentelemetry.io/collector/consumer/consumererror v0.0.0-00010101000000-000000000000 // indirect + go.opentelemetry.io/collector/consumer/consumererror v0.0.0-20241021093951-f2b31d131ae2 // indirect go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 // indirect go.opentelemetry.io/collector/pdata v1.17.0 // indirect go.opentelemetry.io/collector/pdata/pprofile v0.111.0 // indirect diff --git a/receiver/otlpreceiver/go.mod b/receiver/otlpreceiver/go.mod index d579a702513..b91d1beb3f0 100644 --- a/receiver/otlpreceiver/go.mod +++ b/receiver/otlpreceiver/go.mod @@ -15,8 +15,8 @@ require ( go.opentelemetry.io/collector/config/confignet v1.17.0 go.opentelemetry.io/collector/config/configtls v1.17.0 go.opentelemetry.io/collector/confmap v1.17.0 - go.opentelemetry.io/collector/consumer v0.111.0 - go.opentelemetry.io/collector/consumer/consumererror v0.0.0-00010101000000-000000000000 + go.opentelemetry.io/collector/consumer v0.111.1-0.20241021181817-007f06b7c4a8 + go.opentelemetry.io/collector/consumer/consumererror v0.0.0-20241021093951-f2b31d131ae2 go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 go.opentelemetry.io/collector/consumer/consumertest v0.111.0 go.opentelemetry.io/collector/pdata v1.17.0 diff --git a/receiver/receiverprofiles/go.mod b/receiver/receiverprofiles/go.mod index 
7f014dab7b5..c8a989abb7f 100644 --- a/receiver/receiverprofiles/go.mod +++ b/receiver/receiverprofiles/go.mod @@ -19,7 +19,7 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.111.0 // indirect - go.opentelemetry.io/collector/consumer v0.111.0 // indirect + go.opentelemetry.io/collector/consumer v0.111.1-0.20241021181817-007f06b7c4a8 // indirect go.opentelemetry.io/collector/pdata v1.17.0 // indirect go.opentelemetry.io/collector/pdata/pprofile v0.111.0 // indirect go.opentelemetry.io/otel v1.31.0 // indirect diff --git a/service/go.mod b/service/go.mod index f97bf80301f..bd4f755ee3f 100644 --- a/service/go.mod +++ b/service/go.mod @@ -17,12 +17,13 @@ require ( go.opentelemetry.io/collector/confmap v1.17.0 go.opentelemetry.io/collector/connector v0.111.0 go.opentelemetry.io/collector/connector/connectorprofiles v0.111.0 - go.opentelemetry.io/collector/connector/connectortest v0.111.0 - go.opentelemetry.io/collector/consumer v0.111.0 + go.opentelemetry.io/collector/connector/connectortest v0.0.0-20241021181817-007f06b7c4a8 + go.opentelemetry.io/collector/consumer v0.111.1-0.20241021181817-007f06b7c4a8 go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 go.opentelemetry.io/collector/consumer/consumertest v0.111.0 go.opentelemetry.io/collector/exporter v0.111.0 go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0 + go.opentelemetry.io/collector/exporter/exportertest v0.111.0 go.opentelemetry.io/collector/extension v0.111.0 go.opentelemetry.io/collector/extension/extensioncapabilities v0.111.0 go.opentelemetry.io/collector/extension/zpagesextension v0.111.0 @@ -31,10 +32,10 @@ require ( go.opentelemetry.io/collector/pdata/pprofile v0.111.0 go.opentelemetry.io/collector/pdata/testdata v0.111.0 go.opentelemetry.io/collector/pipeline v0.111.0 - go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.111.0 - 
go.opentelemetry.io/collector/processor v0.111.0 + go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.0.0-20241021181817-007f06b7c4a8 + go.opentelemetry.io/collector/processor v0.111.1-0.20241021181817-007f06b7c4a8 go.opentelemetry.io/collector/processor/processorprofiles v0.111.0 - go.opentelemetry.io/collector/processor/processortest v0.111.0 + go.opentelemetry.io/collector/processor/processortest v0.0.0-20241021181817-007f06b7c4a8 go.opentelemetry.io/collector/receiver v0.111.0 go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0 go.opentelemetry.io/collector/semconv v0.111.0 @@ -96,7 +97,7 @@ require ( go.opentelemetry.io/collector/config/configopaque v1.17.0 // indirect go.opentelemetry.io/collector/config/configtls v1.17.0 // indirect go.opentelemetry.io/collector/config/internal v0.111.0 // indirect - go.opentelemetry.io/collector/consumer/consumererror v0.111.0 // indirect + go.opentelemetry.io/collector/consumer/consumererror v0.0.0-20241021181817-007f06b7c4a8 // indirect go.opentelemetry.io/collector/extension/auth v0.111.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 // indirect go.opentelemetry.io/contrib/zpages v0.56.0 // indirect @@ -189,10 +190,12 @@ replace go.opentelemetry.io/collector/receiver/receiverprofiles => ../receiver/r replace go.opentelemetry.io/collector/processor/processorprofiles => ../processor/processorprofiles -replace go.opentelemetry.io/collector/connector/connectorprofiles => ../connector/connectorprofiles - replace go.opentelemetry.io/collector/exporter/exporterprofiles => ../exporter/exporterprofiles replace go.opentelemetry.io/collector/pipeline/pipelineprofiles => ../pipeline/pipelineprofiles +replace go.opentelemetry.io/collector/exporter/exportertest => ../exporter/exportertest + replace go.opentelemetry.io/collector/consumer/consumererror => ../consumer/consumererror + +replace go.opentelemetry.io/collector/connector/connectorprofiles => 
../connector/connectorprofiles diff --git a/versions.yaml b/versions.yaml index 4e6a12cad8d..9aaf56deed2 100644 --- a/versions.yaml +++ b/versions.yaml @@ -47,6 +47,7 @@ module-sets: - go.opentelemetry.io/collector/exporter/debugexporter - go.opentelemetry.io/collector/exporter/exporterprofiles - go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles + - go.opentelemetry.io/collector/exporter/exportertest - go.opentelemetry.io/collector/exporter/nopexporter - go.opentelemetry.io/collector/exporter/otlpexporter - go.opentelemetry.io/collector/exporter/otlphttpexporter