diff --git a/.circleci/config.yml b/.circleci/config.yml
index f55d3007a3aa..4dba2fe2b283 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -1,26 +1,3 @@
-# Using Contexts:
-# some jobs depend on secrets like API tokens to work correctly such as publishing to docker hub
-# or reporting issues to GitHub. All such tokens are stored in CircleCI contexts (https://circleci.com/docs/2.0/contexts).
-#
-# All tokens stored in a contexts are injected into a job as environment variables IF the pipeline that runs the job
-# explicitly enables the context for the job.
-#
-# Contexts are protected with security groups. Jobs that use contexts will not run for commits from people who are not
-# part of the approved security groups for the given context. This means that contributors who are not part of the
-# OpenTelemetry GitHub organisation will not be able to run jobs that depend on contexts. As a result, PR pipelines
-# should never depend on any contexts and never use any tokens/secrets.
-#
-# This CI pipeline uses two contexts:
-# - github-release-and-issues-api-token
-# This context makes GITHUB_TOKEN available to jobs. Jobs can use the token to authenticate with the GitHub API.
-# We use this to report failures as issues back to the GitHub project.
-# Any member of the OpenTelemetry GitHub organisation can run jobs that require this context e.g, loadtest-with-github-reports.
-#
-# - dockerhub-token
-# This contexts makes DOCKER_HUB_USERNAME and DOCKER_HUB_PASSWORD environment variables available to the jobs.
-# This is used to publish docker images to Docker Hub.
-# Only project approvers and maintainers can run jobs that depend on this context such e.g, publish-stable.
-
version: 2.1
parameters:
@@ -36,6 +13,8 @@ parameters:
orbs:
win: circleci/windows@2.4.0
+ aws-cli: circleci/aws-cli@1.3.1
+ kubernetes: circleci/kubernetes@0.11.2
executors:
golang:
@@ -51,7 +30,6 @@ commands:
files:
type: string
default: |
- bin/otelcontribcol_darwin_arm64
bin/otelcontribcol_darwin_amd64
bin/otelcontribcol_linux_arm64
bin/otelcontribcol_linux_amd64
@@ -63,7 +41,7 @@ commands:
dist/otel-contrib-collector-*amd64.msi
steps:
- run:
- name: Check if files exist
+ name: Check if distribution files exist
command: |
files="<< parameters.files >>"
for f in $files; do
@@ -74,6 +52,7 @@ commands:
fi
done
+
setup:
steps:
- checkout
@@ -111,7 +90,7 @@ commands:
save_module_cache:
steps:
- save_cache:
- key: cimg-go-pkg-mod-{{ arch }}-{{ checksum "go.sum" }}-{{ checksum "internal/tools/go.sum" }}
+ key: cimg-go-pkg-mod-{{ arch }}-{{ checksum "go.sum" }}-v4
paths:
- "/home/circleci/go/pkg/mod"
@@ -122,7 +101,7 @@ commands:
command: mkdir -p ~/go/pkg/mod
- restore_cache: # restores saved cache if no changes are detected since last run
keys:
- - cimg-go-pkg-mod-{{ arch }}-{{ checksum "go.sum" }}-{{ checksum "internal/tools/go.sum" }}
+ - cimg-go-pkg-mod-{{ arch }}-{{ checksum "go.sum" }}-v4
install_fluentbit:
steps:
@@ -150,43 +129,28 @@ commands:
tag:
type: string
steps:
+ - run:
+ name: Setup Environment Variables
+ command: |
+ echo "export REGISTRY=public.ecr.aws/sumologic" >> $BASH_ENV
+ echo "export TAG_URL=public.ecr.aws/sumologic/<< parameters.repo >>:<< parameters.tag >>" >> $BASH_ENV
+ echo "export LATEST_URL=public.ecr.aws/sumologic/<< parameters.repo >>:latest" >> $BASH_ENV
- run:
name: Build image
command: |
make docker-otelcontribcol
- docker tag otelcontribcol:latest otel/<< parameters.repo >>:<< parameters.tag >>
- docker tag otelcontribcol:latest otel/<< parameters.repo >>:latest
+ docker tag otelcontribcol:latest ${TAG_URL}
+ docker tag otelcontribcol:latest ${LATEST_URL}
+ - aws-cli/install
- run:
- name: Login to Docker Hub
- command: docker login -u $DOCKER_HUB_USERNAME -p $DOCKER_HUB_PASSWORD
+ name: Login to AWS ECR
+ command: aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${REGISTRY}
- run:
name: Push image
command: |
- docker push otel/<< parameters.repo >>:<< parameters.tag >>
- docker push otel/<< parameters.repo >>:latest
+ docker push ${TAG_URL}
+ docker push ${LATEST_URL}
- github_issue_generator:
- steps:
- - when:
- condition:
- equal: [main, << pipeline.git.branch >>]
- steps:
- - run:
- name: Generate GitHub Issue
- command: issuegenerator ${TEST_RESULTS}
- when: on_fail
-
- run_loadtest:
- steps:
- - restore_workspace
- - install_fluentbit
- - run:
- name: Loadtest
- command: TEST_ARGS="-test.run=$(make -C testbed -s list-loadtest-tests | circleci tests split|xargs echo|sed 's/ /|/g')" make e2e-test
- - store_artifacts:
- path: testbed/tests/results
- - store_test_results:
- path: testbed/tests/results/junit
workflows:
version: 2
@@ -201,15 +165,27 @@ workflows:
requires:
- setup
- run-stability-tests:
- context:
- - github-release-and-issues-api-token
requires:
- - cross-compile
+ - cross-compile
- publish-dev:
- context:
- - dockerhub-token
requires:
- run-stability-tests
+ filters:
+ branches:
+ only: /.*/
+ tags:
+ only: /.*/
+ - run-tracing-tests:
+ repo: opentelemetry-collector-dev
+ tag: ${CIRCLE_SHA1}
+ requires:
+ - publish-dev
+ filters:
+ branches:
+ only: /.*/
+ tags:
+ only: /.*/
+
build-publish:
when: << pipeline.parameters.run-build-publish >>
@@ -217,76 +193,52 @@ workflows:
- setup:
filters:
tags:
- only: /^v[0-9]+\.[0-9]+\.[0-9]+.*/
- - build-examples:
+ only: /.*/
+ - lint:
requires:
- setup
filters:
tags:
- only: /^v[0-9]+\.[0-9]+\.[0-9]+.*/
- - cross-compile:
+ only: /.*/
+ - build-examples:
requires:
- setup
filters:
tags:
- only: /^v[0-9]+\.[0-9]+\.[0-9]+.*/
- - loadtest-with-github-reports:
- context:
- - github-release-and-issues-api-token
+ only: /.*/
+ - cross-compile:
requires:
- - cross-compile
+ - setup
filters:
- branches:
- only: main
tags:
- only: /^v[0-9]+\.[0-9]+\.[0-9]+.*/
- - loadtest:
+ only: /.*/
+ - unit-tests:
requires:
- - cross-compile
+ - setup
filters:
- branches:
- ignore: main
+ tags:
+ only: /.*/
- windows-msi:
requires:
- cross-compile
filters:
tags:
- only: /^v[0-9]+\.[0-9]+\.[0-9]+.*/
- # this publish-check step only runs on the main branch.
- # it is identical to the other publish-check step in all ways except
- # it runs loadtest-with-github-reports instead of loadtest.
- # This is because these jobs can access the GITHUB_TOKEN secret which is not available to PR builds.
- - publish-check:
- requires:
- - cross-compile
- - loadtest-with-github-reports
- - windows-msi
- - deb-package
- - rpm-package
- filters:
- branches:
- only: main
- # this publish-check step run for PR builds (all branches except main).
- # it runs the same jobs as the previous public-check step but
- # it uses the versions that do not need access to the
- # GITHUB_TOKEN secret.
+ only: /.*/
- publish-check:
requires:
+ - lint
+ - unit-tests
+ - integration-tests
- cross-compile
- - loadtest
- windows-msi
- deb-package
- rpm-package
- filters:
- branches:
- ignore: main
- publish-stable:
- context:
- - github-release-and-issues-api-token
- - dockerhub-token
requires:
+ - lint
+ - unit-tests
+ - integration-tests
- cross-compile
- - loadtest-with-github-reports
- windows-msi
- deb-package
- rpm-package
@@ -297,13 +249,19 @@ workflows:
only: /^v[0-9]+\.[0-9]+\.[0-9]+.*/
- spawn-stability-tests-job:
requires:
- - loadtest-with-github-reports
+ - lint
+ - unit-tests
+ - integration-tests
- cross-compile
filters:
branches:
- only: /main|release\/.+/
+              only: /main|release\/.+|tracing\/.+/
tags:
- ignore: /.*/
+ only: /.*/
+ - integration-tests:
+ filters:
+ tags:
+ only: /.*/
- build-package:
name: deb-package
package_type: deb
@@ -311,7 +269,7 @@ workflows:
- cross-compile
filters:
tags:
- only: /^v[0-9]+\.[0-9]+\.[0-9]+.*/
+ only: /.*/
- build-package:
name: rpm-package
package_type: rpm
@@ -319,7 +277,8 @@ workflows:
- cross-compile
filters:
tags:
- only: /^v[0-9]+\.[0-9]+\.[0-9]+.*/
+ only: /.*/
+
jobs:
setup:
@@ -331,6 +290,16 @@ jobs:
paths:
- project
- go/bin
+ lint:
+ executor: golang
+ steps:
+ - restore_workspace
+ - run:
+ name: Lint
+ command: make -j2 for-all-target TARGET=lint
+ - run:
+ name: Checks
+ command: make -j4 checklicense impi misspell
build-examples:
docker:
@@ -350,31 +319,20 @@ jobs:
- run:
name: Build collector for all archs
command: grep ^otelcontribcol-all-sys Makefile|fmt -w 1|tail -n +2|circleci tests split|xargs make
- - run:
- name: Log checksums to console
- command: shasum -a 256 bin/*
- persist_to_workspace:
root: ~/
paths: project/bin
- loadtest-with-github-reports:
+ unit-tests:
executor: golang
- parallelism: 6
- resource_class: medium+
- environment:
- TEST_RESULTS: testbed/tests/results/junit/results.xml
steps:
- - run_loadtest
- - github_issue_generator
-
- loadtest:
- executor: golang
- parallelism: 6
- resource_class: medium+
- environment:
- TEST_RESULTS: testbed/tests/results/junit/results.xml
- steps:
- - run_loadtest
+ - restore_workspace
+ - run:
+ name: Unit test
+ command: make test
+ - run:
+ name: Upload unit test coverage
+ command: bash <(curl -s https://codecov.io/bash) -F unit
windows-msi:
executor:
@@ -415,8 +373,6 @@ jobs:
command: echo "publish check failed. This means release CI jobs will likely fail as well"
when: on_fail
- # any pipeline using this job must enable "github-release-and-issues-api-token"
- # and "dockerhub-token" contexts
publish-stable:
docker:
- image: cimg/go:1.17
@@ -425,7 +381,7 @@ jobs:
- verify_dist_files_exist
- setup_remote_docker
- publish_docker_images:
- repo: opentelemetry-collector-contrib
+ repo: opentelemetry-collector
tag: ${CIRCLE_TAG:1}
- run:
name: Prepare release artifacts
@@ -438,8 +394,6 @@ jobs:
name: Create Github release and upload artifacts
command: ghr -t $GITHUB_TOKEN -u $CIRCLE_PROJECT_USERNAME -r $CIRCLE_PROJECT_REPONAME --replace $CIRCLE_TAG dist/
- # any pipeline using this job must enable "github-release-and-issues-api-token"
- # and "dockerhub-token" contexts
publish-dev:
executor: golang
steps:
@@ -452,7 +406,7 @@ jobs:
bin/otelcontribcol_windows_amd64.exe
- setup_remote_docker
- publish_docker_images:
- repo: opentelemetry-collector-contrib-dev
+ repo: opentelemetry-collector-dev
tag: ${CIRCLE_SHA1}
spawn-stability-tests-job:
@@ -461,10 +415,15 @@ jobs:
- run:
name: Trigger stability tests job
command: |
- curl -f -X POST "https://circleci.com/api/v2/project/github/open-telemetry/${CIRCLE_PROJECT_REPONAME}/pipeline?circle-token=${CIRCLE_API_TOKEN}" \
+ PARAM='"branch": "'"${CIRCLE_BRANCH}"'"'
+ if [ -z "$CIRCLE_BRANCH" ]; then
+ PARAM='"tag": "'"${CIRCLE_TAG}"'"'
+ fi
+ curl -f -X POST "https://circleci.com/api/v2/project/github/SumoLogic/${CIRCLE_PROJECT_REPONAME}/pipeline" \
-H 'Content-Type: application/json' \
-H 'Accept: application/json' \
- -d '{"parameters": {"run-build-publish": false, "run-stability-tests": true, "collector-sha": "'"${CIRCLE_SHA1}"'"}, "branch": "'"${CIRCLE_BRANCH}"'"}'
+ -H "Circle-Token: ${CIRCLE_API_TOKEN}" \
+ -d '{"parameters": {"run-build-publish": false, "run-stability-tests": true, "collector-sha": "'"${CIRCLE_SHA1}"'"}, '"${PARAM}"'}'
checkout-commit:
executor: golang
@@ -476,14 +435,12 @@ jobs:
git checkout << pipeline.parameters.collector-sha >>
git status
- # this jobs reports failures as github issues and as a result, any pipeline using this job
- # must enable "github-release-and-issues-api-token" context
run-stability-tests:
parameters:
# Number of runners must be always in sync with number of stability tests,
# so every node runs exactly one stability test.
runners-number:
- type: integer
+ type: integer
default: 9
executor: golang
resource_class: medium+
@@ -508,7 +465,51 @@ jobs:
path: testbed/stabilitytests/results
- store_test_results:
path: testbed/stabilitytests/results/junit
- - github_issue_generator
+ - run:
+ name: Run on fail status
+ command: |
+ curl --request POST \
+ --url https://api.github.com/repos/SumoLogic/opentelemetry-collector-contrib/issues \
+ --header "authorization: Bearer ${GITHUB_TOKEN}" \
+ --header "content-type: application/json" \
+ --data '{
+ "title": "Stability tests failed in branch '"${CIRCLE_BRANCH}"' for commit << pipeline.parameters.collector-sha >>",
+ "body": "Link to failed job: '"${CIRCLE_BUILD_URL}"'."
+ }'
+ when: on_fail
+
+ integration-tests:
+ executor: machine
+ environment:
+ GOPATH: /home/circleci/go
+ steps:
+ - setup_go
+ - setup
+ - run:
+ name: Integration tests with coverage
+ command: |
+ mkdir -p test-results/junit
+ trap "go-junit-report -set-exit-code < test-results/go-integration-tests.out > test-results/junit/results.xml" EXIT
+ make integration-tests-with-cover | tee test-results/go-integration-tests.out
+ - run:
+ name: Upload integration test coverage
+ command: bash <(curl -s https://codecov.io/bash) -F integration
+ - store_test_results:
+ path: test-results/junit
+ - store_artifacts:
+ path: test-results
+ - run:
+ name: Run on fail status
+ command: |
+ curl --request POST \
+ --url https://api.github.com/repos/SumoLogic/opentelemetry-collector-contrib/issues \
+ --header "authorization: Bearer ${GITHUB_TOKEN}" \
+ --header "content-type: application/json" \
+ --data '{
+                    "title": "Integration tests failed in branch '"${CIRCLE_BRANCH}"' for commit << pipeline.parameters.collector-sha >>",
+ "body": "Link to failed job: '"${CIRCLE_BUILD_URL}"'."
+ }'
+ when: on_fail
build-package:
machine:
@@ -543,3 +544,67 @@ jobs:
- persist_to_workspace:
root: ~/
paths: project/dist/*.<< parameters.package_type >>
+
+ run-tracing-tests:
+ parameters:
+ repo:
+ type: string
+ tag:
+ type: string
+ docker:
+ - image: ${INFRASTRUCTURE_ACCOUNT_ID}.dkr.ecr.us-west-2.amazonaws.com/tracing-tests/tests:latest
+ aws_auth:
+ aws_access_key_id: ${TRACING_TESTS_AWS_ACCESS_KEY_ID}
+ aws_secret_access_key: ${TRACING_TESTS_AWS_SECRET_ACCESS_KEY}
+ steps:
+ - run:
+ name: "Configure environment variables"
+ command: |
+ echo "export SUMO_API_ENDPOINT=${TRACING_TESTS_SUMO_API_ENDPOINT}" >> $BASH_ENV
+ echo "export OTELCOL_HEALTHCHECK_URL=${TRACING_TESTS_OTELCOL_URL}" >> $BASH_ENV
+ echo "export SUMO_ACCESS_ID=${TRACING_TESTS_SUMO_ACCESS_ID}" >> $BASH_ENV
+ echo "export SUMO_ACCESS_KEY=${TRACING_TESTS_SUMO_ACCESS_KEY}" >> $BASH_ENV
+ echo "export PYTHONWARNINGS=ignore:Unverified HTTPS request" >> $BASH_ENV
+ echo "export AWS_ACCESS_KEY_ID=${TRACING_TESTS_CLUSTER_AWS_ACCESS_ID}" >> $BASH_ENV
+ echo "export AWS_SECRET_ACCESS_KEY=${TRACING_TESTS_CLUSTER_AWS_ACCESS_KEY}" >> $BASH_ENV
+ - kubernetes/install-kubeconfig:
+ kubeconfig: TRACING_TESTS_CLUSTER_KUBECONFIG_DATA
+ - kubernetes/install-kubectl
+ - aws-cli/install
+ - run:
+ name: "Clean up environment"
+ command: /opt/tracing-tests/deployment_scripts/clean-up-env.sh
+ - run:
+ name: "Deploy Sumologic OpenTelemetry Collector"
+ command: /opt/tracing-tests/deployment_scripts/deploy-otelcol.sh << parameters.repo >> << parameters.tag >>
+ - run:
+ name: "Wait for Sumologic OpenTelemetry Collector to be available"
+ command: kubectl -n java-app wait --for=condition=ready --timeout=120s pod -l app=otelcol
+ - run:
+ name: "Deploy ECR Registry Secret"
+ command: /opt/tracing-tests/deployment_scripts/deploy-ecr-registry-secret.sh
+ - run:
+ name: "Deploy Kubernetes Metadata Provider application"
+ command: /opt/tracing-tests/deployment_scripts/deploy-k8sfeeder.sh
+ - run:
+ name: "Wait for Kubernetes Metadata Provider"
+ command: kubectl -n java-app wait --for=condition=ready --timeout=60s pod -l app=k8s-feeder
+ - run:
+ name: "Get Kubernetes Metadata provider URL"
+ command: echo "export KUBERNETES_METADATA_URL=$(kubectl -n java-app get svc k8s-feeder-svc-pub -o json | jq .status.loadBalancer.ingress[0].hostname)" >> $BASH_ENV
+ - run:
+ name: "Deploy Java App application"
+ command: /opt/tracing-tests/deployment_scripts/deploy-test-applications.sh
+ - run:
+          name: "Wait for Java App application"
+ command: kubectl -n java-app wait --for=condition=ready --timeout=60s pod -l app=server
+ - run:
+ name: "Wait for data..."
+ command: sleep 180
+ - run:
+ name: "Execute Tracing Tests"
+ command: "pytest --rootdir=/opt/tracing-tests --junitxml=/opt/tracing-tests/test-results/junit.xml --html=/opt/tracing-tests/test-results/report.html --self-contained-html -vvv /opt/tracing-tests/tests"
+ - store_test_results:
+ path: /opt/tracing-tests/test-results
+ - store_artifacts:
+ path: /opt/tracing-tests/test-results
diff --git a/.github/workflows/opentelemetry-collector-builder.yaml b/.github/workflows/opentelemetry-collector-builder.yaml
new file mode 100644
index 000000000000..d7a4618e34e2
--- /dev/null
+++ b/.github/workflows/opentelemetry-collector-builder.yaml
@@ -0,0 +1,49 @@
+name: OpenTelemetry Collector Builder
+
+on:
+ pull_request:
+ branches:
+ - main
+
+jobs:
+ # otelcolbuilder_post_go1_16:
+ # runs-on: ubuntu-20.04
+ # strategy:
+ # matrix:
+ # go: [ '1.16' ]
+ # steps:
+ # - uses: actions/checkout@v2
+ # - name: Setup go
+ # uses: actions/setup-go@v2
+ # with:
+ # go-version: ${{ matrix.go }}
+ # - name: Print go version
+ # run: go version
+ # - name: Build OpenTelemetry distro
+ # working-directory: ./otelcolbuilder/
+ # run: |
+ # go install github.com/open-telemetry/opentelemetry-collector-builder@v0.24.0
+ # make build
+
+ # Just build on 1.15 for now because of a weird issue:
+ # https://github.com/actions/setup-go/issues/107
+ otelcolbuilder_pre_go1_16:
+ runs-on: ubuntu-20.04
+ strategy:
+ matrix:
+ go: [ '1.15' ]
+ steps:
+ - uses: actions/checkout@v2
+ - name: Setup go
+ uses: actions/setup-go@v2
+ with:
+ go-version: ${{ matrix.go }}
+ - name: Print go version
+ run: go version
+ - name: Print go env
+ run: go env
+ - name: Build OpenTelemetry distro
+ working-directory: ./otelcolbuilder/
+ run: |
+ make install-prego1.16
+ make build
diff --git a/.gitignore b/.gitignore
index c55af97cdace..0774532d4860 100644
--- a/.gitignore
+++ b/.gitignore
@@ -31,6 +31,10 @@ integration-coverage.txt
coverage.html
integration-coverage.html
+# vagrant
+.vagrant
+ubuntu-bionic-18.04-cloudimg-console.log
+
# Wix
*.wixobj
*.wixpdb
diff --git a/README.md b/README.md
index fd8c89a2ec2e..3a6aea14e119 100644
--- a/README.md
+++ b/README.md
@@ -1,110 +1,26 @@
----
+# SumoLogic / OpenTelemetry Collector Contrib
+This is a repository for OpenTelemetry Collector Contrib with additional Sumo Logic extensions. It is based
+on the [opentelemetry-collector-contrib](https://github.com/open-telemetry/opentelemetry-collector-contrib) and
+[core distribution of the Collector](https://github.com/open-telemetry/opentelemetry-collector).
-
-
- Getting Started
- •
- Getting Involved
- •
- Getting In Touch
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+## Docker Images
+Docker images for all releases are published at https://hub.docker.com/r/sumologic/opentelemetry-collector
-
-
- Vision
- •
- Design
- •
- Monitoring
- •
- Performance
- •
- Security
- •
- Roadmap
-
-
+### Building docker locally
----
+```
+docker build -f cmd/otelcontribcol/Dockerfile -t otelcontribcol .
+```
-# OpenTelemetry Collector Contrib
+## Differences from the core release
-This is a repository for OpenTelemetry Collector contributions that are not
-part of the [core
-distribution](https://github.com/open-telemetry/opentelemetry-collector) of the
-Collector. Typically, these contributions are vendor specific
-receivers/exporters and/or components that are only useful to a relatively
-small number of users.
+SumoLogic version of OpenTelemetry Collector introduces a number of additions over the plain version:
-> Please note that this repository and its releases are a superset of the core repository.
-
-## Contributing
-
-See [CONTRIBUTING.md](CONTRIBUTING.md).
-
-Triagers ([@open-telemetry/collector-contrib-triagers](https://github.com/orgs/open-telemetry/teams/collector-contrib-triagers))
-- [Alolita Sharma](https://github.com/alolita), AWS
-- [Punya Biswal](https://github.com/punya), Google
-- [Steve Flanders](https://github.com/flands), Splunk
-
-Approvers ([@open-telemetry/collector-contrib-approvers](https://github.com/orgs/open-telemetry/teams/collector-contrib-approvers)):
-
-- [Anthony Mirabella](https://github.com/Aneurysm9), AWS
-- [Anuraag Agrawal](https://github.com/anuraaga), AWS
-- [Daniel Jaglowski](https://github.com/djaglowski), observIQ
-- [Dmitrii Anoshin](https://github.com/dmitryax), Splunk
-- [Juraci Paixão Kröhling](https://github.com/jpkrohling), Grafana Labs
-- [Kevin Brockhoff](https://github.com/kbrockhoff), Daugherty Business Solutions
-- [Pablo Baeyens](https://github.com/mx-psi), DataDog
-- [Owais Lone](https://github.com/owais), Splunk
-
-Maintainers ([@open-telemetry/collector-contrib-maintainer](https://github.com/orgs/open-telemetry/teams/collector-contrib-maintainer)):
-
-- [Alex Boten](https://github.com/codeboten), Lightstep
-- [Bogdan Drutu](https://github.com/BogdanDrutu), Splunk
-- [Tigran Najaryan](https://github.com/tigrannajaryan), Splunk
-
-Learn more about roles in the [community repository](https://github.com/open-telemetry/community/blob/main/community-membership.md).
-
-## PRs and Reviews
-
-When creating a PR please following the process [described
-here](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CONTRIBUTING.md#how-to-structure-prs-to-get-expedient-reviews).
-
-News PRs will be automatically associated with the reviewers based on
-[CODEOWNERS](.github/CODEOWNERS). PRs will be also automatically assigned to one of the
-maintainers or approvers for facilitation.
-
-The facilitator is responsible for helping the PR author and reviewers to make progress
-or if progress cannot be made for closing the PR.
-
-If the reviewers do not have approval rights the facilitator is also responsible
-for the official approval that is required for the PR to be merged and if the facilitator
-is a maintainer they are responsible for merging the PR as well.
-
-The facilitator is not required to perform a thorough review, but they are encouraged to
-enforce Collector best practices and consistency across the codebase and component
-behavior. The facilitators will typically rely on codeowner's detailed review of the code
-when making the final approval decision.
-
-We recommend maintainers and approvers to keep an eye on the
-[project board](https://github.com/orgs/open-telemetry/projects/3). All newly created
-PRs are automatically added to this board. (If you don't see the PR on the board you
-may need to add it manually by setting the Project field in the PR view).
+* Extensions to [k8sprocessor](https://github.com/SumoLogic/opentelemetry-collector-contrib/tree/master/processor/k8sprocessor)
+ which include more tags being collected and field extraction enhancements
+* A [sourceprocessor](https://github.com/SumoLogic/opentelemetry-collector-contrib/tree/master/processor/sourceprocessor) that
+ adds additional tags (mostly relevant to K8s environment) and provides some data filtering rules
+* [Cascading filter processor](https://github.com/pmm-sumo/opentelemetry-collector-contrib/tree/remote-conf-poc/processor/cascadingfilterprocessor)
+ extensions, which include *cascading* policy with two-pass rules for determining span budget for each of the defined rules
+* Additional release schedule, which allows us to quickly introduce bugfixes and extensions
diff --git a/Vagrantfile b/Vagrantfile
new file mode 100644
index 000000000000..ae4b149e9fda
--- /dev/null
+++ b/Vagrantfile
@@ -0,0 +1,17 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+Vagrant.configure('2') do |config|
+ config.vm.box = 'ubuntu/bionic64'
+ config.vm.box_check_update = false
+ config.vm.host_name = 'opentelemetry-collector-contrib'
+ config.vm.network :private_network, ip: "192.168.33.33"
+
+ config.vm.provider 'virtualbox' do |vb|
+ vb.gui = false
+ vb.memory = 4096
+ vb.name = 'opentelemetry-collector-contrib'
+ end
+ config.vm.provision 'file', source: 'vagrant', destination: 'vagrant'
+ config.vm.provision 'shell', path: 'vagrant/provision.sh'
+end
diff --git a/cmd/configschema/go.mod b/cmd/configschema/go.mod
index c4a14656c772..55bf563ac3d0 100644
--- a/cmd/configschema/go.mod
+++ b/cmd/configschema/go.mod
@@ -220,18 +220,21 @@ require (
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.38.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.38.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.38.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/cascadingfilterprocessor v0.38.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.38.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatorateprocessor v0.38.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.38.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.38.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor v0.38.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.38.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sprocessor v0.38.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor v0.38.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.38.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.38.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.38.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.38.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.38.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/sourceprocessor v0.38.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanmetricsprocessor v0.38.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.38.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.38.0 // indirect
@@ -592,8 +595,16 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/processor/grou
replace github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor => ../../processor/groupbytraceprocessor
+replace github.com/open-telemetry/opentelemetry-collector-contrib/processor/cascadingfilterprocessor => ../../processor/cascadingfilterprocessor/
+
+replace github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sprocessor => ../../processor/k8sprocessor/
+
replace github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor => ../../processor/k8sattributesprocessor/
+replace github.com/open-telemetry/opentelemetry-collector-contrib/processor/sourceprocessor => ../../processor/sourceprocessor/
+
+replace github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicsyslogprocessor => ../../processor/sumologicsyslogprocessor/
+
replace github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor => ../../processor/resourcedetectionprocessor/
replace github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor => ../../processor/resourceprocessor/
diff --git a/cmd/otelcontribcol/Dockerfile b/cmd/otelcontribcol/Dockerfile
index 20ad2be6ab41..0bf86cc7a01d 100644
--- a/cmd/otelcontribcol/Dockerfile
+++ b/cmd/otelcontribcol/Dockerfile
@@ -11,6 +11,6 @@ USER ${USER_UID}
COPY --from=prep /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
COPY otelcontribcol /
-EXPOSE 4317 55680 55679
+EXPOSE 4317 4318 55680 55679
ENTRYPOINT ["/otelcontribcol"]
CMD ["--config", "/etc/otel/config.yaml"]
diff --git a/examples/README.md b/examples/README.md
new file mode 100644
index 000000000000..9d0804faeb64
--- /dev/null
+++ b/examples/README.md
@@ -0,0 +1,71 @@
+# Examples
+## Kubernetes configuration
+
+### Helm chart values template
+[kubernetes/custom-values.yaml](./kubernetes/custom-values.yaml) contains
+an example template for Sumologic Kubernetes Collection Helm chart, which
+installs OpenTelemetry Collector in Agent and Gateway configuration, as described
+in the [documentation](https://help.sumologic.com/Traces/Getting_Started_with_Transaction_Tracing/Set_up_traces_collection_for_Kubernetes_environments).
+
+After filling the template values, you can install it following
+[Sumologic Kubernetes Collection installation instructions](https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/release-v2.0/deploy/docs/Installation_with_Helm.md)
+For example, by running following commands:
+```shell
+helm repo add sumologic https://sumologic.github.io/sumologic-kubernetes-collection
+kubectl create namespace sumologic
+helm upgrade --install my-release -n sumologic sumologic/sumologic -f custom-values.yaml
+```
+
+### Helm chart values template with cascading filter enabled
+
+Additionally, [kubernetes/custom-values-cascading-filter.yaml](./kubernetes/custom-values-cascading-filter.yaml)
+includes an alternative example template that enables cascading filter,
+as described in [trace filtering documentation](https://help.sumologic.com/Traces/Getting_Started_with_Transaction_Tracing/What_if_I_don't_want_to_send_all_the_tracing_data_to_Sumo_Logic%3F).
+Note that cascading filter is currently supported only for single-instance
+OpenTelemetry Collector deployments.
+
+## Non-kubernetes configuration
+
+### Agent configuration (should be run on each host/node)
+[non-kubernetes/agent-configuration-template.yaml](non-kubernetes/agent-configuration-template.yaml) contains
+an OpenTelemetry Collector YAML file which includes configuration
+for OpenTelemetry Collector running in Agent mode. It should be
+deployed on each host/node within the system.
+
+### Gateway configuration (should be run per each cluster/data-center/etc.)
+[non-kubernetes/gateway-configuration-template.yaml](non-kubernetes/gateway-configuration-template.yaml) contains
+an OpenTelemetry Collector YAML file which includes configuration
+for OpenTelemetry Collector running in Gateway mode.
+
+Additionally, for [non-kubernetes/gateway-configuration-template-with-cascading-filter.yaml](non-kubernetes/gateway-configuration-template-with-cascading-filter.yaml)
+the configuration also includes cascading filter config,
+which is described in more detail in [trace filtering documentation](https://help.sumologic.com/Traces/Getting_Started_with_Transaction_Tracing/What_if_I_don't_want_to_send_all_the_tracing_data_to_Sumo_Logic%3F).
+
+Please refer to [relevant documentation](https://help.sumologic.com/Traces/Getting_Started_with_Transaction_Tracing/Set_up_traces_collection_for_other_environments)
+for more details.
+
+### AWS OTel Collector configuration file
+[non-kubernetes/aws-otel-config.yaml](non-kubernetes/aws-otel-config.yaml) contains
+an AWS OpenTelemetry Collector distribution YAML file which includes configuration
+for OpenTelemetry Collector. Should be deployed on the AWS environments.
+
+### AWS OTel Collector for ECS in EC2 mode template
+[non-kubernetes/aws-otel-ecs-ec2-deployment.yaml](non-kubernetes/aws-otel-ecs-ec2-deployment.yaml) contains
+an AWS OpenTelemetry Collector distribution YAML file which includes
+CloudFormation template. It should be deployed on the AWS ECS EC2
+environment.
+
+### AWS OTel Collector for ECS in Fargate mode template
+[non-kubernetes/aws-otel-ecs-fargate-deployment.yaml](non-kubernetes/aws-otel-ecs-fargate-deployment.yaml) contains
+an AWS OpenTelemetry Collector distribution YAML file which includes
+CloudFormation template. It should be deployed on the AWS ECS Fargate
+environment.
+
+### AWS OTel Collector for EC2 deployment template
+[non-kubernetes/aws-otel-ec2-deployment.yaml](non-kubernetes/aws-otel-ec2-deployment.yaml) contains
+an AWS OpenTelemetry Collector distribution YAML file which includes
+CloudFormation template. It should be deployed on the AWS EC2.
+
+### AWS Distro for OpenTelemetry configuration
+[aws_lambda/aws-distro-collector-lambda-layer-config.yaml](aws_lambda/aws-distro-collector-lambda-layer-config.yaml) contains
+an [AWS Distro for Opentelemetry Collector](https://github.com/aws-observability/aws-otel-lambda/tree/main/extensions/aoc-extension) YAML file which includes configuration for collector installed in a Lambda Layer. Collector requires *SUMOLOGIC_HTTP_TRACES_ENDPOINT_URL* environment variable to be set.
diff --git a/examples/aws_lambda/aws-distro-collector-lambda-layer-config.yaml b/examples/aws_lambda/aws-distro-collector-lambda-layer-config.yaml
new file mode 100644
index 000000000000..9b0f7fdfcc02
--- /dev/null
+++ b/examples/aws_lambda/aws-distro-collector-lambda-layer-config.yaml
@@ -0,0 +1,16 @@
+receivers:
+ otlp:
+ protocols:
+ grpc:
+ endpoint: "localhost:55680"
+
+exporters:
+ otlphttp:
+ traces_endpoint: $SUMOLOGIC_HTTP_TRACES_ENDPOINT_URL
+ insecure: true
+
+service:
+ pipelines:
+ traces:
+ receivers: [otlp]
+ exporters: [otlphttp]
diff --git a/examples/kubernetes/custom-values-cascading-filter.yaml b/examples/kubernetes/custom-values-cascading-filter.yaml
new file mode 100644
index 000000000000..c242554ead7b
--- /dev/null
+++ b/examples/kubernetes/custom-values-cascading-filter.yaml
@@ -0,0 +1,73 @@
+sumologic:
+ accessId:
+ accessKey:
+ clusterName:
+ traces:
+ enabled: true
+## Following enables OpenTelemetry Agent which runs on each node as a DaemonSet
+otelagent:
+ enabled:
+ true
+## Following configures OpenTelemetry Collector (gateway)
+## Note that if cascading_filter is used, deployment must include only a single instance
+otelcol:
+ metrics:
+ ## This enables exposing OpenTelemetry Collector metrics. Note that they will consume your DPM
+ ## hence by default they are disabled
+ enabled:
+ true
+ config:
+ processors:
+ ## Following enables a smart cascading filtering rules with preset limits.
+ cascading_filter:
+ ## (default = 30s): Wait time since the first span of a trace before making
+ ## a filtering decision
+ decision_wait: 30s
+ ## (default = 50000): Number of traces kept in memory
+ num_traces: 50000
+ ## (default = 0): Expected number of new traces (helps in allocating data structures)
+ expected_new_traces_per_sec: 100
+ ## (default = 0): defines maximum number of spans per second
+ spans_per_second: 1600
+ ## (default = 0.2): Ratio of spans that are always probabilistically filtered
+ ## (hence might be used for metrics calculation).
+ probabilistic_filtering_ratio: 0.2
+ ## (no default): Policies used to make a sampling decision
+ policies:
+          - name: sampling-priority
+            ## string_attribute: allows to specify conditions that need to be met
+            string_attribute: {
+              key: sampling.priority, values: [ "1" ]
+            }
+            ## Spans_per_second: max number of emitted spans per second by this policy.
+            spans_per_second: 500
+ - name: everything-else
+            ## This selects all traces, up to the global limit
+ spans_per_second: -1
+ ## Following are some examples of other rules that could be used
+ # - name: extended-duration
+ # ## Spans_per_second: max number of emitted spans per second by this policy.
+ # spans_per_second: 500
+ # properties:
+ # ## Selects the span if the duration is greater or equal the given
+ # ## value (use s or ms as the suffix to indicate unit).
+ # min_duration: 5s
+          # - name: "status_code_condition"
+          #   ## Spans_per_second: max number of emitted spans per second by this policy.
+          #   spans_per_second: 500
+ # ## numeric_attribute: provides a list of conditions that need to be met
+ # numeric_attribute: {
+ # key: "http.status_code", min_value: 400, max_value: 999
+ # }
+ # - name: everything-that-is-not-healthcheck
+ # ## This selects all traces where there is NO span starting with `health` operation name
+ # ## If employed, "everything-else" rule must be replaced with it
+ # properties:
+ # name_pattern: "^(healthcheck|otherhealthcheck).*"
+ # invert_match: true
+ # spans_per_second: -1
+ service:
+ pipelines:
+ traces:
+ ## This is required to enable cascading_filter
+ processors: [memory_limiter, k8s_tagger, source, resource, cascading_filter, batch]
diff --git a/examples/kubernetes/custom-values.yaml b/examples/kubernetes/custom-values.yaml
new file mode 100644
index 000000000000..801f704aec21
--- /dev/null
+++ b/examples/kubernetes/custom-values.yaml
@@ -0,0 +1,16 @@
+sumologic:
+ accessId:
+ accessKey:
+ clusterName:
+ traces:
+ enabled: true
+otelcol:
+ ## This enables exposing OpenTelemetry Collector metrics. Note that they will consume your DPM
+ ## hence by default they are disabled
+ metrics:
+ enabled:
+ true
+## Following enables OpenTelemetry Agent which runs on each node as a DaemonSet
+otelagent:
+ enabled:
+ true
\ No newline at end of file
diff --git a/examples/non-kubernetes/agent-configuration-template.yaml b/examples/non-kubernetes/agent-configuration-template.yaml
new file mode 100644
index 000000000000..51de27ff427c
--- /dev/null
+++ b/examples/non-kubernetes/agent-configuration-template.yaml
@@ -0,0 +1,70 @@
+receivers:
+ jaeger:
+ protocols:
+ thrift_compact:
+ endpoint: "0.0.0.0:6831"
+ thrift_binary:
+ endpoint: "0.0.0.0:6832"
+ grpc:
+ endpoint: "0.0.0.0:14250"
+ thrift_http:
+ endpoint: "0.0.0.0:14268"
+ opencensus:
+ endpoint: "0.0.0.0:55678"
+ otlp:
+ protocols:
+ grpc:
+ endpoint: "0.0.0.0:4317"
+ http:
+ endpoint: "0.0.0.0:55681"
+ zipkin:
+ endpoint: "0.0.0.0:9411"
+processors:
+ ## The memory_limiter processor is used to prevent out of memory situations on the collector.
+ memory_limiter:
+ ## check_interval is the time between measurements of memory usage for the
+ ## purposes of avoiding going over the limits. Defaults to zero, so no
+ ## checks will be performed. Values below 1 second are not recommended since
+ ## it can result in unnecessary CPU consumption.
+ check_interval: 5s
+
+ ## Maximum amount of memory, in MiB, targeted to be allocated by the process heap.
+ ## Note that typically the total memory usage of process will be about 50MiB higher
+ ## than this value.
+ limit_mib: 500
+
+ ## Please enable/disable accordingly if on AWS, GCE, ECS, elastic_beanstalk or neither
+ resourcedetection:
+ detectors: [ ec2, gce, ecs, elastic_beanstalk ]
+ timeout: 5s
+ override: false
+
+ ## The batch processor accepts spans and places them into batches grouped by node and resource
+ batch:
+ ## Number of spans after which a batch will be sent regardless of time
+ send_batch_size: 256
+ ## Never more than this many spans are being sent in a batch
+ send_batch_max_size: 512
+ ## Time duration after which a batch will be sent regardless of size
+ timeout: 5s
+
+extensions:
+ health_check: {}
+exporters:
+ otlp:
+ ## Please enter OpenTelemetry Collector Gateway address here
+ endpoint: HOSTNAME
+ insecure: true
+ ## Following generates verbose logs with span content, useful to verify what
+ ## metadata is being tagged. To enable, uncomment and add "logging" to exporters below.
+ ## There are two levels that could be used: `debug` and `info` with the former
+ ## being much more verbose and including (sampled) spans content
+ # logging:
+ # loglevel: debug
+service:
+ extensions: [health_check]
+ pipelines:
+ traces:
+ receivers: [jaeger, opencensus, otlp, zipkin]
+ processors: [memory_limiter, resourcedetection, batch]
+ exporters: [otlp]
diff --git a/examples/non-kubernetes/aws-otel-config-file.yaml b/examples/non-kubernetes/aws-otel-config-file.yaml
new file mode 100644
index 000000000000..75722e5fe8f6
--- /dev/null
+++ b/examples/non-kubernetes/aws-otel-config-file.yaml
@@ -0,0 +1,42 @@
+extensions:
+ health_check:
+receivers:
+ awsxray:
+ endpoint: 0.0.0.0:2000
+ transport: udp
+ jaeger:
+ protocols:
+ thrift_compact:
+ endpoint: 0.0.0.0:6831
+ thrift_binary:
+ endpoint: 0.0.0.0:6832
+ grpc:
+ endpoint: 0.0.0.0:14250
+ thrift_http:
+ endpoint: 0.0.0.0:14268
+ otlp:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:4317
+ http:
+ endpoint: 0.0.0.0:55681
+ zipkin:
+ endpoint: 0.0.0.0:9411
+processors:
+ batch/traces:
+ timeout: 5s
+ send_batch_size: 256
+ resourcedetection:
+ detectors: [env, ec2, ecs]
+ timeout: 5s
+ override: true
+exporters:
+ otlphttp:
+ endpoint: $SUMO_HTTP_TRACES_URL
+service:
+ extensions: [health_check]
+ pipelines:
+ traces:
+ receivers: [awsxray,jaeger,otlp,zipkin]
+ processors: [resourcedetection,batch/traces]
+ exporters: [otlphttp]
diff --git a/examples/non-kubernetes/aws-otel-ec2-deployment.yaml b/examples/non-kubernetes/aws-otel-ec2-deployment.yaml
new file mode 100644
index 000000000000..6cfef8fcec26
--- /dev/null
+++ b/examples/non-kubernetes/aws-otel-ec2-deployment.yaml
@@ -0,0 +1,242 @@
+---
+AWSTemplateFormatVersion: '2010-09-09'
+Description: 'Template to install AWS OTel Collector on EC2 - Amazon Linux.'
+Parameters:
+ SSHKeyName:
+ Description: Name of an existing EC2 KeyPair to enable SSH access to the instance
+ Type: AWS::EC2::KeyPair::KeyName
+ ConstraintDescription: must be the name of an existing EC2 KeyPair.
+ InstanceType:
+ Description: EC2 instance type
+ Type: String
+ Default: m4.2xlarge
+ ConstraintDescription: must be a valid EC2 instance type.
+ InstanceAMI:
+ Description: Managed AMI ID for EC2 Instance
+ Type : String
+ IAMRole:
+ Description: EC2 attached IAM role
+ Type: String
+ Default: SumologicAWSOTelColRoleEC2
+ ConstraintDescription: must be an existing IAM role which will be attached to EC2 instance.
+ IAMPolicy:
+ Description: IAM Role attached IAM Managed Policy
+ Type: String
+ Default: SumologicAWSOTelColPolicyEC2
+ ConstraintDescription: Must be an existing IAM Managed Policy which will be attached to IAM Role.
+ IAMInstanceProfileName:
+ Description: IAM Role attached IAM Instance Profile
+ Type: String
+ Default: SumologicAWSOTelColRoleEC2
+ ConstraintDescription: Must be an existing IAM Instance Profile which will be attached to IAM Role.
+ SumoHttpTracesURL:
+ Type: String
+    Description: Enter the Sumologic HTTP Traces Endpoint URL
+Resources:
+ EC2Instance:
+ Type: AWS::EC2::Instance
+ Metadata:
+ AWS::CloudFormation::Init:
+ configSets:
+ default:
+ - 01_setupCfnHup
+ - 02_config-aws-otel-collector
+ - 03_restart-aws-otel-collector
+ UpdateEnvironment:
+ - 02_config-aws-otel-collector
+ - 03_restart-aws-otel-collector
+ # Definition of YAML configuration of aws-otel-collector, you can change the configuration below.
+ 02_config-aws-otel-collector:
+ files:
+ '/opt/aws/aws-otel-collector/etc/config.yaml':
+ content: !Sub
+ - |
+ extensions:
+ health_check:
+ receivers:
+ otlp:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:4317
+ http:
+ endpoint: 0.0.0.0:55681
+ awsxray:
+ endpoint: 0.0.0.0:2000
+ transport: udp
+ processors:
+ batch/traces:
+ timeout: 1s
+ send_batch_size: 50
+ batch/metrics:
+ timeout: 60s
+ exporters:
+ otlphttp:
+ endpoint: ${sumo_http_traces_url}
+ service:
+ extensions: [health_check]
+ pipelines:
+ traces:
+ receivers: [otlp,awsxray]
+ processors: [batch/traces]
+ exporters: [otlphttp]
+ metrics:
+ receivers: [otlp]
+ processors: [batch/metrics]
+ exporters: [otlphttp]
+ - sumo_http_traces_url: !Ref SumoHttpTracesURL
+ # Invoke aws-otel-collector-ctl to restart aws-otel-collector.
+ 03_restart-aws-otel-collector:
+ commands:
+ 01_stop_service:
+ command: sudo /opt/aws/aws-otel-collector/bin/aws-otel-collector-ctl -a stop
+ 02_start_service:
+ command: sudo /opt/aws/aws-otel-collector/bin/aws-otel-collector-ctl -a start
+ # Cfn-hup setting, it is to monitor the change of metadata.
+ # When there is change in the contents of json file in the metadata section, cfn-hup will call cfn-init to restart aws-otel-collector.
+ 01_setupCfnHup:
+ files:
+ '/etc/cfn/cfn-hup.conf':
+ content: !Sub |
+ [main]
+ stack=${AWS::StackId}
+ region=${AWS::Region}
+ interval=1
+ mode: '000400'
+ owner: root
+ group: root
+ '/etc/cfn/hooks.d/aws-otel-collector-auto-reloader.conf':
+ content: !Sub |
+ [cfn-auto-reloader-hook]
+ triggers=post.update
+ path=Resources.EC2Instance.Metadata.AWS::CloudFormation::Init.02_config-aws-otel-collector
+ action=/opt/aws/bin/cfn-init -v --stack ${AWS::StackId} --resource EC2Instance --region ${AWS::Region} --configsets UpdateEnvironment
+ runas=root
+ mode: '000400'
+ owner: root
+ group: root
+ "/lib/systemd/system/cfn-hup.service":
+ content: !Sub |
+ [Unit]
+ Description=cfn-hup daemon
+ [Service]
+ Type=simple
+ ExecStart=/opt/aws/bin/cfn-hup
+ Restart=always
+ [Install]
+ WantedBy=multi-user.target
+ commands:
+ 01enable_cfn_hup:
+ command: !Sub |
+ systemctl enable cfn-hup.service
+ 02start_cfn_hup:
+ command: !Sub |
+ systemctl start cfn-hup.service
+
+ Properties:
+ InstanceType:
+ Ref: InstanceType
+ IamInstanceProfile:
+        Ref: IAMInstanceProfile
+ KeyName:
+ Ref: SSHKeyName
+ ImageId:
+ Ref: InstanceAMI
+ SecurityGroups:
+ - Ref: InstanceSecurityGroup
+ Tags:
+ - Key: Name
+ Value: sumologic-aws-otel-col-ec2
+ UserData:
+ # This script below is to install aws-otel-collector, restart aws-otel-collector and tell the result to cloudformation.
+ Fn::Base64: !Sub
+ - |
+ #!/bin/bash
+
+ # Download AWS OTel Collector RPM
+ sudo rpm -Uvh https://aws-otel-collector.s3.amazonaws.com/amazon_linux/amd64/latest/aws-otel-collector.rpm
+
+ # Setup Sumologic HTTP Traces URL ENV
+ echo "export SUMO_HTTP_TRACES_URL=${sumo_http_traces_url}" > /etc/profile.d/setSumoVar.sh
+
+ /opt/aws/bin/cfn-init -v --stack ${AWS::StackId} --resource EC2Instance --region ${AWS::Region} --configsets default
+ /opt/aws/bin/cfn-signal -e $? --stack ${AWS::StackId} --resource EC2Instance --region ${AWS::Region}
+ - sumo_http_traces_url: !Ref SumoHttpTracesURL
+ DependsOn:
+ - EC2Role
+ - IAMInstanceProfile
+ - InstanceSecurityGroup
+
+ IAMInstanceProfile:
+ Type: 'AWS::IAM::InstanceProfile'
+ Properties:
+ InstanceProfileName: !Ref IAMInstanceProfileName
+ Path: /
+ Roles:
+ - !Ref IAMRole
+ DependsOn: EC2Role
+
+ EC2Role:
+ Type: 'AWS::IAM::Role'
+ Properties:
+ Description: Allows EC2 to call AWS services on your behalf.
+ AssumeRolePolicyDocument:
+ Version: 2012-10-17
+ Statement:
+ - Effect: Allow
+ Principal:
+ Service: ec2.amazonaws.com
+ Action: 'sts:AssumeRole'
+ ManagedPolicyArns:
+ - !Sub 'arn:aws:iam::${AWS::AccountId}:policy/${IAMPolicy}'
+ RoleName: !Ref IAMRole
+ DependsOn: EC2Policy
+
+ EC2Policy:
+ Type: 'AWS::IAM::ManagedPolicy'
+ Properties:
+ Description: Allows EC2 to call AWS services on your behalf.
+ Path: /
+ ManagedPolicyName: !Ref IAMPolicy
+ PolicyDocument:
+ Version: "2012-10-17"
+ Statement:
+ - Effect: Allow
+ Action:
+ - logs:PutLogEvents
+ - logs:CreateLogGroup
+ - logs:CreateLogStream
+ - logs:DescribeLogStreams
+ - logs:DescribeLogGroups
+ - xray:PutTraceSegments
+ - xray:PutTelemetryRecords
+ - xray:GetSamplingRules
+ - xray:GetSamplingTargets
+ - xray:GetSamplingStatisticSummaries
+ - ssm:GetParameters
+ Resource: '*'
+
+ InstanceSecurityGroup:
+ Type: AWS::EC2::SecurityGroup
+ Properties:
+ GroupDescription: Enable SSH access via port 22
+ SecurityGroupIngress:
+ - IpProtocol: tcp
+ FromPort: 22
+ ToPort: 22
+ CidrIp: 0.0.0.0/0
+ - IpProtocol: tcp
+ FromPort: 4317
+ ToPort: 4317
+ CidrIp: 0.0.0.0/0
+ - IpProtocol: tcp
+ FromPort: 55680
+ ToPort: 55680
+ CidrIp: 0.0.0.0/0
+ - IpProtocol: tcp
+ FromPort: 55681
+ ToPort: 55681
+ CidrIp: 0.0.0.0/0
+ - IpProtocol: udp
+ FromPort: 2000
+ ToPort: 2000
+ CidrIp: 0.0.0.0/0
diff --git a/examples/non-kubernetes/aws-otel-ecs-ec2-deployment.yaml b/examples/non-kubernetes/aws-otel-ecs-ec2-deployment.yaml
new file mode 100644
index 000000000000..31e593fa76c9
--- /dev/null
+++ b/examples/non-kubernetes/aws-otel-ecs-ec2-deployment.yaml
@@ -0,0 +1,127 @@
+AWSTemplateFormatVersion: 2010-09-09
+Description: 'Template to install AWS OTel Collector on ECS in EC2 mode'
+Parameters:
+ IAMTaskRole:
+ Description: Task attached IAM role
+ Type: String
+ Default: SumologicAWSOTelColTaskRoleECSEC2
+ ConstraintDescription: must be an existing IAM role which will be attached to EC2 instance.
+ IAMExecutionRole:
+ Description: Task Execution attached IAM role
+ Type: String
+ Default: SumologicAWSOTelColExecutionRoleECSEC2
+ ConstraintDescription: must be an existing IAM role which will be attached to EC2 instance.
+ IAMPolicy:
+ Description: IAM Role attached IAM Policy
+ Type: String
+ Default: SumologicAWSOTelColPolicyECSEC2
+ ConstraintDescription: Must be an existing IAM Managed Policy which will be attached to IAM Role.
+ ClusterName:
+ Type: String
+ Description: Enter the name of your ECS cluster from which you want to collect telemetry data
+ SumoHttpTracesURL:
+ Type: String
+    Description: Enter the Sumologic HTTP Traces Endpoint URL
+ SumoAWSOTelColConfig:
+    Type: AWS::SSM::Parameter::Value<String>
+ Default: sumologic-otel-col-config
+ Description: AWS SSM Parameter which contains OTel Collector config file
+Resources:
+ ECSTaskDefinition:
+ Type: 'AWS::ECS::TaskDefinition'
+ Properties:
+ Family: sumologic-aws-otel-collector-ec2
+ TaskRoleArn: !Sub 'arn:aws:iam::${AWS::AccountId}:role/${IAMTaskRole}'
+ ExecutionRoleArn: !Sub 'arn:aws:iam::${AWS::AccountId}:role/${IAMExecutionRole}'
+ ContainerDefinitions:
+ - logConfiguration:
+ logDriver: awslogs
+ options:
+ awslogs-create-group: 'True'
+ awslogs-group: /ecs/aws-otel-collector
+ awslogs-region: !Ref 'AWS::Region'
+ awslogs-stream-prefix: ecs
+ portMappings:
+ - hostPort: 2000
+ protocol: udp
+ containerPort: 2000
+ - hostPort: 4317
+ protocol: tcp
+ containerPort: 4317
+ - hostPort: 55681
+ protocol: tcp
+ containerPort: 55681
+ environment:
+ - name: SUMO_HTTP_TRACES_URL
+ value: !Ref SumoHttpTracesURL
+ - name: AOT_CONFIG_CONTENT
+ value: !Ref SumoAWSOTelColConfig
+ image: amazon/aws-otel-collector:latest
+ name: sumologic-aws-otel-collector
+ RequiresCompatibilities:
+ - EC2
+ Cpu: 1024
+ Memory: 2048
+ DependsOn:
+ - ECSTaskRole
+ - ECSExecutionRole
+ ECSReplicaService:
+ Type: 'AWS::ECS::Service'
+ Properties:
+ TaskDefinition: !Ref ECSTaskDefinition
+ Cluster: !Ref ClusterName
+ LaunchType: EC2
+ SchedulingStrategy: REPLICA
+ DesiredCount: 1
+ ServiceName: sumologic-aws-otel-col-svc-ecs-ec2
+ ECSTaskRole:
+ Type: 'AWS::IAM::Role'
+ Properties:
+ Description: Allows ECS tasks to call AWS services on your behalf.
+ AssumeRolePolicyDocument:
+ Version: 2012-10-17
+ Statement:
+ - Sid: ''
+ Effect: Allow
+ Principal:
+ Service: ecs-tasks.amazonaws.com
+ Action: 'sts:AssumeRole'
+ Policies:
+ - PolicyName: !Ref IAMPolicy
+ PolicyDocument:
+ Version: 2012-10-17
+ Statement:
+ - Effect: Allow
+ Action:
+ - 'logs:PutLogEvents'
+ - 'logs:CreateLogGroup'
+ - 'logs:CreateLogStream'
+ - 'logs:DescribeLogStreams'
+ - 'logs:DescribeLogGroups'
+ - 'xray:PutTraceSegments'
+ - 'xray:PutTelemetryRecords'
+ - 'xray:GetSamplingRules'
+ - 'xray:GetSamplingTargets'
+ - 'xray:GetSamplingStatisticSummaries'
+ - 'ssm:GetParameters'
+ Resource: '*'
+ RoleName: !Ref IAMTaskRole
+ ECSExecutionRole:
+ Type: 'AWS::IAM::Role'
+ Properties:
+ Description: >-
+        Allows the ECS container agent to make calls to the Amazon ECS API
+        on your behalf.
+ AssumeRolePolicyDocument:
+ Version: 2012-10-17
+ Statement:
+ - Sid: ''
+ Effect: Allow
+ Principal:
+ Service: ecs-tasks.amazonaws.com
+ Action: 'sts:AssumeRole'
+ ManagedPolicyArns:
+ - 'arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy'
+ - 'arn:aws:iam::aws:policy/CloudWatchLogsFullAccess'
+ - 'arn:aws:iam::aws:policy/AmazonSSMReadOnlyAccess'
+ RoleName: !Ref IAMExecutionRole
diff --git a/examples/non-kubernetes/aws-otel-ecs-fargate-deployment.yaml b/examples/non-kubernetes/aws-otel-ecs-fargate-deployment.yaml
new file mode 100644
index 000000000000..a656912ea0dc
--- /dev/null
+++ b/examples/non-kubernetes/aws-otel-ecs-fargate-deployment.yaml
@@ -0,0 +1,153 @@
+AWSTemplateFormatVersion: 2010-09-09
+Description: 'Template to install AWS OTel Collector on ECS in Fargate mode'
+Parameters:
+ IAMTaskRole:
+ Description: Task attached IAM role
+ Type: String
+ Default: SumologicAWSOTelColTaskRoleECSFargate
+ ConstraintDescription: must be an existing IAM role which will be attached to EC2 instance.
+ IAMExecutionRole:
+ Description: Task Execution attached IAM role
+ Type: String
+ Default: SumologicAWSOTelColExecutionRoleECSFargate
+ ConstraintDescription: must be an existing IAM role which will be attached to EC2 instance.
+ IAMPolicy:
+ Description: IAM Role attached IAM Policy
+ Type: String
+ Default: SumologicAWSOTelColPolicyECSFargate
+ ConstraintDescription: Must be an existing IAM Managed Policy which will be attached to IAM Role.
+ ClusterName:
+ Type: String
+ Description: Enter the name of your ECS cluster from which you want to collect telemetry data
+ SecurityGroups:
+ Type: CommaDelimitedList
+ Description: The list of SecurityGroupIds in your Virtual Private Cloud (VPC)
+ Subnets:
+ Type: CommaDelimitedList
+ Description: The list of Subnets in your Virtual Private Cloud (VPC)
+ SumoHttpTracesURL:
+ Type: String
+    Description: Enter the Sumologic HTTP Traces Endpoint URL
+ SumoAWSOTelColConfig:
+    Type: AWS::SSM::Parameter::Value<String>
+ Default: sumologic-otel-col-config
+ Description: AWS SSM Parameter which contains OTel Collector config file
+Resources:
+ ECSTaskDefinition:
+ Type: 'AWS::ECS::TaskDefinition'
+ Properties:
+ Family: sumologic-aws-otel-collector-fargate
+ TaskRoleArn: !Sub 'arn:aws:iam::${AWS::AccountId}:role/${IAMTaskRole}'
+ ExecutionRoleArn: !Sub 'arn:aws:iam::${AWS::AccountId}:role/${IAMExecutionRole}'
+ NetworkMode: awsvpc
+ ContainerDefinitions:
+ - LogConfiguration:
+ LogDriver: awslogs
+ Options:
+ awslogs-create-group: 'True'
+ awslogs-group: /ecs/aws-otel-collector
+ awslogs-region: !Ref 'AWS::Region'
+ awslogs-stream-prefix: ecs
+ portMappings:
+ - hostPort: 2000
+ protocol: udp
+ containerPort: 2000
+ - hostPort: 4317
+ protocol: tcp
+ containerPort: 4317
+ - hostPort: 6831
+ protocol: udp
+ containerPort: 6831
+ - hostPort: 6832
+ protocol: udp
+ containerPort: 6832
+ - hostPort: 9411
+ protocol: tcp
+ containerPort: 9411
+ - hostPort: 14250
+ protocol: tcp
+ containerPort: 14250
+ - hostPort: 14268
+ protocol: tcp
+ containerPort: 14268
+ - hostPort: 55681
+ protocol: tcp
+ containerPort: 55681
+ environment:
+ - name: SUMO_HTTP_TRACES_URL
+ value: !Ref SumoHttpTracesURL
+ - name: AOT_CONFIG_CONTENT
+ value: !Ref SumoAWSOTelColConfig
+ image: amazon/aws-otel-collector:latest
+ name: sumologic-aws-otel-collector
+ RequiresCompatibilities:
+ - FARGATE
+ Cpu: 1024
+ Memory: 2048
+ DependsOn:
+ - ECSTaskRole
+ - ECSExecutionRole
+ ECSReplicaService:
+ Type: 'AWS::ECS::Service'
+ Properties:
+ TaskDefinition: !Ref ECSTaskDefinition
+ Cluster: !Ref ClusterName
+ LaunchType: FARGATE
+ SchedulingStrategy: REPLICA
+ DesiredCount: 1
+ ServiceName: sumologic-aws-otel-col-svc-ecs-fargate
+ NetworkConfiguration:
+ AwsvpcConfiguration:
+ AssignPublicIp: ENABLED
+ SecurityGroups: !Ref SecurityGroups
+ Subnets: !Ref Subnets
+ ECSTaskRole:
+ Type: 'AWS::IAM::Role'
+ Properties:
+ Description: Allows ECS tasks to call AWS services on your behalf.
+ AssumeRolePolicyDocument:
+ Version: 2012-10-17
+ Statement:
+ - Effect: Allow
+ Principal:
+ Service: ecs-tasks.amazonaws.com
+ Action: 'sts:AssumeRole'
+ Policies:
+ - PolicyName: !Ref IAMPolicy
+ PolicyDocument:
+ Version: 2012-10-17
+ Statement:
+ - Effect: Allow
+ Action:
+ - 'logs:PutLogEvents'
+ - 'logs:CreateLogGroup'
+ - 'logs:CreateLogStream'
+ - 'logs:DescribeLogStreams'
+ - 'logs:DescribeLogGroups'
+ - 'xray:PutTraceSegments'
+ - 'xray:PutTelemetryRecords'
+ - 'xray:GetSamplingRules'
+ - 'xray:GetSamplingTargets'
+ - 'xray:GetSamplingStatisticSummaries'
+ - 'ssm:GetParameters'
+ Resource: '*'
+ RoleName: !Ref IAMTaskRole
+ ECSExecutionRole:
+ Type: 'AWS::IAM::Role'
+ Properties:
+ Description: >-
+        Allows the ECS container agent to make calls to the Amazon ECS API
+        on your behalf.
+ AssumeRolePolicyDocument:
+ Version: 2012-10-17
+ Statement:
+ - Sid: ''
+ Effect: Allow
+ Principal:
+ Service: ecs-tasks.amazonaws.com
+ Action: 'sts:AssumeRole'
+ ManagedPolicyArns:
+ - 'arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy'
+ - 'arn:aws:iam::aws:policy/CloudWatchLogsFullAccess'
+ - 'arn:aws:iam::aws:policy/AmazonSSMReadOnlyAccess'
+ RoleName: !Ref IAMExecutionRole
diff --git a/examples/non-kubernetes/gateway-configuration-template-with-cascading-filter.yaml b/examples/non-kubernetes/gateway-configuration-template-with-cascading-filter.yaml
new file mode 100644
index 000000000000..8b4b0aae26d7
--- /dev/null
+++ b/examples/non-kubernetes/gateway-configuration-template-with-cascading-filter.yaml
@@ -0,0 +1,104 @@
+receivers:
+ jaeger:
+ protocols:
+ thrift_compact:
+ endpoint: "0.0.0.0:6831"
+ thrift_binary:
+ endpoint: "0.0.0.0:6832"
+ grpc:
+ endpoint: "0.0.0.0:14250"
+ thrift_http:
+ endpoint: "0.0.0.0:14268"
+ opencensus:
+ endpoint: "0.0.0.0:55678"
+ otlp:
+ protocols:
+ grpc:
+ endpoint: "0.0.0.0:4317"
+ http:
+ endpoint: "0.0.0.0:55681"
+ zipkin:
+ endpoint: "0.0.0.0:9411"
+processors:
+ ## The memory_limiter processor is used to prevent out of memory situations on the collector.
+ memory_limiter:
+ ## check_interval is the time between measurements of memory usage for the
+ ## purposes of avoiding going over the limits. Defaults to zero, so no
+ ## checks will be performed. Values below 1 second are not recommended since
+ ## it can result in unnecessary CPU consumption.
+ check_interval: 5s
+
+ ## Maximum amount of memory, in MiB, targeted to be allocated by the process heap.
+ ## Note that typically the total memory usage of process will be about 50MiB higher
+ ## than this value.
+ limit_mib: 1900
+
+ ## Smart cascading filtering rules with preset limits.
+ cascading_filter:
+ ## (default = 30s): Wait time since the first span of a trace arrived before making
+ ## a filtering decision
+ decision_wait: 30s
+ ## (default = 50000): Maximum number of traces kept in memory
+ num_traces: 100000
+ ## (default = 0): Expected number of new traces (helps in allocating data structures)
+ expected_new_traces_per_sec: 1000
+ ## (default = 0): defines the global limit of maximum number of spans per second
+ ## that are going to be emitted
+ spans_per_second: 1660
+ ## (default = 0.2): Ratio of spans that are always probabilistically filtered
+ ## (hence might be used for metrics calculation).
+ probabilistic_filtering_ratio: 0.2
+ ## (no default): Policies used to make a sampling decision
+ policies:
+      - name: sampling-priority
+        ## string_attribute: allows to specify conditions that need to be met
+        string_attribute: {
+          key: sampling.priority, values: [ "1" ]
+        }
+        ## Spans_per_second: max number of emitted spans per second by this policy.
+        spans_per_second: 500
+ - name: extended-duration
+ ## Spans_per_second: max number of emitted spans per second by this policy.
+ spans_per_second: 500
+ properties:
+ ## Selects the span if the duration is greater or equal the given
+ ## value (use s or ms as the suffix to indicate unit).
+ min_duration: 5s
+      - name: "status_code_condition"
+        ## Spans_per_second: max number of emitted spans per second by this policy.
+        spans_per_second: 500
+ ## numeric_attribute: provides a list of conditions that need to be met
+ numeric_attribute: {
+ key: "http.status_code", min_value: 400, max_value: 999
+ }
+ - name: everything-else
+        ## This selects all traces, up to the global limit
+ spans_per_second: -1
+
+ ## The batch processor accepts spans and places them into batches grouped by node and resource
+ batch:
+ ## Number of spans after which a batch will be sent regardless of time
+ send_batch_size: 256
+ ## Never more than this many spans are being sent in a batch
+ send_batch_max_size: 512
+ ## Time duration after which a batch will be sent regardless of size
+ timeout: 5s
+
+extensions:
+ health_check: {}
+exporters:
+ otlphttp:
+ traces_endpoint: ENDPOINT_URL
+ ## Following generates verbose logs with span content, useful to verify what
+ ## metadata is being tagged. To enable, uncomment and add "logging" to exporters below.
+ ## There are two levels that could be used: `debug` and `info` with the former
+ ## being much more verbose and including (sampled) spans content
+ # logging:
+ # loglevel: debug
+service:
+ extensions: [health_check]
+ pipelines:
+ traces:
+ receivers: [jaeger, opencensus, otlp, zipkin]
+ processors: [memory_limiter, cascading_filter, batch]
+ exporters: [otlphttp]
diff --git a/examples/non-kubernetes/gateway-configuration-template.yaml b/examples/non-kubernetes/gateway-configuration-template.yaml
new file mode 100644
index 000000000000..b15e832b80be
--- /dev/null
+++ b/examples/non-kubernetes/gateway-configuration-template.yaml
@@ -0,0 +1,62 @@
+receivers:
+ jaeger:
+ protocols:
+ thrift_compact:
+ endpoint: "0.0.0.0:6831"
+ thrift_binary:
+ endpoint: "0.0.0.0:6832"
+ grpc:
+ endpoint: "0.0.0.0:14250"
+ thrift_http:
+ endpoint: "0.0.0.0:14268"
+ opencensus:
+ endpoint: "0.0.0.0:55678"
+ otlp:
+ protocols:
+ grpc:
+ endpoint: "0.0.0.0:4317"
+ http:
+ endpoint: "0.0.0.0:55681"
+ zipkin:
+ endpoint: "0.0.0.0:9411"
+processors:
+ ## The memory_limiter processor is used to prevent out of memory situations on the collector.
+ memory_limiter:
+ ## check_interval is the time between measurements of memory usage for the
+ ## purposes of avoiding going over the limits. Defaults to zero, so no
+ ## checks will be performed. Values below 1 second are not recommended since
+ ## it can result in unnecessary CPU consumption.
+ check_interval: 5s
+
+ ## Maximum amount of memory, in MiB, targeted to be allocated by the process heap.
+ ## Note that typically the total memory usage of process will be about 50MiB higher
+ ## than this value.
+ limit_mib: 1900
+
+ ## The batch processor accepts spans and places them into batches grouped by node and resource
+ batch:
+ ## Number of spans after which a batch will be sent regardless of time
+ send_batch_size: 256
+ ## Never more than this many spans are being sent in a batch
+ send_batch_max_size: 512
+ ## Time duration after which a batch will be sent regardless of size
+ timeout: 5s
+
+extensions:
+ health_check: {}
+exporters:
+ otlphttp:
+ traces_endpoint: ENDPOINT_URL
+ ## Following generates verbose logs with span content, useful to verify what
+ ## metadata is being tagged. To enable, uncomment and add "logging" to exporters below.
+ ## There are two levels that could be used: `debug` and `info` with the former
+ ## being much more verbose and including (sampled) spans content
+ # logging:
+ # loglevel: debug
+service:
+ extensions: [health_check]
+ pipelines:
+ traces:
+ receivers: [jaeger, opencensus, otlp, zipkin]
+ processors: [memory_limiter, batch]
+ exporters: [otlphttp]
diff --git a/go.mod b/go.mod
index ef698791fcb0..9e1e94de4990 100644
--- a/go.mod
+++ b/go.mod
@@ -49,18 +49,21 @@ require (
github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.38.0
github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.38.0
github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.38.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/cascadingfilterprocessor v0.38.0
github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.38.0
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatorateprocessor v0.38.0
github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.38.0
github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.38.0
github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor v0.38.0
github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.38.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sprocessor v0.38.0
github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor v0.38.0
github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.38.0
github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.38.0
github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.38.0
github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.38.0
github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.38.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/sourceprocessor v0.38.0
github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanmetricsprocessor v0.38.0
github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.38.0
github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.38.0
@@ -108,6 +111,12 @@ require (
require github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver v0.38.0
+require (
+ github.com/onsi/gomega v1.14.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicsyslogprocessor v0.0.0-00010101000000-000000000000
+ github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 // indirect
+)
+
require (
cloud.google.com/go v0.97.0 // indirect
cloud.google.com/go/monitoring v0.1.0 // indirect
@@ -255,7 +264,6 @@ require (
github.com/observiq/go-syslog/v3 v3.0.2 // indirect
github.com/olivere/elastic v6.2.37+incompatible // indirect
github.com/onsi/ginkgo v1.16.4 // indirect
- github.com/onsi/gomega v1.14.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer v0.38.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil v0.38.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics v0.38.0 // indirect
@@ -277,7 +285,6 @@ require (
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.38.0 // indirect
github.com/open-telemetry/opentelemetry-log-collection v0.22.0 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
- github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 // indirect
github.com/openlyinc/pointy v1.1.2 // indirect
github.com/openshift/api v0.0.0-20210521075222-e273a339932a // indirect
github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142 // indirect
@@ -387,6 +394,8 @@ require (
sigs.k8s.io/yaml v1.2.0 // indirect
)
+replace github.com/influxdata/telegraf => github.com/sumologic/telegraf v1.17.3-sumo
+
// Replace references to modules that are in this repository with their relateive paths
// so that we always build with current (latest) version of the source code.
@@ -596,14 +605,22 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/processor/filt
replace github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor => ./processor/groupbyattrsprocessor
-replace github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor => ./processor/groupbytraceprocessor
+replace github.com/open-telemetry/opentelemetry-collector-contrib/processor/cascadingfilterprocessor => ./processor/cascadingfilterprocessor/
+
+replace github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor => ./processor/groupbytraceprocessor/
replace github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor => ./processor/k8sattributesprocessor/
+replace github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sprocessor => ./processor/k8sprocessor/
+
+replace github.com/open-telemetry/opentelemetry-collector-contrib/processor/sourceprocessor => ./processor/sourceprocessor/
+
replace github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor => ./processor/resourcedetectionprocessor/
replace github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor => ./processor/resourceprocessor/
+replace github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicsyslogprocessor => ./processor/sumologicsyslogprocessor/
+
replace github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor => ./processor/metricstransformprocessor/
replace github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor => ./processor/metricsgenerationprocessor/
diff --git a/internal/components/components.go b/internal/components/components.go
index a8d8e4e5d1c0..1447206ea78a 100644
--- a/internal/components/components.go
+++ b/internal/components/components.go
@@ -63,20 +63,24 @@ import (
"github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension"
"github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/processor/cascadingfilterprocessor"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatorateprocessor"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sprocessor"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/processor/sourceprocessor"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanmetricsprocessor"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicsyslogprocessor"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver"
@@ -244,7 +248,10 @@ func Components() (component.Factories, error) {
attributesprocessor.NewFactory(),
filterprocessor.NewFactory(),
groupbyattrsprocessor.NewFactory(),
+ cascadingfilterprocessor.NewFactory(),
+ sourceprocessor.NewFactory(),
groupbytraceprocessor.NewFactory(),
+ k8sprocessor.NewFactory(),
k8sattributesprocessor.NewFactory(),
metricstransformprocessor.NewFactory(),
metricsgenerationprocessor.NewFactory(),
@@ -257,6 +264,7 @@ func Components() (component.Factories, error) {
spanprocessor.NewFactory(),
cumulativetodeltaprocessor.NewFactory(),
deltatorateprocessor.NewFactory(),
+ sumologicsyslogprocessor.NewFactory(),
}
for _, pr := range factories.Processors {
processors = append(processors, pr)
diff --git a/internal/components/processors_test.go b/internal/components/processors_test.go
index f26b70855397..65160ef9cb9d 100644
--- a/internal/components/processors_test.go
+++ b/internal/components/processors_test.go
@@ -93,7 +93,7 @@ func TestDefaultProcessors(t *testing.T) {
},
}
- assert.Equal(t, len(tests)+11 /* not tested */, len(procFactories))
+ assert.Equal(t, len(tests)+14 /* not tested */, len(procFactories))
for _, tt := range tests {
t.Run(string(tt.processor), func(t *testing.T) {
factory, ok := procFactories[tt.processor]
diff --git a/otelcolbuilder/.gitignore b/otelcolbuilder/.gitignore
new file mode 100644
index 000000000000..b3087b09750d
--- /dev/null
+++ b/otelcolbuilder/.gitignore
@@ -0,0 +1 @@
+cmd/
diff --git a/otelcolbuilder/.otelcol-builder.yaml b/otelcolbuilder/.otelcol-builder.yaml
new file mode 100644
index 000000000000..611ac2c32093
--- /dev/null
+++ b/otelcolbuilder/.otelcol-builder.yaml
@@ -0,0 +1,39 @@
+dist:
+ name: otelcol-sumo
+ description: Sumo Logic OpenTelemetry Collector distribution
+
+ # the module name for the new distribution, following Go mod conventions. Optional, but recommended.
+ module: github.com/SumoLogic/opentelemetry-collector-builder
+
+ otelcol_version: 0.24.0 # the OpenTelemetry Collector version to use as base for the distribution.
+ output_path: ./cmd/ # the path to write the output (sources and binary).
+
+exporters:
+ - gomod: "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sumologicexporter v0.24.0"
+
+processors:
+ - gomod: "github.com/open-telemetry/opentelemetry-collector-contrib/processor/cascadingfilterprocessor v0.24.0"
+ - gomod: "github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sprocessor v0.24.0"
+ - gomod: "github.com/open-telemetry/opentelemetry-collector-contrib/processor/sourceprocessor v0.24.0"
+
+receivers:
+ - gomod: "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/telegrafreceiver v0.24.0"
+
+# Replacement paths are relative to the output_path (location of source files)
+replaces:
+ # Customized processors
+ - github.com/open-telemetry/opentelemetry-collector-contrib/processor/cascadingfilterprocessor => ./../../processor/cascadingfilterprocessor
+ # -
+ - github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sprocessor => ./../../processor/k8sprocessor
+ - github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig => ./../../internal/k8sconfig
+ # -
+ - github.com/open-telemetry/opentelemetry-collector-contrib/processor/sourceprocessor => ./../../processor/sourceprocessor
+
+ # ----------------------------------------------------------------------------
+ # Customized receivers
+ - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/telegrafreceiver => ./../../receiver/telegrafreceiver
+ - github.com/influxdata/telegraf => github.com/sumologic/telegraf v1.17.3-sumo
+
+ # ----------------------------------------------------------------------------
+ # Customized core
+ - go.opentelemetry.io/collector => github.com/SumoLogic/opentelemetry-collector v0.24.0-sumo
diff --git a/otelcolbuilder/Makefile b/otelcolbuilder/Makefile
new file mode 100644
index 000000000000..3f4b7cdbd717
--- /dev/null
+++ b/otelcolbuilder/Makefile
@@ -0,0 +1,8 @@
+install:
+ go install github.com/open-telemetry/opentelemetry-collector-builder@v0.24.0
+
+install-prego1.16:
+ GO111MODULE=on go get github.com/open-telemetry/opentelemetry-collector-builder@v0.24.0
+
+build:
+ opentelemetry-collector-builder --config .otelcol-builder.yaml
diff --git a/processor/cascadingfilterprocessor/Makefile b/processor/cascadingfilterprocessor/Makefile
new file mode 100644
index 000000000000..c1496226e590
--- /dev/null
+++ b/processor/cascadingfilterprocessor/Makefile
@@ -0,0 +1 @@
+include ../../Makefile.Common
\ No newline at end of file
diff --git a/processor/cascadingfilterprocessor/README.md b/processor/cascadingfilterprocessor/README.md
new file mode 100644
index 000000000000..53719b5c948f
--- /dev/null
+++ b/processor/cascadingfilterprocessor/README.md
@@ -0,0 +1,188 @@
+# Cascading Filter Processor
+
+Supported pipeline types: traces
+
+The Cascading Filter processor is a fork of
+[tailsamplingprocessor][tailsamplingprocessor] which allows for defining smart
+cascading filtering rules with preset limits.
+
+[tailsamplingprocessor]:https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/tailsamplingprocessor
+
+## Processor configuration
+
+The following configuration options should be configured as desired:
+- `trace_reject_rules` (no default): policies used to explicitly drop matching traces
+- `trace_accept_rules` (no default): policies used to pass matching traces, within a specified limit
+- `spans_per_second` (no default): maximum total number of emitted spans per second.
+When set, the total number of spans each second is never exceeded. This value can be also calculated
+automatically when `probabilistic_filtering_rate` and/or `trace_accept_rules` are set
+- `probabilistic_filtering_rate` (no default): number of spans that are always probabilistically filtered
+(hence might be used for metrics calculation).
+- `probabilistic_filtering_ratio` (no default): alternative way to specify the ratio of spans which
+are always probabilistically filtered (hence might be used for metrics calculation). The ratio is
+specified as portion of output spans (defined by `spans_per_second`) rather than input spans.
+So filtering rate of `0.2` and max span rate of `1500` produces at most `300` probabilistically sampled spans per second.
+
+The following configuration options can also be modified:
+- `decision_wait` (default = 30s): Wait time since the first span of a trace before making a filtering decision
+- `num_traces` (default = 100000): Max number of traces for which decisions are kept in memory
+- `expected_new_traces_per_sec` (default = 0): Expected number of new traces (helps in allocating data structures)
+
+Whenever rate limiting is applied, only full traces are accepted (if a trace won't fit within the limit,
+it will never be filtered). For spans that arrive late, previous decisions are kept for some time.
+
+## Updated span attributes
+
+The processor modifies each span's attributes by setting the following two attributes:
+- `sampling.rule`: describing if `probabilistic` or `filtered` policy was applied
+- `sampling.probability`: describing the effective sampling rate in case of `probabilistic` rule. E.g. if there were `5000`
+spans evaluated in a given second, with `1500` max total spans per second and `0.2` filtering ratio, at most `300` spans
+would be selected by such a rule. This would result in having `sampling.probability=0.06` (`300/5000=0.06`). If such a value is already
+set by head-based (or other) sampling, it's multiplied by the calculated value.
+
+## Rejected trace configuration
+
+It is possible to specify conditions for traces which should be fully dropped, without including them in probabilistic
+filtering or additional policy evaluation. This typically happens e.g. when healthchecks are filtered-out.
+
+Each of the specified drop rules has several properties:
+- `name` (required): identifies the rule
+- `numeric_attribute: {key: , min_value: , max_value: }`: selects span by matching numeric
+ attribute (either at resource or span level)
+- `string_attribute: {key: , values: [, ]}`: selects span by matching string attribute that is one
+ of the provided values (either at resource or span level)
+- `name_pattern: `: selects the span if its operation name matches the provided regular expression
+
+
+## Accepted trace configuration
+
+Each defined policy is evaluated with order as specified in config. There are several properties:
+- `name` (required): identifies the policy
+- `spans_per_second` (default = 0): defines maximum number of spans per second that could be handled by this policy. When set to `-1`,
+it selects the traces only if the global limit is not exceeded by other policies (however, without further limitations)
+
+Additionally, each of the policy might have any of the following filtering criteria defined. They are evaluated for
+each of the trace spans. If at least one span matching all defined criteria is found, the trace is selected:
+- `numeric_attribute: {key: , min_value: , max_value: }`: selects span by matching numeric
+attribute (either at resource or span level)
+- `string_attribute: {key: , values: [, ]}`: selects span by matching string attribute that is one
+of the provided values (either at resource or span level)
+- `properties: { min_number_of_errors: }`: selects the trace if it has at least provided number of errors
+(determined based on the span status field value)
+- `properties: { min_number_of_spans: }`: selects the trace if it has at least provided number of spans
+- `properties: { min_duration: }`: selects the span if the duration is greater or equal the given value
+(use `s` or `ms` as the suffix to indicate unit)
+- `properties: { name_pattern: }`: selects the span if its operation name matches the provided regular expression
+
+To invert the decision (which is still a subject to rate limiting), additional property can be configured:
+- `invert_match: ` (default=`false`): when set to `true`, the opposite decision is selected for the trace. E.g.
+if trace matches a given string attribute and `invert_match=true`, then the trace is not selected
+
+## Limiting the number of spans
+
+There are two `spans_per_second` settings. The global one and the policy-one.
+
+While evaluating traces, the limit is evaluated first on the policy level and then on the global level. The sum
+of all `spans_per_second` rates might be actually higher than the global limit, but the latter will never be
+exceeded (so some of the traces will not be included).
+
+For example, we have 3 policies: `A, B, C`. Each of them has limit of `300` spans per second and the global limit
+is `500` spans per second. Now, let's say that for each of the policies there were 3 distinct traces, each
+having `100` spans and matching the policy criteria (let's call them `A1, A2, ... B1, B2...` and so forth):
+
+`Policy A`: `A1, A2, A3`
+`Policy B`: `B1, B2, B3`
+`Policy C`: `C1, C2, C3`
+
+However, in total, this is `900` spans, which is more than the global limit of `500` spans/second. The processor
+will take care of that and randomly select only the spans up to the global limit. So eventually, it might
+for example send further only the following traces: `A1, A2, B1, C2, C3` and filter out the others.
+
+## Examples
+
+### Just filtering out healthchecks
+
+Following example will drop all traces that match either of the following criteria:
+* there is a span which name starts with "health"
+* there is a span coming from a service named "healthcheck"
+
+```yaml
+processors:
+ cascading_filter:
+ trace_reject_filters:
+ - name: remove-all-traces-with-health-span
+ name_pattern: "health.*"
+ - name: remove-all-traces-with-healthcheck-service
+ string_attribute: {key: service.name, values: [healthcheck]}
+ ```
+
+### Filtering out healthchecks and traffic shaping
+
+In the following example few more conditions were added:
+* probabilistic filtering was set; it will randomly select traces for a total of up to 100 spans/second
+* two traffic-shaping rules are applied:
+ * traces which have minimum duration of 3s are selected (for up to 500 spans/second)
+ * traces which have at least 3 error spans are selected (for up to 500 spans/second)
+
+Based on those rules, at most 1100 spans/second will be output.
+
+```yaml
+cascadingfilter:
+ probabilistic_filtering_rate: 100
+ trace_reject_filters:
+ - name: remove-all-traces-with-health-span
+ name_pattern: "health.*"
+ - name: remove-all-traces-with-healthcheck-service
+ string_attribute: {key: service.name, values: [healthcheck]}
+ trace_accept_filters:
+ - name: tail-based-duration
+ properties:
+ min_duration: 3s
+ spans_per_second: 500 # <- adjust the output traffic level
+ - name: tail-based-errors
+ properties:
+ min_number_of_errors: 3
+ spans_per_second: 500 # <- adjust the output traffic level
+```
+
+### Advanced configuration
+
+It is additionally possible to use adaptive sampling, which will split the
+total spans per second budget across all the rules evenly (for up to specified limit).
+Additionally, it can be set that if there's any budget left, it can be filled with random traces.
+
+```yaml
+cascadingfilter:
+ decision_wait: 30s
+ num_traces: 200000
+ expected_new_traces_per_sec: 2000
+ spans_per_second: 1800
+ probabilistic_filtering_rate: 100
+ trace_reject_filters:
+ - name: remove-all-traces-with-health-span
+ name_pattern: "health.*"
+ - name: remove-all-traces-with-healthcheck-service
+ string_attribute: {key: service.name, values: [healthcheck]}
+ trace_accept_filters:
+ - name: tail-based-duration
+ properties:
+ min_duration: 3s
+ spans_per_second: 500 # <- adjust the output traffic level
+ - name: tail-based-errors
+ properties:
+ min_number_of_errors: 3
+ spans_per_second: 500 # <- adjust the output traffic level
+ - name: traces-with-foo-span-and-high-latency
+ properties:
+ name_pattern: "foo.*"
+ min_duration: 10s
+ spans_per_second: 1000 # <- adjust the output traffic level
+ - name: traces-with-some-attribute
+ string_attribute: {key: important-key, values: [value1, value2]}
+ spans_per_second: 300 # <- adjust the output traffic level
+ - name: everything_else
+ spans_per_second: -1 # If there's anything left in the budget, it will randomly select remaining traces
+```
+
+Refer to [cascading_filter_config.yaml](./testdata/cascading_filter_config.yaml) for detailed
+examples on using the processor.
diff --git a/processor/cascadingfilterprocessor/bigendianconverter/big_endian_converter.go b/processor/cascadingfilterprocessor/bigendianconverter/big_endian_converter.go
new file mode 100644
index 000000000000..4f8cc2661474
--- /dev/null
+++ b/processor/cascadingfilterprocessor/bigendianconverter/big_endian_converter.go
@@ -0,0 +1,47 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+package bigendianconverter
+
+import (
+ "encoding/binary"
+
+ "go.opentelemetry.io/collector/model/pdata"
+)
+
+// NOTE:
+// This code was copied over from:
+// https://github.com/open-telemetry/opentelemetry-collector/blob/v0.28.0/internal/idutils/big_endian_converter.go
+// to allow processor tests to still run as they used to.
+
+// UInt64ToTraceID converts the pair of uint64 representation of a TraceID to pdata.TraceID.
+func UInt64ToTraceID(high, low uint64) pdata.TraceID {
+ traceID := [16]byte{}
+ binary.BigEndian.PutUint64(traceID[:8], high)
+ binary.BigEndian.PutUint64(traceID[8:], low)
+ return pdata.NewTraceID(traceID)
+}
+
+// SpanIDToUInt64 converts the pdata.SpanID to uint64 representation.
+func SpanIDToUInt64(spanID pdata.SpanID) uint64 {
+ bytes := spanID.Bytes()
+ return binary.BigEndian.Uint64(bytes[:])
+}
+
+// UInt64ToSpanID converts the uint64 representation of a SpanID to pdata.SpanID.
+func UInt64ToSpanID(id uint64) pdata.SpanID {
+ spanID := [8]byte{}
+ binary.BigEndian.PutUint64(spanID[:], id)
+ return pdata.NewSpanID(spanID)
+}
diff --git a/processor/cascadingfilterprocessor/bigendianconverter/big_endian_converter_test.go b/processor/cascadingfilterprocessor/bigendianconverter/big_endian_converter_test.go
new file mode 100644
index 000000000000..c601a15899f0
--- /dev/null
+++ b/processor/cascadingfilterprocessor/bigendianconverter/big_endian_converter_test.go
@@ -0,0 +1,67 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+package bigendianconverter
+
+import (
+ "math"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "go.opentelemetry.io/collector/model/pdata"
+)
+
+func TestUInt64ToTraceIDConversion(t *testing.T) {
+ assert.Equal(t,
+ pdata.NewTraceID([16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
+ UInt64ToTraceID(0, 0),
+ "Failed 0 conversion:")
+ assert.Equal(t,
+ pdata.NewTraceID([16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01}),
+ UInt64ToTraceID(256*256+256+1, 256+1),
+ "Failed simple conversion:")
+ assert.Equal(t,
+ pdata.NewTraceID([16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05}),
+ UInt64ToTraceID(0, 5),
+ "Failed to convert 0 high:")
+ assert.Equal(t,
+ UInt64ToTraceID(5, 0),
+ pdata.NewTraceID([16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
+ UInt64ToTraceID(5, 0),
+ "Failed to convert 0 low:")
+ assert.Equal(t,
+ pdata.NewTraceID([16]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05}),
+ UInt64ToTraceID(math.MaxUint64, 5),
+ "Failed to convert MaxUint64:")
+}
+
+func TestUInt64ToSpanIDConversion(t *testing.T) {
+ assert.Equal(t,
+ pdata.NewSpanID([8]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
+ UInt64ToSpanID(0),
+ "Failed 0 conversion:")
+ assert.Equal(t,
+ pdata.NewSpanID([8]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01}),
+ UInt64ToSpanID(256*256+256+1),
+ "Failed simple conversion:")
+ assert.Equal(t,
+ pdata.NewSpanID([8]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}),
+ UInt64ToSpanID(math.MaxUint64),
+ "Failed to convert MaxUint64:")
+}
+
+func TestSpanIdUInt64RoundTrip(t *testing.T) {
+ w := uint64(0x0001020304050607)
+ assert.Equal(t, w, SpanIDToUInt64(UInt64ToSpanID(w)))
+}
diff --git a/processor/cascadingfilterprocessor/cascading_test.go b/processor/cascadingfilterprocessor/cascading_test.go
new file mode 100644
index 000000000000..a664aa9636f1
--- /dev/null
+++ b/processor/cascadingfilterprocessor/cascading_test.go
@@ -0,0 +1,256 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cascadingfilterprocessor
+
+import (
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.opentelemetry.io/collector/config"
+ "go.opentelemetry.io/collector/model/pdata"
+ "go.uber.org/zap"
+
+ cfconfig "github.com/open-telemetry/opentelemetry-collector-contrib/processor/cascadingfilterprocessor/config"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/processor/cascadingfilterprocessor/sampling"
+)
+
+var testValue = 10 * time.Millisecond
+var probabilisticFilteringRate = int32(10)
+var healthCheckPattern = "health"
+var cfg = cfconfig.Config{
+ ProcessorSettings: &config.ProcessorSettings{},
+ DecisionWait: 2 * time.Second,
+ NumTraces: 100,
+ ExpectedNewTracesPerSec: 100,
+ SpansPerSecond: 1000,
+ PolicyCfgs: []cfconfig.TraceAcceptCfg{
+ {
+ Name: "duration",
+ SpansPerSecond: 10,
+ PropertiesCfg: cfconfig.PropertiesCfg{
+ MinDuration: &testValue,
+ },
+ },
+ {
+ Name: "everything else",
+ SpansPerSecond: -1,
+ },
+ },
+ TraceRejectCfgs: []cfconfig.TraceRejectCfg{
+ {
+ Name: "health-check",
+ NamePattern: &healthCheckPattern,
+ },
+ },
+}
+
+var cfgJustDropping = cfconfig.Config{
+ ProcessorSettings: &config.ProcessorSettings{},
+ DecisionWait: 2 * time.Second,
+ TraceRejectCfgs: []cfconfig.TraceRejectCfg{
+ {
+ Name: "health-check",
+ NamePattern: &healthCheckPattern,
+ },
+ },
+}
+
+var cfgAutoRate = cfconfig.Config{
+ ProcessorSettings: &config.ProcessorSettings{},
+ DecisionWait: 2 * time.Second,
+ ProbabilisticFilteringRate: &probabilisticFilteringRate,
+ PolicyCfgs: []cfconfig.TraceAcceptCfg{
+ {
+ Name: "duration",
+ SpansPerSecond: 20,
+ PropertiesCfg: cfconfig.PropertiesCfg{
+ MinDuration: &testValue,
+ },
+ },
+ },
+ TraceRejectCfgs: []cfconfig.TraceRejectCfg{
+ {
+ Name: "health-check",
+ NamePattern: &healthCheckPattern,
+ },
+ },
+}
+
+func fillSpan(span *pdata.Span, durationMicros int64) {
+ nowTs := time.Now().UnixNano()
+ startTime := nowTs - durationMicros*1000
+
+ span.Attributes().InsertInt("foo", 55)
+ span.SetStartTimestamp(pdata.Timestamp(startTime))
+ span.SetEndTimestamp(pdata.Timestamp(nowTs))
+}
+
+func createTrace(fsp *cascadingFilterSpanProcessor, numSpans int, durationMicros int64) *sampling.TraceData {
+ var traceBatches []pdata.Traces
+
+ traces := pdata.NewTraces()
+ rs := traces.ResourceSpans().AppendEmpty()
+ ils := rs.InstrumentationLibrarySpans().AppendEmpty()
+
+ spans := ils.Spans()
+ spans.EnsureCapacity(numSpans)
+
+ for i := 0; i < numSpans; i++ {
+ span := spans.AppendEmpty()
+
+ fillSpan(&span, durationMicros)
+ }
+
+ traceBatches = append(traceBatches, traces)
+
+ return &sampling.TraceData{
+ Mutex: sync.Mutex{},
+ Decisions: make([]sampling.Decision, len(fsp.traceAcceptRules)),
+ ArrivalTime: time.Time{},
+ DecisionTime: time.Time{},
+ SpanCount: int32(numSpans),
+ ReceivedBatches: traceBatches,
+ }
+}
+
+func createCascadingEvaluator(t *testing.T) *cascadingFilterSpanProcessor {
+ return createCascadingEvaluatorWithConfig(t, cfg)
+}
+
+func createCascadingEvaluatorWithConfig(t *testing.T, conf cfconfig.Config) *cascadingFilterSpanProcessor {
+ cascading, err := newCascadingFilterSpanProcessor(zap.NewNop(), nil, conf)
+ assert.NoError(t, err)
+ return cascading
+}
+
+func TestSampling(t *testing.T) {
+ cascading := createCascadingEvaluator(t)
+
+ decision, policy := cascading.makeProvisionalDecision(pdata.NewTraceID([16]byte{0}), createTrace(cascading, 8, 1000000))
+ require.NotNil(t, policy)
+ require.Equal(t, sampling.Sampled, decision)
+
+ decision, _ = cascading.makeProvisionalDecision(pdata.NewTraceID([16]byte{1}), createTrace(cascading, 1000, 1000))
+ require.Equal(t, sampling.SecondChance, decision)
+}
+
+func TestSecondChanceEvaluation(t *testing.T) {
+ cascading := createCascadingEvaluator(t)
+
+ decision, _ := cascading.makeProvisionalDecision(pdata.NewTraceID([16]byte{0}), createTrace(cascading, 8, 1000))
+ require.Equal(t, sampling.SecondChance, decision)
+
+ decision, _ = cascading.makeProvisionalDecision(pdata.NewTraceID([16]byte{1}), createTrace(cascading, 8, 1000))
+ require.Equal(t, sampling.SecondChance, decision)
+
+ // TODO: This could me optimized to make a decision within cascadingfilter processor, as such span would never fit anyway
+ //decision, _ = cascading.makeProvisionalDecision(pdata.NewTraceID([16]byte{1}), createTrace(8000, 1000), metrics)
+ //require.Equal(t, sampling.NotSampled, decision)
+}
+
+func TestProbabilisticFilter(t *testing.T) {
+ ratio := float32(0.5)
+ cfg.ProbabilisticFilteringRatio = &ratio
+ cascading := createCascadingEvaluator(t)
+
+ trace1 := createTrace(cascading, 8, 1000000)
+ decision, _ := cascading.makeProvisionalDecision(pdata.NewTraceID([16]byte{0}), trace1)
+ require.Equal(t, sampling.Sampled, decision)
+ require.True(t, trace1.SelectedByProbabilisticFilter)
+
+ trace2 := createTrace(cascading, 800, 1000000)
+ decision, _ = cascading.makeProvisionalDecision(pdata.NewTraceID([16]byte{1}), trace2)
+ require.Equal(t, sampling.SecondChance, decision)
+ require.False(t, trace2.SelectedByProbabilisticFilter)
+
+ ratio = float32(0.0)
+ cfg.ProbabilisticFilteringRatio = &ratio
+}
+
+func TestDropTraces(t *testing.T) {
+ cascading := createCascadingEvaluator(t)
+
+ trace1 := createTrace(cascading, 8, 1000000)
+ trace2 := createTrace(cascading, 8, 1000000)
+ trace2.ReceivedBatches[0].ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(2).SetName("health-check")
+ require.False(t, cascading.shouldBeDropped(pdata.NewTraceID([16]byte{0}), trace1))
+ require.True(t, cascading.shouldBeDropped(pdata.NewTraceID([16]byte{0}), trace2))
+}
+
+func TestDropTracesAndNotLimitOthers(t *testing.T) {
+ cascading := createCascadingEvaluatorWithConfig(t, cfgJustDropping)
+
+ trace1 := createTrace(cascading, 1000, 1000000)
+ trace2 := createTrace(cascading, 8, 1000000)
+ trace2.ReceivedBatches[0].ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(2).SetName("health-check")
+ trace3 := createTrace(cascading, 5000, 1000000)
+
+ decision, policy := cascading.makeProvisionalDecision(pdata.NewTraceID([16]byte{0}), trace1)
+ require.Nil(t, policy)
+ require.Equal(t, sampling.Sampled, decision)
+ require.False(t, cascading.shouldBeDropped(pdata.NewTraceID([16]byte{0}), trace1))
+
+ decision, policy = cascading.makeProvisionalDecision(pdata.NewTraceID([16]byte{1}), trace2)
+ require.Nil(t, policy)
+ require.Equal(t, sampling.Sampled, decision)
+ require.True(t, cascading.shouldBeDropped(pdata.NewTraceID([16]byte{1}), trace2))
+
+ decision, policy = cascading.makeProvisionalDecision(pdata.NewTraceID([16]byte{2}), trace3)
+ require.Nil(t, policy)
+ require.Equal(t, sampling.Sampled, decision)
+ require.False(t, cascading.shouldBeDropped(pdata.NewTraceID([16]byte{2}), trace3))
+}
+
+func TestDropTracesAndAutoRateOthers(t *testing.T) {
+ cascading := createCascadingEvaluatorWithConfig(t, cfgAutoRate)
+
+ trace1 := createTrace(cascading, 20, 1000000)
+ trace2 := createTrace(cascading, 8, 1000000)
+ trace2.ReceivedBatches[0].ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(2).SetName("health-check")
+ trace3 := createTrace(cascading, 20, 1000000)
+
+ decision, policy := cascading.makeProvisionalDecision(pdata.NewTraceID([16]byte{0}), trace1)
+ require.NotNil(t, policy)
+ require.Equal(t, sampling.Sampled, decision)
+ require.False(t, cascading.shouldBeDropped(pdata.NewTraceID([16]byte{0}), trace1))
+
+ decision, policy = cascading.makeProvisionalDecision(pdata.NewTraceID([16]byte{1}), trace2)
+ require.NotNil(t, policy)
+ require.Equal(t, sampling.Sampled, decision)
+ require.True(t, cascading.shouldBeDropped(pdata.NewTraceID([16]byte{1}), trace2))
+
+ decision, policy = cascading.makeProvisionalDecision(pdata.NewTraceID([16]byte{2}), trace3)
+ require.Nil(t, policy)
+ require.Equal(t, sampling.NotSampled, decision)
+ require.False(t, cascading.shouldBeDropped(pdata.NewTraceID([16]byte{2}), trace3))
+}
+
+//func TestSecondChanceReevaluation(t *testing.T) {
+// cascading := createCascadingEvaluator()
+//
+// decision, _ := cascading.makeProvisionalDecision(pdata.NewTraceID([16]byte{1}), createTrace(100, 1000), metrics)
+// require.Equal(t, sampling.Sampled, decision)
+//
+// // Too much
+// decision, _ = cascading.makeProvisionalDecision(pdata.NewTraceID([16]byte{1}), createTrace(1000, 1000), metrics)
+// require.Equal(t, sampling.NotSampled, decision)
+//
+// // Just right
+// decision, _ = cascading.makeProvisionalDecision(pdata.NewTraceID([16]byte{1}), createTrace(900, 1000), metrics)
+// require.Equal(t, sampling.Sampled, decision)
+//}
diff --git a/processor/cascadingfilterprocessor/config/config.go b/processor/cascadingfilterprocessor/config/config.go
new file mode 100644
index 000000000000..75e539be5de5
--- /dev/null
+++ b/processor/cascadingfilterprocessor/config/config.go
@@ -0,0 +1,115 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+ "time"
+
+ "go.opentelemetry.io/collector/config"
+)
+
+// TraceAcceptCfg holds the common configuration to all sampling policies.
+type TraceAcceptCfg struct {
+	// Name given to the instance of the policy to make it easy to identify in metrics and logs.
+ Name string `mapstructure:"name"`
+ // Configs for numeric attribute filter sampling policy evaluator.
+ NumericAttributeCfg *NumericAttributeCfg `mapstructure:"numeric_attribute"`
+ // Configs for string attribute filter sampling policy evaluator.
+ StringAttributeCfg *StringAttributeCfg `mapstructure:"string_attribute"`
+ // Configs for properties sampling policy evaluator.
+ PropertiesCfg PropertiesCfg `mapstructure:"properties"`
+	// SpansPerSecond specifies the spans-per-second budget that this policy must never exceed.
+ SpansPerSecond int32 `mapstructure:"spans_per_second"`
+ // InvertMatch specifies if the match should be inverted. Default: false
+ InvertMatch bool `mapstructure:"invert_match"`
+}
+
+// PropertiesCfg holds the configurable settings to create a duration filter
+type PropertiesCfg struct {
+ // NamePattern (optional) describes a regular expression that must be met by any span operation name.
+ NamePattern *string `mapstructure:"name_pattern"`
+ // MinDuration (optional) is the minimum duration of trace to be considered a match.
+ MinDuration *time.Duration `mapstructure:"min_duration"`
+	// MinNumberOfSpans (optional) is the minimum number of spans that must be present in a matching trace.
+ MinNumberOfSpans *int `mapstructure:"min_number_of_spans"`
+ // MinNumberOfErrors (optional) is the minimum number of spans with the status set to error that must be present in a matching trace.
+ MinNumberOfErrors *int `mapstructure:"min_number_of_errors"`
+}
+
+// NumericAttributeCfg holds the configurable settings to create a numeric attribute filter
+// sampling policy evaluator.
+type NumericAttributeCfg struct {
+ // Tag that the filter is going to be matching against.
+ Key string `mapstructure:"key"`
+ // MinValue is the minimum value of the attribute to be considered a match.
+ MinValue int64 `mapstructure:"min_value"`
+ // MaxValue is the maximum value of the attribute to be considered a match.
+ MaxValue int64 `mapstructure:"max_value"`
+}
+
+// StringAttributeCfg holds the configurable settings to create a string attribute filter
+// sampling policy evaluator.
+type StringAttributeCfg struct {
+ // Tag that the filter is going to be matching against.
+ Key string `mapstructure:"key"`
+ // Values is the set of attribute values that if any is equal to the actual attribute value to be considered a match.
+ Values []string `mapstructure:"values"`
+}
+
+// TraceRejectCfg holds the configurable settings which drop all traces matching the specified criteria (all of them)
+// before further processing
+type TraceRejectCfg struct {
+	// Name given to the instance of dropped traces policy to make it easy to identify in metrics and logs.
+ Name string `mapstructure:"name"`
+ // NumericAttributeCfg (optional) configs numeric attribute filter evaluator
+ NumericAttributeCfg *NumericAttributeCfg `mapstructure:"numeric_attribute"`
+	// StringAttributeCfg (optional) configs string attribute filter evaluator.
+ StringAttributeCfg *StringAttributeCfg `mapstructure:"string_attribute"`
+ // NamePattern (optional) describes a regular expression that must be met by any span operation name
+ NamePattern *string `mapstructure:"name_pattern"`
+}
+
+// Config holds the configuration for cascading-filter-based sampling.
+type Config struct {
+ *config.ProcessorSettings `mapstructure:"-"`
+ // DecisionWait is the desired wait time from the arrival of the first span of
+ // trace until the decision about sampling it or not is evaluated.
+ DecisionWait time.Duration `mapstructure:"decision_wait"`
+ // SpansPerSecond specifies the total budget that should never be exceeded.
+	// When set to zero (default value) - it is automatically calculated based on the accept trace and
+ // probabilistic filtering rate (if present)
+ SpansPerSecond int32 `mapstructure:"spans_per_second"`
+ // ProbabilisticFilteringRatio describes which part (0.0-1.0) of the SpansPerSecond budget
+ // is exclusively allocated for probabilistically selected spans
+ ProbabilisticFilteringRatio *float32 `mapstructure:"probabilistic_filtering_ratio"`
+ // ProbabilisticFilteringRate describes how many spans per second are exclusively allocated
+ // for probabilistically selected spans
+ ProbabilisticFilteringRate *int32 `mapstructure:"probabilistic_filtering_rate"`
+ // NumTraces is the number of traces kept on memory. Typically, most of the data
+ // of a trace is released after a sampling decision is taken.
+ NumTraces uint64 `mapstructure:"num_traces"`
+	// ExpectedNewTracesPerSec sets the expected number of new traces sent to the Cascading Filter processor
+ // per second. This helps with allocating data structures with closer to actual usage size.
+ ExpectedNewTracesPerSec uint64 `mapstructure:"expected_new_traces_per_sec"`
+	// PolicyCfgs (deprecated) sets the cascading-filter-based sampling policy which makes a sampling decision
+ // for a given trace when requested.
+ PolicyCfgs []TraceAcceptCfg `mapstructure:"policies"`
+ // TraceAcceptCfgs sets the cascading-filter-based sampling policy which makes a sampling decision
+ // for a given trace when requested.
+ TraceAcceptCfgs []TraceAcceptCfg `mapstructure:"trace_accept_filters"`
+ // TraceRejectCfgs sets the criteria for which traces are evaluated before applying sampling rules. If
+	// trace matches them, it is not processed further
+ TraceRejectCfgs []TraceRejectCfg `mapstructure:"trace_reject_filters"`
+}
diff --git a/processor/cascadingfilterprocessor/config_test.go b/processor/cascadingfilterprocessor/config_test.go
new file mode 100644
index 000000000000..382fd3d2f038
--- /dev/null
+++ b/processor/cascadingfilterprocessor/config_test.go
@@ -0,0 +1,159 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cascadingfilterprocessor
+
+import (
+ "path"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.opentelemetry.io/collector/component/componenttest"
+ "go.opentelemetry.io/collector/config"
+ "go.opentelemetry.io/collector/config/configtest"
+
+ cfconfig "github.com/open-telemetry/opentelemetry-collector-contrib/processor/cascadingfilterprocessor/config"
+)
+
+func TestLoadConfig(t *testing.T) {
+ factories, err := componenttest.NopFactories()
+ assert.NoError(t, err)
+
+ factory := NewFactory()
+ factories.Processors[factory.Type()] = factory
+
+ cfg, err := configtest.LoadConfig(path.Join(".", "testdata", "cascading_filter_config.yaml"), factories)
+ require.NoError(t, err)
+ require.NotNil(t, cfg)
+
+ minDurationValue := 9 * time.Second
+ minSpansValue := 10
+ minErrorsValue := 2
+ probFilteringRatio := float32(0.1)
+ probFilteringRate := int32(100)
+ namePatternValue := "foo.*"
+ healthCheckNamePatternValue := "health.*"
+
+ id1 := config.NewComponentIDWithName("cascading_filter", "1")
+ ps1 := config.NewProcessorSettings(id1)
+ assert.Equal(t, cfg.Processors[id1],
+ &cfconfig.Config{
+ DecisionWait: 30 * time.Second,
+ SpansPerSecond: 0,
+ NumTraces: 100000,
+ ProcessorSettings: &ps1,
+ ProbabilisticFilteringRate: &probFilteringRate,
+ TraceRejectCfgs: []cfconfig.TraceRejectCfg{
+ {
+ Name: "healthcheck-rule",
+ NamePattern: &healthCheckNamePatternValue,
+ },
+ },
+ TraceAcceptCfgs: []cfconfig.TraceAcceptCfg{
+ {
+ Name: "include-errors",
+ SpansPerSecond: 200,
+ PropertiesCfg: cfconfig.PropertiesCfg{
+ MinNumberOfErrors: &minErrorsValue,
+ },
+ },
+ {
+ Name: "include-long-traces",
+ SpansPerSecond: 300,
+ PropertiesCfg: cfconfig.PropertiesCfg{
+ MinNumberOfSpans: &minSpansValue,
+ },
+ },
+ {
+ Name: "include-high-latency",
+ SpansPerSecond: 400,
+ PropertiesCfg: cfconfig.PropertiesCfg{
+ MinDuration: &minDurationValue,
+ },
+ },
+ },
+ })
+
+ id2 := config.NewComponentIDWithName("cascading_filter", "2")
+ ps2 := config.NewProcessorSettings(id2)
+ assert.Equal(t, cfg.Processors[id2],
+ &cfconfig.Config{
+ ProcessorSettings: &ps2,
+ DecisionWait: 10 * time.Second,
+ NumTraces: 100,
+ ExpectedNewTracesPerSec: 10,
+ SpansPerSecond: 1000,
+ ProbabilisticFilteringRatio: &probFilteringRatio,
+ TraceRejectCfgs: []cfconfig.TraceRejectCfg{
+ {
+ Name: "healthcheck-rule",
+ NamePattern: &healthCheckNamePatternValue,
+ },
+ {
+ Name: "remove-all-traces-with-healthcheck-service",
+ NamePattern: nil,
+ NumericAttributeCfg: nil,
+ StringAttributeCfg: &cfconfig.StringAttributeCfg{
+ Key: "service.name",
+ Values: []string{"healthcheck"},
+ },
+ },
+ },
+ TraceAcceptCfgs: []cfconfig.TraceAcceptCfg{
+ {
+ Name: "test-policy-1",
+ },
+ {
+ Name: "test-policy-2",
+ NumericAttributeCfg: &cfconfig.NumericAttributeCfg{Key: "key1", MinValue: 50, MaxValue: 100},
+ },
+ {
+ Name: "test-policy-3",
+ StringAttributeCfg: &cfconfig.StringAttributeCfg{Key: "key2", Values: []string{"value1", "value2"}},
+ },
+ {
+ Name: "test-policy-4",
+ SpansPerSecond: 35,
+ },
+ {
+ Name: "test-policy-5",
+ SpansPerSecond: 123,
+ NumericAttributeCfg: &cfconfig.NumericAttributeCfg{
+ Key: "key1", MinValue: 50, MaxValue: 100},
+ InvertMatch: true,
+ },
+ {
+ Name: "test-policy-6",
+ SpansPerSecond: 50,
+
+ PropertiesCfg: cfconfig.PropertiesCfg{MinDuration: &minDurationValue},
+ },
+ {
+ Name: "test-policy-7",
+ PropertiesCfg: cfconfig.PropertiesCfg{
+ NamePattern: &namePatternValue,
+ MinDuration: &minDurationValue,
+ MinNumberOfSpans: &minSpansValue,
+ MinNumberOfErrors: &minErrorsValue,
+ },
+ },
+ {
+ Name: "everything_else",
+ SpansPerSecond: -1,
+ },
+ },
+ })
+}
diff --git a/processor/cascadingfilterprocessor/factory.go b/processor/cascadingfilterprocessor/factory.go
new file mode 100644
index 000000000000..d5c4fa7738fe
--- /dev/null
+++ b/processor/cascadingfilterprocessor/factory.go
@@ -0,0 +1,72 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cascadingfilterprocessor
+
+import (
+ "context"
+ "time"
+
+ "go.opencensus.io/stats/view"
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/collector/config"
+ "go.opentelemetry.io/collector/config/configtelemetry"
+ "go.opentelemetry.io/collector/consumer"
+ "go.opentelemetry.io/collector/processor/processorhelper"
+
+ cfconfig "github.com/open-telemetry/opentelemetry-collector-contrib/processor/cascadingfilterprocessor/config"
+)
+
+const (
+ // The value of "type" Cascading Filter in configuration.
+ typeStr = "cascading_filter"
+)
+
+func init() {
+ // TODO: this is hardcoding the metrics level
+ err := view.Register(CascadingFilterMetricViews(configtelemetry.LevelNormal)...)
+ if err != nil {
+ panic("failed to register cascadingfilterprocessor: " + err.Error())
+ }
+}
+
+// NewFactory returns a new factory for the Cascading Filter processor.
+func NewFactory() component.ProcessorFactory {
+ return processorhelper.NewFactory(
+ typeStr,
+ createDefaultConfig,
+ processorhelper.WithTraces(createTraceProcessor))
+}
+
+func createDefaultConfig() config.Processor {
+ id := config.NewComponentID("cascading_filter")
+ ps := config.NewProcessorSettings(id)
+
+ return &cfconfig.Config{
+ ProcessorSettings: &ps,
+ DecisionWait: 30 * time.Second,
+ NumTraces: 100000,
+ SpansPerSecond: 0,
+ }
+}
+
+func createTraceProcessor(
+ _ context.Context,
+ settings component.ProcessorCreateSettings,
+ cfg config.Processor,
+ nextConsumer consumer.Traces,
+) (component.TracesProcessor, error) {
+ tCfg := cfg.(*cfconfig.Config)
+ return newTraceProcessor(settings.Logger, nextConsumer, *tCfg)
+}
diff --git a/processor/cascadingfilterprocessor/factory_test.go b/processor/cascadingfilterprocessor/factory_test.go
new file mode 100644
index 000000000000..b19bf9dd34ec
--- /dev/null
+++ b/processor/cascadingfilterprocessor/factory_test.go
@@ -0,0 +1,54 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cascadingfilterprocessor
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/collector/config/configtest"
+ "go.opentelemetry.io/collector/consumer/consumertest"
+ "go.uber.org/zap"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/processor/cascadingfilterprocessor/config"
+)
+
+func TestCreateDefaultConfig(t *testing.T) {
+ cfg := createDefaultConfig()
+ assert.NotNil(t, cfg, "failed to create default config")
+ assert.NoError(t, configtest.CheckConfigStruct(cfg))
+}
+
+func TestCreateProcessor(t *testing.T) {
+ factory := NewFactory()
+
+ cfg := factory.CreateDefaultConfig().(*config.Config)
+ // Manually set required fields
+ cfg.ExpectedNewTracesPerSec = 64
+ cfg.PolicyCfgs = []config.TraceAcceptCfg{
+ {
+ Name: "test-policy",
+ },
+ }
+
+ params := component.ProcessorCreateSettings{
+ TelemetrySettings: component.TelemetrySettings{Logger: zap.NewNop()},
+ }
+ tp, err := factory.CreateTracesProcessor(context.Background(), params, cfg, consumertest.NewNop())
+ assert.NotNil(t, tp)
+ assert.NoError(t, err, "cannot create trace processor")
+}
diff --git a/processor/cascadingfilterprocessor/go.mod b/processor/cascadingfilterprocessor/go.mod
new file mode 100644
index 000000000000..cad319cabd98
--- /dev/null
+++ b/processor/cascadingfilterprocessor/go.mod
@@ -0,0 +1,36 @@
+module github.com/open-telemetry/opentelemetry-collector-contrib/processor/cascadingfilterprocessor
+
+go 1.17
+
+require (
+ github.com/google/uuid v1.3.0
+ github.com/stretchr/testify v1.7.0
+ go.opencensus.io v0.23.0
+ go.opentelemetry.io/collector v0.38.0
+ go.opentelemetry.io/collector/model v0.38.0
+ go.uber.org/zap v1.19.1
+)
+
+require (
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/fsnotify/fsnotify v1.4.9 // indirect
+ github.com/gogo/protobuf v1.3.2 // indirect
+ github.com/knadh/koanf v1.3.0 // indirect
+ github.com/magiconair/properties v1.8.5 // indirect
+ github.com/mitchellh/copystructure v1.2.0 // indirect
+ github.com/mitchellh/mapstructure v1.4.2 // indirect
+ github.com/mitchellh/reflectwalk v1.0.2 // indirect
+ github.com/pelletier/go-toml v1.9.3 // indirect
+ github.com/pkg/errors v0.9.1 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/rogpeppe/go-internal v1.6.1 // indirect
+ github.com/spf13/cast v1.4.1 // indirect
+ go.opentelemetry.io/otel v1.0.1 // indirect
+ go.opentelemetry.io/otel/metric v0.24.0 // indirect
+ go.opentelemetry.io/otel/trace v1.0.1 // indirect
+ go.uber.org/atomic v1.9.0 // indirect
+ go.uber.org/multierr v1.7.0 // indirect
+ golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71 // indirect
+ gopkg.in/yaml.v2 v2.4.0 // indirect
+ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
+)
diff --git a/processor/cascadingfilterprocessor/go.sum b/processor/cascadingfilterprocessor/go.sum
new file mode 100644
index 000000000000..8adc13a703b9
--- /dev/null
+++ b/processor/cascadingfilterprocessor/go.sum
@@ -0,0 +1,799 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
+cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+contrib.go.opencensus.io/exporter/prometheus v0.4.0/go.mod h1:o7cosnyfuPVK0tB8q0QmaQNhGnptITnPQB+z1+qeFB0=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
+github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw=
+github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0/go.mod h1:gqlclDEZp4aqJOancXK6TN24aKhT0W0Ae9MHk3wzTMM=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ=
+github.com/aws/aws-sdk-go-v2/service/appconfig v1.4.2/go.mod h1:FZ3HkCe+b10uFZZkFdvf98LHW21k49W8o8J366lqVKY=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8=
+github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk=
+github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g=
+github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
+github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
+github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
+github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ=
+github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
+github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
+github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
+github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
+github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM=
+github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
+github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI=
+github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY=
+github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoIospckxBxk6Q=
+github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M=
+github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
+github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=
+github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/knadh/koanf v1.3.0 h1:nNmG4HGbpJUv7eUV1skDvHzzFS+35Q3b+OsYvoXyt2E=
+github.com/knadh/koanf v1.3.0/go.mod h1:HZ7HMLIGbrWJUfgtEzfHvzR/rX+eIqQlBNPRr4Vt42s=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw=
+github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls=
+github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
+github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
+github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo=
+github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
+github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
+github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ=
+github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
+github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/statsd_exporter v0.21.0/go.mod h1:rbT83sZq2V+p73lHhPZfMc3MLCHmSHelCh9hSGYNLTQ=
+github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/shirou/gopsutil/v3 v3.21.9/go.mod h1:YWp/H8Qs5fVmf17v7JNZzA0mPJ+mS2e9JdiUF9LlKzQ=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
+github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA=
+github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
+github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
+github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs=
+github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8=
+github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
+github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
+go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
+go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
+go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opentelemetry.io/collector v0.38.0 h1:lFo/6pUTIx5AIsikhXk99IebnZ0liD3y7Fcw/hjc/GM=
+go.opentelemetry.io/collector v0.38.0/go.mod h1:z3DoFBVBO4eRjDTxR3FptJeSdFXWv+OPlyb2vpjOP2k=
+go.opentelemetry.io/collector/model v0.38.0 h1:zJmh+oBqEReMfDXu6acUMesUgv6CMunm3YWKcAV69VI=
+go.opentelemetry.io/collector/model v0.38.0/go.mod h1:gS8A27wi+8gM3hrXL+dEjTbrbLxktjHjAwwqI31ELgQ=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.25.0/go.mod h1:E5NNboN0UqSAki0Atn9kVwaN7I+l25gGxDqBueo/74E=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.25.0/go.mod h1:NyB05cd+yPX6W5SiRNuJ90w7PV2+g2cgRbsPL7MvpME=
+go.opentelemetry.io/contrib/zpages v0.25.0/go.mod h1:cXBK0CNcIBD9Iiw1Hv3DvS+E8N2rtr+k+OoPs+sP0T4=
+go.opentelemetry.io/otel v1.0.1 h1:4XKyXmfqJLOQ7feyV5DB6gsBFZ0ltB8vLtp6pj4JIcc=
+go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU=
+go.opentelemetry.io/otel/exporters/prometheus v0.24.0/go.mod h1:jfc9W1hVK0w9zrsE+C2ELje/M+K67cGinzeg8qQ8oog=
+go.opentelemetry.io/otel/internal/metric v0.24.0 h1:O5lFy6kAl0LMWBjzy3k//M8VjEaTDWL9DPJuqZmWIAA=
+go.opentelemetry.io/otel/internal/metric v0.24.0/go.mod h1:PSkQG+KuApZjBpC6ea6082ZrWUUy/w132tJ/LOU3TXk=
+go.opentelemetry.io/otel/metric v0.24.0 h1:Rg4UYHS6JKR1Sw1TxnI13z7q/0p/XAbgIqUTagvLJuU=
+go.opentelemetry.io/otel/metric v0.24.0/go.mod h1:tpMFnCD9t+BEGiWY2bWF5+AwjuAdM0lSowQ4SBA3/K4=
+go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI=
+go.opentelemetry.io/otel/sdk/export/metric v0.24.0/go.mod h1:chmxXGVNcpCih5XyniVkL4VUyaEroUbOdvjVlQ8M29Y=
+go.opentelemetry.io/otel/sdk/metric v0.24.0/go.mod h1:KDgJgYzsIowuIDbPM9sLDZY9JJ6gqIDWCx92iWV8ejk=
+go.opentelemetry.io/otel/trace v1.0.1 h1:StTeIH6Q3G4r0Fiw34LTokUFESZgIDUr0qIJ7mKmAfw=
+go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
+go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 h1:sHOAIxRGBp443oHZIPB+HsUGaksVCXVQENPxwTfQdH4=
+go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/multierr v1.7.0 h1:zaiO/rmgFjbmCXdSYJWQcdvOCsthmdaHfr3Gm2Kx4Ec=
+go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
+go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
+go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI=
+go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210611083646-a4fc73990273/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71 h1:ikCpsnYR+Ew0vu99XlDp55lGgDJdIMx3f4a18jfse/s=
+golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
+google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
+google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
+google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
+google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
+google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
+google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
+gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ=
+gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/processor/cascadingfilterprocessor/idbatcher/id_batcher.go b/processor/cascadingfilterprocessor/idbatcher/id_batcher.go
new file mode 100644
index 000000000000..29e2632b7653
--- /dev/null
+++ b/processor/cascadingfilterprocessor/idbatcher/id_batcher.go
@@ -0,0 +1,141 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package idbatcher defines a pipeline of fixed size in which the
+// elements are batches of ids.
+package idbatcher
+
+import (
+ "errors"
+ "sync"
+
+ "go.opentelemetry.io/collector/model/pdata"
+)
+
+var (
+ // ErrInvalidNumBatches occurs when an invalid number of batches is specified.
+ ErrInvalidNumBatches = errors.New("invalid number of batches, it must be greater than zero")
+ // ErrInvalidBatchChannelSize occurs when an invalid batch channel size is specified.
+ ErrInvalidBatchChannelSize = errors.New("invalid batch channel size, it must be greater than zero")
+)
+
+// Batch is the type of batches held by the Batcher.
+type Batch []pdata.TraceID
+
+// Batcher behaves like a pipeline of batches that has a fixed number of batches in the pipe
+// and a new batch being built outside of the pipe. Items can be concurrently added to the batch
+// currently being built. When the batch being built is closed, the oldest batch in the pipe
+// is pushed out so the one just closed can be put on the end of the pipe (this is done as an
+// atomic operation). The caller is in control of when a batch is completed and a new one should
+// be started.
+type Batcher interface {
+ // AddToCurrentBatch puts the given id on the batch being currently built. The client is in charge
+ // of limiting the growth of the current batch if appropriate for its scenario. It can
+ // either call CloseCurrentAndTakeFirstBatch earlier or stop adding new items depending on what is
+ // required by the scenario.
+ AddToCurrentBatch(id pdata.TraceID)
+ // CloseCurrentAndTakeFirstBatch takes the batch at the front of the pipe, and moves the current
+ // batch to the end of the pipe, creating a new batch to receive new items. This operation should
+ // be atomic.
+ // It returns the batch that was in front of the pipe and a boolean that if true indicates that
+ // there are more batches to be retrieved.
+ CloseCurrentAndTakeFirstBatch() (Batch, bool)
+ // Stop informs that no more items are going to be batched and the pipeline can be read until it
+ // is empty. After this method is called attempts to enqueue new items will panic.
+ Stop()
+}
+
+var _ Batcher = (*batcher)(nil)
+
+type batcher struct {
+ pendingIds chan pdata.TraceID // Channel for the ids to be added to the next batch.
+ batches chan Batch // Channel with already captured batches.
+
+ // cbMutex protects the currentBatch storing ids.
+ cbMutex sync.Mutex
+ currentBatch Batch
+
+ newBatchesInitialCapacity uint64
+ stopchan chan bool
+ stopped bool
+}
+
+// New creates a Batcher that will hold numBatches in its pipeline, having a channel with
+// batchChannelSize to receive new items. New batches will be created with capacity set to
+// newBatchesInitialCapacity.
+func New(numBatches, newBatchesInitialCapacity, batchChannelSize uint64) (Batcher, error) {
+ if numBatches < 1 {
+ return nil, ErrInvalidNumBatches
+ }
+ if batchChannelSize < 1 {
+ return nil, ErrInvalidBatchChannelSize
+ }
+
+ batches := make(chan Batch, numBatches)
+ // First numBatches batches will be empty in order to simplify clients that are running
+ // CloseCurrentAndTakeFirstBatch on a timer and want to delay the processing of the first
+ // batch with actual data. This way there is no need for accounting on the client side and
+ // a single timer can be started immediately.
+ for i := uint64(0); i < numBatches; i++ {
+ batches <- nil
+ }
+
+ batcher := &batcher{
+ pendingIds: make(chan pdata.TraceID, batchChannelSize),
+ batches: batches,
+ currentBatch: make(Batch, 0, newBatchesInitialCapacity),
+ newBatchesInitialCapacity: newBatchesInitialCapacity,
+ stopchan: make(chan bool),
+ }
+
+ // Single goroutine that keeps filling the current batch, contention is expected only
+ // when the current batch is being switched.
+ go func() {
+ for id := range batcher.pendingIds {
+ batcher.cbMutex.Lock()
+ batcher.currentBatch = append(batcher.currentBatch, id)
+ batcher.cbMutex.Unlock()
+ }
+ batcher.stopchan <- true
+ }()
+
+ return batcher, nil
+}
+
+func (b *batcher) AddToCurrentBatch(id pdata.TraceID) {
+ b.pendingIds <- id
+}
+
+func (b *batcher) CloseCurrentAndTakeFirstBatch() (Batch, bool) {
+ if readBatch, ok := <-b.batches; ok {
+ if !b.stopped {
+ nextBatch := make(Batch, 0, b.newBatchesInitialCapacity)
+ b.cbMutex.Lock()
+ b.batches <- b.currentBatch
+ b.currentBatch = nextBatch
+ b.cbMutex.Unlock()
+ }
+ return readBatch, true
+ }
+
+ readBatch := b.currentBatch
+ b.currentBatch = nil
+ return readBatch, false
+}
+
+func (b *batcher) Stop() {
+ close(b.pendingIds)
+ b.stopped = <-b.stopchan
+ close(b.batches)
+}
diff --git a/processor/cascadingfilterprocessor/idbatcher/id_batcher_test.go b/processor/cascadingfilterprocessor/idbatcher/id_batcher_test.go
new file mode 100644
index 000000000000..ed86da91589d
--- /dev/null
+++ b/processor/cascadingfilterprocessor/idbatcher/id_batcher_test.go
@@ -0,0 +1,161 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package idbatcher
+
+import (
+ "runtime"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+ "go.opentelemetry.io/collector/model/pdata"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/processor/cascadingfilterprocessor/bigendianconverter"
+)
+
+func TestBatcherNew(t *testing.T) {
+ tests := []struct {
+ name string
+ numBatches uint64
+ newBatchesInitialCapacity uint64
+ batchChannelSize uint64
+ wantErr error
+ }{
+ {"invalid numBatches", 0, 0, 1, ErrInvalidNumBatches},
+ {"invalid batchChannelSize", 1, 0, 0, ErrInvalidBatchChannelSize},
+ {"valid", 1, 0, 1, nil},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := New(tt.numBatches, tt.newBatchesInitialCapacity, tt.batchChannelSize)
+ if err != tt.wantErr {
+ t.Errorf("New() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if got != nil {
+ got.Stop()
+ }
+ })
+ }
+}
+
+func TestTypicalConfig(t *testing.T) {
+ concurrencyTest(t, 10, 100, uint64(4*runtime.NumCPU()))
+}
+
+func TestMinBufferedChannels(t *testing.T) {
+ concurrencyTest(t, 1, 0, 1)
+}
+
+func BenchmarkConcurrentEnqueue(b *testing.B) {
+ ids := generateSequentialIds(1)
+ batcher, err := New(10, 100, uint64(4*runtime.NumCPU()))
+ defer batcher.Stop()
+ if err != nil {
+ b.Fatalf("Failed to create Batcher: %v", err)
+ }
+
+ ticker := time.NewTicker(100 * time.Millisecond)
+ defer ticker.Stop()
+ var ticked int32
+ var received int32
+ go func() {
+ for range ticker.C {
+ batch, _ := batcher.CloseCurrentAndTakeFirstBatch()
+ atomic.AddInt32(&ticked, 1)
+ atomic.AddInt32(&received, int32(len(batch)))
+ }
+ }()
+
+ b.ReportAllocs()
+ b.ResetTimer()
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ batcher.AddToCurrentBatch(ids[0])
+ }
+ })
+}
+
+func concurrencyTest(t *testing.T, numBatches, newBatchesInitialCapacity, batchChannelSize uint64) {
+ batcher, err := New(numBatches, newBatchesInitialCapacity, batchChannelSize)
+ require.NoError(t, err, "Failed to create Batcher: %v", err)
+
+ ticker := time.NewTicker(100 * time.Millisecond)
+ stopTicker := make(chan bool)
+ var got Batch
+ go func() {
+ var completedDequeues uint64
+ outer:
+ for {
+ select {
+ case <-ticker.C:
+ g, _ := batcher.CloseCurrentAndTakeFirstBatch()
+ completedDequeues++
+ if completedDequeues <= numBatches && len(g) != 0 {
+ t.Error("Some of the first batches were not empty")
+ return
+ }
+ got = append(got, g...)
+ case <-stopTicker:
+ break outer
+ }
+ }
+ }()
+
+ ids := generateSequentialIds(10000)
+ wg := &sync.WaitGroup{}
+ for i := 0; i < len(ids); i++ {
+ wg.Add(1)
+ go func(id pdata.TraceID) {
+ batcher.AddToCurrentBatch(id)
+ wg.Done()
+ }(ids[i])
+ }
+
+ wg.Wait()
+ stopTicker <- true
+ ticker.Stop()
+ batcher.Stop()
+
+ // Get all ids added to the batcher
+ for {
+ batch, ok := batcher.CloseCurrentAndTakeFirstBatch()
+ got = append(got, batch...)
+ if !ok {
+ break
+ }
+ }
+
+ require.Equal(t, len(ids), len(got), "Batcher got incorrect count of traces from batches")
+
+ idSeen := make(map[[16]byte]bool, len(ids))
+ for _, id := range got {
+ idSeen[id.Bytes()] = true
+ }
+
+ for i := 0; i < len(ids); i++ {
+ require.True(t, idSeen[ids[i].Bytes()], "want id %v but id was not seen", ids[i])
+ }
+}
+
+func generateSequentialIds(numIds uint64) []pdata.TraceID {
+ ids := make([]pdata.TraceID, numIds)
+ for i := uint64(0); i < numIds; i++ {
+ ids[i] = bigendianconverter.UInt64ToTraceID(0, i)
+ }
+ return ids
+}
diff --git a/processor/cascadingfilterprocessor/metrics.go b/processor/cascadingfilterprocessor/metrics.go
new file mode 100644
index 000000000000..d05990337d23
--- /dev/null
+++ b/processor/cascadingfilterprocessor/metrics.go
@@ -0,0 +1,151 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cascadingfilterprocessor
+
+import (
+ "go.opencensus.io/stats"
+ "go.opencensus.io/stats/view"
+ "go.opencensus.io/tag"
+ "go.opentelemetry.io/collector/config/configtelemetry"
+)
+
+// Variables related to metrics specific to Cascading Filter.
+var (
+ statusSampled = "Sampled"
+ statusNotSampled = "NotSampled"
+ statusExceededKey = "RateExceeded"
+ statusSecondChance = "SecondChance"
+ statusSecondChanceSampled = "SecondChanceSampled"
+ statusSecondChanceExceeded = "SecondChanceRateExceeded"
+ statusDropped = "Dropped"
+
+ tagPolicyKey, _ = tag.NewKey("policy")
+ tagCascadingFilterDecisionKey, _ = tag.NewKey("cascading_filter_decision")
+ tagPolicyDecisionKey, _ = tag.NewKey("policy_decision")
+
+ statDecisionLatencyMicroSec = stats.Int64("policy_decision_latency", "Latency (in microseconds) of a given filtering policy", "µs")
+ statOverallDecisionLatencyus = stats.Int64("cascading_filtering_batch_processing_latency", "Latency (in microseconds) of each run of the cascading filter timer", "µs")
+
+ statTraceRemovalAgeSec = stats.Int64("cascading_trace_removal_age", "Time (in seconds) from arrival of a new trace until its removal from memory", "s")
+ statLateSpanArrivalAfterDecision = stats.Int64("cascadind_late_span_age", "Time (in seconds) from the cascading filter decision was taken and the arrival of a late span", "s")
+
+ statPolicyEvaluationErrorCount = stats.Int64("cascading_policy_evaluation_error", "Count of cascading policy evaluation errors", stats.UnitDimensionless)
+
+ statCascadingFilterDecision = stats.Int64("count_final_decision", "Count of traces that were filtered or not", stats.UnitDimensionless)
+ statPolicyDecision = stats.Int64("count_policy_decision", "Count of provisional (policy) decisions if traces were filtered or not", stats.UnitDimensionless)
+
+ statDroppedTooEarlyCount = stats.Int64("casdading_trace_dropped_too_early", "Count of traces that needed to be dropped the configured wait time", stats.UnitDimensionless)
+ statNewTraceIDReceivedCount = stats.Int64("cascading_new_trace_id_received", "Counts the arrival of new traces", stats.UnitDimensionless)
+ statTracesOnMemoryGauge = stats.Int64("cascading_traces_on_memory", "Tracks the number of traces current on memory", stats.UnitDimensionless)
+)
+
+// CascadingFilterMetricViews return the metrics views according to given telemetry level.
+func CascadingFilterMetricViews(level configtelemetry.Level) []*view.View {
+ if level == configtelemetry.LevelNone {
+ return nil
+ }
+
+ latencyDistributionAggregation := view.Distribution(1, 2, 5, 10, 25, 50, 75, 100, 150, 200, 300, 400, 500, 750, 1000, 2000, 3000, 4000, 5000, 10000, 20000, 30000, 50000)
+ ageDistributionAggregation := view.Distribution(1, 2, 5, 10, 20, 30, 40, 50, 60, 90, 120, 180, 300, 600, 1800, 3600, 7200)
+
+ overallDecisionLatencyView := &view.View{
+ Name: statOverallDecisionLatencyus.Name(),
+ Measure: statOverallDecisionLatencyus,
+ Description: statOverallDecisionLatencyus.Description(),
+ Aggregation: latencyDistributionAggregation,
+ }
+
+ traceRemovalAgeView := &view.View{
+ Name: statTraceRemovalAgeSec.Name(),
+ Measure: statTraceRemovalAgeSec,
+ Description: statTraceRemovalAgeSec.Description(),
+ Aggregation: ageDistributionAggregation,
+ }
+
+ lateSpanArrivalView := &view.View{
+ Name: statLateSpanArrivalAfterDecision.Name(),
+ Measure: statLateSpanArrivalAfterDecision,
+ Description: statLateSpanArrivalAfterDecision.Description(),
+ Aggregation: ageDistributionAggregation,
+ }
+
+ countPolicyEvaluationErrorView := &view.View{
+ Name: statPolicyEvaluationErrorCount.Name(),
+ Measure: statPolicyEvaluationErrorCount,
+ Description: statPolicyEvaluationErrorCount.Description(),
+ Aggregation: view.Sum(),
+ }
+
+ countFinalDecisionView := &view.View{
+ Name: statCascadingFilterDecision.Name(),
+ Measure: statCascadingFilterDecision,
+ Description: statCascadingFilterDecision.Description(),
+ TagKeys: []tag.Key{tagPolicyKey, tagCascadingFilterDecisionKey},
+ Aggregation: view.Sum(),
+ }
+
+ countPolicyDecisionsView := &view.View{
+ Name: statPolicyDecision.Name(),
+ Measure: statPolicyDecision,
+ Description: statPolicyDecision.Description(),
+ TagKeys: []tag.Key{tagPolicyKey, tagPolicyDecisionKey},
+ Aggregation: view.Sum(),
+ }
+
+ policyLatencyView := &view.View{
+ Name: statDecisionLatencyMicroSec.Name(),
+ Measure: statDecisionLatencyMicroSec,
+ Description: statDecisionLatencyMicroSec.Description(),
+ TagKeys: []tag.Key{tagPolicyKey},
+ Aggregation: view.Sum(),
+ }
+
+ countTraceDroppedTooEarlyView := &view.View{
+ Name: statDroppedTooEarlyCount.Name(),
+ Measure: statDroppedTooEarlyCount,
+ Description: statDroppedTooEarlyCount.Description(),
+ Aggregation: view.Sum(),
+ }
+ countTraceIDArrivalView := &view.View{
+ Name: statNewTraceIDReceivedCount.Name(),
+ Measure: statNewTraceIDReceivedCount,
+ Description: statNewTraceIDReceivedCount.Description(),
+ Aggregation: view.Sum(),
+ }
+ trackTracesOnMemorylView := &view.View{
+ Name: statTracesOnMemoryGauge.Name(),
+ Measure: statTracesOnMemoryGauge,
+ Description: statTracesOnMemoryGauge.Description(),
+ Aggregation: view.LastValue(),
+ }
+
+ legacyViews := []*view.View{
+ overallDecisionLatencyView,
+ traceRemovalAgeView,
+ lateSpanArrivalView,
+
+ countPolicyDecisionsView,
+ policyLatencyView,
+ countFinalDecisionView,
+
+ countPolicyEvaluationErrorView,
+ countTraceDroppedTooEarlyView,
+ countTraceIDArrivalView,
+ trackTracesOnMemorylView,
+ }
+
+ // return obsreport.ProcessorMetricViews(typeStr, legacyViews)
+ return legacyViews
+}
diff --git a/processor/cascadingfilterprocessor/processor.go b/processor/cascadingfilterprocessor/processor.go
new file mode 100644
index 000000000000..42c56a2704bc
--- /dev/null
+++ b/processor/cascadingfilterprocessor/processor.go
@@ -0,0 +1,726 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cascadingfilterprocessor
+
+import (
+ "context"
+ "math"
+ "runtime"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "go.opencensus.io/stats"
+ "go.opencensus.io/tag"
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/collector/component/componenterror"
+ "go.opentelemetry.io/collector/consumer"
+ "go.opentelemetry.io/collector/model/pdata"
+ "go.uber.org/zap"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/processor/cascadingfilterprocessor/config"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/processor/cascadingfilterprocessor/idbatcher"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/processor/cascadingfilterprocessor/sampling"
+)
+
+// TraceAcceptEvaluator combines a sampling policy evaluator with the destinations to be
+// used for that policy.
+type TraceAcceptEvaluator struct {
+ // Name used to identify this policy instance.
+ Name string
+ // Evaluator that decides if a trace is sampled or not by this policy instance.
+ Evaluator sampling.PolicyEvaluator
+ // ctx used to carry metric tags of each policy.
+ ctx context.Context
+ // probabilisticFilter determines whether `sampling.probability` field must be calculated and added
+ probabilisticFilter bool
+}
+
+// TraceRejectEvaluator holds checking if trace should be dropped completely before further processing
+type TraceRejectEvaluator struct {
+ // Name used to identify this policy instance.
+ Name string
+ // Evaluator that decides if a trace is sampled or not by this policy instance.
+ Evaluator sampling.DropTraceEvaluator
+ // ctx used to carry metric tags of each policy.
+ ctx context.Context
+}
+
+// traceKey is defined since sync.Map requires a comparable type, isolating it on its own
+// type to help track usage.
+type traceKey [16]byte
+
+// cascadingFilterSpanProcessor handles the incoming trace data and uses the given sampling
+// policy to sample traces.
+type cascadingFilterSpanProcessor struct {
+ ctx context.Context
+ nextConsumer consumer.Traces
+ start sync.Once
+ maxNumTraces uint64
+ traceAcceptRules []*TraceAcceptEvaluator
+ traceRejectRules []*TraceRejectEvaluator
+ logger *zap.Logger
+ idToTrace sync.Map
+ policyTicker tTicker
+ decisionBatcher idbatcher.Batcher
+ deleteChan chan traceKey
+ numTracesOnMap uint64
+
+ currentSecond int64
+ maxSpansPerSecond int32
+ spansInCurrentSecond int32
+}
+
+const (
+ probabilisticFilterPolicyName = "probabilistic_filter"
+ probabilisticRuleVale = "probabilistic"
+ filteredRuleValue = "filtered"
+ AttributeSamplingRule = "sampling.rule"
+
+ AttributeSamplingProbability = "sampling.probability"
+)
+
+// newTraceProcessor returns a processor.TraceProcessor that will perform Cascading Filter according to the given
+// configuration.
+func newTraceProcessor(logger *zap.Logger, nextConsumer consumer.Traces, cfg config.Config) (component.TracesProcessor, error) {
+ if nextConsumer == nil {
+ return nil, componenterror.ErrNilNextConsumer
+ }
+
+ return newCascadingFilterSpanProcessor(logger, nextConsumer, cfg)
+}
+
+func newCascadingFilterSpanProcessor(logger *zap.Logger, nextConsumer consumer.Traces, cfg config.Config) (*cascadingFilterSpanProcessor, error) {
+ numDecisionBatches := uint64(cfg.DecisionWait.Seconds())
+ inBatcher, err := idbatcher.New(numDecisionBatches, cfg.ExpectedNewTracesPerSec, uint64(2*runtime.NumCPU()))
+ if err != nil {
+ return nil, err
+ }
+
+ ctx := context.Background()
+ var policies []*TraceAcceptEvaluator
+ var dropTraceEvals []*TraceRejectEvaluator
+
+ // Prepare Trace Reject config
+
+ for _, dropCfg := range cfg.TraceRejectCfgs {
+ dropCtx, err := tag.New(ctx, tag.Upsert(tagPolicyKey, dropCfg.Name), tag.Upsert(tagPolicyDecisionKey, statusDropped))
+ if err != nil {
+ return nil, err
+ }
+ evaluator, err := sampling.NewDropTraceEvaluator(logger, dropCfg)
+ if err != nil {
+ return nil, err
+ }
+ dropEval := &TraceRejectEvaluator{
+ Name: dropCfg.Name,
+ Evaluator: evaluator,
+ ctx: dropCtx,
+ }
+ logger.Info("Adding trace reject rule", zap.String("name", dropCfg.Name))
+ dropTraceEvals = append(dropTraceEvals, dropEval)
+ }
+
+ // Prepare Trace Accept config
+
+ var policyCfgs []config.TraceAcceptCfg
+ totalRate := int32(0)
+
+ if len(cfg.TraceAcceptCfgs) > 0 {
+ policyCfgs = append(policyCfgs, cfg.TraceAcceptCfgs...)
+ }
+
+ if len(cfg.PolicyCfgs) > 0 {
+ logger.Warn("'traceAcceptRules' is deprecated and will be removed in future versions, please use 'trace_accept_filters' instead")
+ policyCfgs = append(policyCfgs, cfg.PolicyCfgs...)
+ }
+
+ for i := range policyCfgs {
+ policyCfg := policyCfgs[i]
+ policyCtx, err := tag.New(ctx, tag.Upsert(tagPolicyKey, policyCfg.Name))
+ if err != nil {
+ return nil, err
+ }
+ eval, err := buildPolicyEvaluator(logger, &policyCfg)
+ if err != nil {
+ return nil, err
+ }
+ policy := &TraceAcceptEvaluator{
+ Name: policyCfg.Name,
+ Evaluator: eval,
+ ctx: policyCtx,
+ probabilisticFilter: false,
+ }
+ if policyCfg.SpansPerSecond > 0 {
+ totalRate += policyCfg.SpansPerSecond
+ }
+ logger.Info("Adding trace accept rule",
+ zap.String("name", policyCfg.Name),
+ zap.Int32("spans_per_second", policyCfg.SpansPerSecond))
+ policies = append(policies, policy)
+ }
+
+ // Recalculate the total spans per second rate if needed
+ spansPerSecond := cfg.SpansPerSecond
+ if spansPerSecond == 0 {
+ spansPerSecond = totalRate
+ if cfg.ProbabilisticFilteringRate != nil && *cfg.ProbabilisticFilteringRate > 0 {
+ spansPerSecond += *cfg.ProbabilisticFilteringRate
+ }
+ }
+
+ if spansPerSecond != 0 {
+ logger.Info("Setting total spans per second limit", zap.Int32("spans_per_second", spansPerSecond))
+ } else {
+ logger.Info("Not setting total spans per second limit (only selected traces will be filtered out)")
+ }
+
+ // Setup probabilistic filtering - using either ratio or rate.
+ // This must always be evaluated first as it must select traces independently of other traceAcceptRules
+
+ probabilisticFilteringRate := int32(-1)
+
+ if cfg.ProbabilisticFilteringRatio != nil && *cfg.ProbabilisticFilteringRatio > 0.0 && spansPerSecond > 0 {
+ probabilisticFilteringRate = int32(float32(spansPerSecond) * *cfg.ProbabilisticFilteringRatio)
+ } else if cfg.ProbabilisticFilteringRate != nil && *cfg.ProbabilisticFilteringRate > 0 {
+ probabilisticFilteringRate = *cfg.ProbabilisticFilteringRate
+ }
+
+ if probabilisticFilteringRate > 0 {
+ logger.Info("Setting probabilistic filtering rate", zap.Int32("probabilistic_filtering_rate", probabilisticFilteringRate))
+
+ policyCtx, err := tag.New(ctx, tag.Upsert(tagPolicyKey, probabilisticFilterPolicyName))
+ if err != nil {
+ return nil, err
+ }
+ eval, err := buildProbabilisticFilterEvaluator(logger, probabilisticFilteringRate)
+ if err != nil {
+ return nil, err
+ }
+ policy := &TraceAcceptEvaluator{
+ Name: probabilisticFilterPolicyName,
+ Evaluator: eval,
+ ctx: policyCtx,
+ probabilisticFilter: true,
+ }
+ policies = append([]*TraceAcceptEvaluator{policy}, policies...)
+ } else {
+ logger.Info("Not setting probabilistic filtering rate")
+ }
+
+ // Build the span processor
+
+ cfsp := &cascadingFilterSpanProcessor{
+ ctx: ctx,
+ nextConsumer: nextConsumer,
+ maxNumTraces: cfg.NumTraces,
+ maxSpansPerSecond: spansPerSecond,
+ logger: logger,
+ decisionBatcher: inBatcher,
+ traceAcceptRules: policies,
+ traceRejectRules: dropTraceEvals,
+ }
+
+ cfsp.policyTicker = &policyTicker{onTick: cfsp.samplingPolicyOnTick}
+ cfsp.deleteChan = make(chan traceKey, cfg.NumTraces)
+
+ return cfsp, nil
+}
+
+func buildPolicyEvaluator(logger *zap.Logger, cfg *config.TraceAcceptCfg) (sampling.PolicyEvaluator, error) {
+ return sampling.NewFilter(logger, cfg)
+}
+
+func buildProbabilisticFilterEvaluator(logger *zap.Logger, maxSpanRate int32) (sampling.PolicyEvaluator, error) {
+ return sampling.NewProbabilisticFilter(logger, maxSpanRate)
+}
+
+type policyMetrics struct {
+ idNotFoundOnMapCount, evaluateErrorCount, decisionSampled, decisionNotSampled int64
+}
+
+func (cfsp *cascadingFilterSpanProcessor) updateRate(currSecond int64, numSpans int32) sampling.Decision {
+ if cfsp.maxSpansPerSecond <= 0 {
+ return sampling.Sampled
+ }
+
+ if cfsp.currentSecond != currSecond {
+ cfsp.currentSecond = currSecond
+ cfsp.spansInCurrentSecond = 0
+ }
+
+ spansInSecondIfSampled := cfsp.spansInCurrentSecond + numSpans
+ if spansInSecondIfSampled <= cfsp.maxSpansPerSecond {
+ cfsp.spansInCurrentSecond = spansInSecondIfSampled
+ return sampling.Sampled
+ }
+
+ return sampling.NotSampled
+}
+
+func (cfsp *cascadingFilterSpanProcessor) samplingPolicyOnTick() {
+ metrics := policyMetrics{}
+
+ startTime := time.Now()
+ batch, _ := cfsp.decisionBatcher.CloseCurrentAndTakeFirstBatch()
+ batchLen := len(batch)
+ cfsp.logger.Debug("Sampling Policy Evaluation ticked")
+
+ currSecond := time.Now().Unix()
+
+ totalSpans := int64(0)
+ selectedByProbabilisticFilterSpans := int64(0)
+
+ // The first run applies decisions to batches, executing each policy separately
+ for _, id := range batch {
+ d, ok := cfsp.idToTrace.Load(traceKey(id.Bytes()))
+ if !ok {
+ metrics.idNotFoundOnMapCount++
+ continue
+ }
+ trace := d.(*sampling.TraceData)
+ trace.DecisionTime = time.Now()
+
+ var provisionalDecision sampling.Decision
+
+ // Dropped traces are not included in probabilistic filtering calculations
+ if cfsp.shouldBeDropped(id, trace) {
+ provisionalDecision = sampling.Dropped
+ } else {
+ totalSpans += int64(trace.SpanCount)
+ provisionalDecision, _ = cfsp.makeProvisionalDecision(id, trace)
+ }
+
+ if provisionalDecision == sampling.Sampled {
+ trace.FinalDecision = cfsp.updateRate(currSecond, trace.SpanCount)
+ if trace.FinalDecision == sampling.Sampled {
+ if trace.SelectedByProbabilisticFilter {
+ selectedByProbabilisticFilterSpans += int64(trace.SpanCount)
+ }
+ err := stats.RecordWithTags(
+ cfsp.ctx,
+ []tag.Mutator{tag.Insert(tagCascadingFilterDecisionKey, statusSampled)},
+ statCascadingFilterDecision.M(int64(1)),
+ )
+ if err != nil {
+ cfsp.logger.Error("Sampling Policy Evaluation error on first run tick", zap.Error(err))
+ }
+ } else {
+ err := stats.RecordWithTags(
+ cfsp.ctx,
+ []tag.Mutator{tag.Insert(tagCascadingFilterDecisionKey, statusExceededKey)},
+ statCascadingFilterDecision.M(int64(1)),
+ )
+ if err != nil {
+ cfsp.logger.Error("Sampling Policy Evaluation error on first run tick", zap.Error(err))
+ }
+ }
+ } else if provisionalDecision == sampling.SecondChance {
+ trace.FinalDecision = sampling.SecondChance
+ } else {
+ trace.FinalDecision = provisionalDecision
+ err := stats.RecordWithTags(
+ cfsp.ctx,
+ []tag.Mutator{tag.Insert(tagCascadingFilterDecisionKey, statusNotSampled)},
+ statCascadingFilterDecision.M(int64(1)),
+ )
+ if err != nil {
+ cfsp.logger.Error("Sampling Policy Evaluation error on first run tick", zap.Error(err))
+ }
+ }
+ }
+
+ // The second run executes the decisions and makes "SecondChance" decisions in the meantime
+ for _, id := range batch {
+ d, ok := cfsp.idToTrace.Load(traceKey(id.Bytes()))
+ if !ok {
+ continue
+ }
+ trace := d.(*sampling.TraceData)
+ if trace.FinalDecision == sampling.SecondChance {
+ trace.FinalDecision = cfsp.updateRate(currSecond, trace.SpanCount)
+ if trace.FinalDecision == sampling.Sampled {
+ err := stats.RecordWithTags(
+ cfsp.ctx,
+ []tag.Mutator{tag.Insert(tagCascadingFilterDecisionKey, statusSecondChanceSampled)},
+ statCascadingFilterDecision.M(int64(1)),
+ )
+ if err != nil {
+ cfsp.logger.Error("Sampling Policy Evaluation error on second run tick", zap.Error(err))
+ }
+ } else {
+ err := stats.RecordWithTags(
+ cfsp.ctx,
+ []tag.Mutator{tag.Insert(tagCascadingFilterDecisionKey, statusSecondChanceExceeded)},
+ statCascadingFilterDecision.M(int64(1)),
+ )
+ if err != nil {
+ cfsp.logger.Error("Sampling Policy Evaluation error on second run tick", zap.Error(err))
+ }
+ }
+ }
+
+ // Sampled or not, remove the batches
+ trace.Lock()
+ traceBatches := trace.ReceivedBatches
+ trace.ReceivedBatches = nil
+ trace.Unlock()
+
+ if trace.FinalDecision == sampling.Sampled {
+ metrics.decisionSampled++
+
+ // Combine all individual batches into a single batch so
+ // consumers may operate on the entire trace
+ allSpans := pdata.NewTraces()
+ for j := 0; j < len(traceBatches); j++ {
+ batch := traceBatches[j]
+ batch.ResourceSpans().MoveAndAppendTo(allSpans.ResourceSpans())
+ }
+
+ if trace.SelectedByProbabilisticFilter {
+ updateProbabilisticRateTag(allSpans, selectedByProbabilisticFilterSpans, totalSpans)
+ } else {
+ updateFilteringTag(allSpans)
+ }
+
+ err := cfsp.nextConsumer.ConsumeTraces(cfsp.ctx, allSpans)
+ if err != nil {
+ cfsp.logger.Error("Sampling Policy Evaluation error on consuming traces", zap.Error(err))
+ }
+ } else {
+ metrics.decisionNotSampled++
+ }
+ }
+
+ stats.Record(cfsp.ctx,
+ statOverallDecisionLatencyus.M(int64(time.Since(startTime)/time.Microsecond)),
+ statDroppedTooEarlyCount.M(metrics.idNotFoundOnMapCount),
+ statPolicyEvaluationErrorCount.M(metrics.evaluateErrorCount),
+ statTracesOnMemoryGauge.M(int64(atomic.LoadUint64(&cfsp.numTracesOnMap))))
+
+ cfsp.logger.Debug("Sampling policy evaluation completed",
+ zap.Int("batch.len", batchLen),
+ zap.Int64("sampled", metrics.decisionSampled),
+ zap.Int64("notSampled", metrics.decisionNotSampled),
+ zap.Int64("droppedPriorToEvaluation", metrics.idNotFoundOnMapCount),
+ zap.Int64("policyEvaluationErrors", metrics.evaluateErrorCount),
+ )
+}
+
+func updateProbabilisticRateTag(traces pdata.Traces, probabilisticSpans int64, allSpans int64) {
+ ratio := float64(probabilisticSpans) / float64(allSpans)
+
+ rs := traces.ResourceSpans()
+
+ for i := 0; i < rs.Len(); i++ {
+ ils := rs.At(i).InstrumentationLibrarySpans()
+ for j := 0; j < ils.Len(); j++ {
+ spans := ils.At(j).Spans()
+ for k := 0; k < spans.Len(); k++ {
+ attrs := spans.At(k).Attributes()
+ av, found := attrs.Get(AttributeSamplingProbability)
+ if found && av.Type() == pdata.AttributeValueTypeDouble && !math.IsNaN(av.DoubleVal()) && av.DoubleVal() > 0.0 {
+ av.SetDoubleVal(av.DoubleVal() * ratio)
+ } else {
+ attrs.UpsertDouble(AttributeSamplingProbability, ratio)
+ }
+ attrs.UpsertString(AttributeSamplingRule, probabilisticRuleVale)
+ }
+ }
+ }
+}
+
+func updateFilteringTag(traces pdata.Traces) {
+ rs := traces.ResourceSpans()
+
+ for i := 0; i < rs.Len(); i++ {
+ ils := rs.At(i).InstrumentationLibrarySpans()
+ for j := 0; j < ils.Len(); j++ {
+ spans := ils.At(j).Spans()
+ for k := 0; k < spans.Len(); k++ {
+ attrs := spans.At(k).Attributes()
+ attrs.UpsertString(AttributeSamplingRule, filteredRuleValue)
+ }
+ }
+ }
+}
+
+func (cfsp *cascadingFilterSpanProcessor) shouldBeDropped(id pdata.TraceID, trace *sampling.TraceData) bool {
+ for _, dropRule := range cfsp.traceRejectRules {
+ if dropRule.Evaluator.ShouldDrop(id, trace) {
+ stats.Record(dropRule.ctx, statPolicyDecision.M(int64(1)))
+ return true
+ }
+ }
+ return false
+}
+
+func (cfsp *cascadingFilterSpanProcessor) makeProvisionalDecision(id pdata.TraceID, trace *sampling.TraceData) (sampling.Decision, *TraceAcceptEvaluator) {
+ // When no rules are defined, always sample
+ if len(cfsp.traceAcceptRules) == 0 {
+ return sampling.Sampled, nil
+ }
+
+ provisionalDecision := sampling.Unspecified
+
+ for i, policy := range cfsp.traceAcceptRules {
+ policyEvaluateStartTime := time.Now()
+ decision := policy.Evaluator.Evaluate(id, trace)
+ stats.Record(
+ policy.ctx,
+ statDecisionLatencyMicroSec.M(int64(time.Since(policyEvaluateStartTime)/time.Microsecond)))
+
+ trace.Decisions[i] = decision
+
+ switch decision {
+ case sampling.Sampled:
+ // any single policy that decides to sample will cause the decision to be sampled
+ // the nextConsumer will get the context from the first matching policy
+ provisionalDecision = sampling.Sampled
+
+ if policy.probabilisticFilter {
+ trace.SelectedByProbabilisticFilter = true
+ }
+
+ err := stats.RecordWithTags(
+ policy.ctx,
+ []tag.Mutator{tag.Insert(tagPolicyDecisionKey, statusSampled)},
+ statPolicyDecision.M(int64(1)),
+ )
+ if err != nil {
+ cfsp.logger.Error("Making provisional decision error", zap.Error(err))
+ }
+
+ // No need to continue
+ return provisionalDecision, policy
+ case sampling.NotSampled:
+ if provisionalDecision == sampling.Unspecified {
+ provisionalDecision = sampling.NotSampled
+ }
+ err := stats.RecordWithTags(
+ policy.ctx,
+ []tag.Mutator{tag.Insert(tagPolicyDecisionKey, statusNotSampled)},
+ statPolicyDecision.M(int64(1)),
+ )
+ if err != nil {
+ cfsp.logger.Error("Making provisional decision error", zap.Error(err))
+ }
+ case sampling.SecondChance:
+ if provisionalDecision != sampling.Sampled {
+ provisionalDecision = sampling.SecondChance
+ }
+
+ err := stats.RecordWithTags(
+ policy.ctx,
+ []tag.Mutator{tag.Insert(tagPolicyDecisionKey, statusSecondChance)},
+ statPolicyDecision.M(int64(1)),
+ )
+ if err != nil {
+ cfsp.logger.Error("Making provisional decision error", zap.Error(err))
+ }
+ }
+ }
+
+ return provisionalDecision, nil
+}
+
+// ConsumeTraces is required by the SpanProcessor interface.
+func (cfsp *cascadingFilterSpanProcessor) ConsumeTraces(ctx context.Context, td pdata.Traces) error {
+ cfsp.start.Do(func() {
+ cfsp.logger.Info("First trace data arrived, starting cascading_filter timers")
+ cfsp.policyTicker.Start(1 * time.Second)
+ })
+ resourceSpans := td.ResourceSpans()
+ for i := 0; i < resourceSpans.Len(); i++ {
+ resourceSpan := resourceSpans.At(i)
+ cfsp.processTraces(ctx, resourceSpan)
+ }
+ return nil
+}
+
+func (cfsp *cascadingFilterSpanProcessor) groupSpansByTraceKey(resourceSpans pdata.ResourceSpans) map[traceKey][]*pdata.Span {
+ idToSpans := make(map[traceKey][]*pdata.Span)
+ ilss := resourceSpans.InstrumentationLibrarySpans()
+ for j := 0; j < ilss.Len(); j++ {
+ ils := ilss.At(j)
+ spansLen := ils.Spans().Len()
+ for k := 0; k < spansLen; k++ {
+ span := ils.Spans().At(k)
+ tk := traceKey(span.TraceID().Bytes())
+ if len(tk) != 16 {
+ cfsp.logger.Warn("Span without valid TraceId")
+ }
+ idToSpans[tk] = append(idToSpans[tk], &span)
+ }
+ }
+ return idToSpans
+}
+
+func (cfsp *cascadingFilterSpanProcessor) processTraces(ctx context.Context, resourceSpans pdata.ResourceSpans) {
+ // Group spans per their traceId to minimize contention on idToTrace
+ idToSpans := cfsp.groupSpansByTraceKey(resourceSpans)
+ var newTraceIDs int64
+ for id, spans := range idToSpans {
+ lenSpans := int32(len(spans))
+ lenPolicies := len(cfsp.traceAcceptRules)
+ initialDecisions := make([]sampling.Decision, lenPolicies)
+
+ for i := 0; i < lenPolicies; i++ {
+ initialDecisions[i] = sampling.Pending
+ }
+ initialTraceData := &sampling.TraceData{
+ Decisions: initialDecisions,
+ ArrivalTime: time.Now(),
+ SpanCount: lenSpans,
+ }
+ d, loaded := cfsp.idToTrace.LoadOrStore(id, initialTraceData)
+
+ actualData := d.(*sampling.TraceData)
+ if loaded {
+ // PMM: why is actualData not updated with the new trace?
+ atomic.AddInt32(&actualData.SpanCount, lenSpans)
+ } else {
+ newTraceIDs++
+ cfsp.decisionBatcher.AddToCurrentBatch(pdata.NewTraceID(id))
+ atomic.AddUint64(&cfsp.numTracesOnMap, 1)
+ postDeletion := false
+ currTime := time.Now()
+
+ for !postDeletion {
+ select {
+ case cfsp.deleteChan <- id:
+ postDeletion = true
+ default:
+ // Note this is a buffered channel, so this will only delete excessive traces (if they exist)
+ traceKeyToDrop := <-cfsp.deleteChan
+ cfsp.dropTrace(traceKeyToDrop, currTime)
+ }
+ }
+ }
+
+ // Add the spans to the trace, but only once for all policies, otherwise the same spans will
+ // be duplicated in the final trace.
+ actualData.Lock()
+ traceTd := prepareTraceBatch(resourceSpans, spans)
+ actualData.ReceivedBatches = append(actualData.ReceivedBatches, traceTd)
+ finalDecision := actualData.FinalDecision
+ actualData.Unlock()
+
+ // This section is run in case the decision was already applied earlier
+ switch finalDecision {
+ case sampling.Unspecified:
+ // This has not been determined yet
+ case sampling.Pending:
+ // All process for pending done above, keep the case so it doesn't go to default.
+ case sampling.SecondChance:
+ // It shouldn't normally get here, keep the case so it doesn't go to default, like above.
+ case sampling.Sampled:
+ // Forward the spans to the policy destinations
+ traceTd := prepareTraceBatch(resourceSpans, spans)
+ if err := cfsp.nextConsumer.ConsumeTraces(ctx, traceTd); err != nil {
+ cfsp.logger.Warn("Error sending late arrived spans to destination",
+ zap.Error(err))
+ }
+ stats.Record(cfsp.ctx, statLateSpanArrivalAfterDecision.M(int64(time.Since(actualData.DecisionTime)/time.Second)))
+ case sampling.NotSampled:
+ stats.Record(cfsp.ctx, statLateSpanArrivalAfterDecision.M(int64(time.Since(actualData.DecisionTime)/time.Second)))
+ case sampling.Dropped:
+ stats.Record(cfsp.ctx, statLateSpanArrivalAfterDecision.M(int64(time.Since(actualData.DecisionTime)/time.Second)))
+ default:
+ cfsp.logger.Warn("Encountered unexpected sampling decision",
+ zap.Int("decision", int(finalDecision)))
+ }
+ }
+
+ stats.Record(cfsp.ctx, statNewTraceIDReceivedCount.M(newTraceIDs))
+}
+
+// func (cfsp *cascadingFilterSpanProcessor) GetCapabilities() component.ProcessorCapabilities {
+// return component.ProcessorCapabilities{MutatesConsumedData: false}
+// }
+
+func (cfsp *cascadingFilterSpanProcessor) Capabilities() consumer.Capabilities {
+ return consumer.Capabilities{MutatesData: false}
+}
+
+// Start is invoked during service startup.
+func (cfsp *cascadingFilterSpanProcessor) Start(context.Context, component.Host) error {
+ return nil
+}
+
+// Shutdown is invoked during service shutdown.
+func (cfsp *cascadingFilterSpanProcessor) Shutdown(context.Context) error {
+ return nil
+}
+
+func (cfsp *cascadingFilterSpanProcessor) dropTrace(traceID traceKey, deletionTime time.Time) {
+ var trace *sampling.TraceData
+ if d, ok := cfsp.idToTrace.Load(traceID); ok {
+ trace = d.(*sampling.TraceData)
+ cfsp.idToTrace.Delete(traceID)
+ // Subtract one from numTracesOnMap per https://godoc.org/sync/atomic#AddUint64
+ atomic.AddUint64(&cfsp.numTracesOnMap, ^uint64(0))
+ }
+ if trace == nil {
+ cfsp.logger.Error("Attempt to delete traceID not on table")
+ return
+ }
+
+ stats.Record(cfsp.ctx, statTraceRemovalAgeSec.M(int64(deletionTime.Sub(trace.ArrivalTime)/time.Second)))
+}
+
+func prepareTraceBatch(rss pdata.ResourceSpans, spans []*pdata.Span) pdata.Traces {
+ traceTd := pdata.NewTraces()
+ rs := traceTd.ResourceSpans().AppendEmpty()
+ rss.Resource().CopyTo(rs.Resource())
+ ils := rs.InstrumentationLibrarySpans().AppendEmpty()
+ ilsSpans := ils.Spans()
+ for _, span := range spans {
+ span.CopyTo(ilsSpans.AppendEmpty())
+ }
+ return traceTd
+}
+
+// tTicker interface allows easier testing of ticker related functionality used by cascadingfilterprocessor
+type tTicker interface {
+ // Start sets the frequency of the ticker and starts the periodic calls to OnTick.
+ Start(d time.Duration)
+ // OnTick is called when the ticker fires.
+ OnTick()
+ // Stops firing the ticker.
+ Stop()
+}
+
+type policyTicker struct {
+ ticker *time.Ticker
+ onTick func()
+}
+
+func (pt *policyTicker) Start(d time.Duration) {
+ pt.ticker = time.NewTicker(d)
+ go func() {
+ for range pt.ticker.C {
+ pt.OnTick()
+ }
+ }()
+}
+func (pt *policyTicker) OnTick() {
+ pt.onTick()
+}
+func (pt *policyTicker) Stop() {
+ pt.ticker.Stop()
+}
+
+var _ tTicker = (*policyTicker)(nil)
diff --git a/processor/cascadingfilterprocessor/processor_test.go b/processor/cascadingfilterprocessor/processor_test.go
new file mode 100644
index 000000000000..5c15f722066c
--- /dev/null
+++ b/processor/cascadingfilterprocessor/processor_test.go
@@ -0,0 +1,658 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cascadingfilterprocessor
+
+import (
+ "context"
+ "errors"
+ "sort"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.opentelemetry.io/collector/consumer/consumertest"
+ "go.opentelemetry.io/collector/model/pdata"
+ "go.uber.org/zap"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/processor/cascadingfilterprocessor/bigendianconverter"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/processor/cascadingfilterprocessor/config"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/processor/cascadingfilterprocessor/idbatcher"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/processor/cascadingfilterprocessor/sampling"
+)
+
+const (
+ defaultTestDecisionWait = 30 * time.Second
+)
+
+//nolint:unused
+var testPolicy = []config.TraceAcceptCfg{{
+ Name: "test-policy",
+ SpansPerSecond: 1000,
+}}
+
+func TestSequentialTraceArrival(t *testing.T) {
+ traceIds, batches := generateIdsAndBatches(128)
+ cfg := config.Config{
+ DecisionWait: defaultTestDecisionWait,
+ NumTraces: uint64(2 * len(traceIds)),
+ ExpectedNewTracesPerSec: 64,
+ PolicyCfgs: testPolicy,
+ }
+ sp, err := newTraceProcessor(zap.NewNop(), consumertest.NewNop(), cfg)
+ require.NoError(t, err)
+ tsp := sp.(*cascadingFilterSpanProcessor)
+ for _, batch := range batches {
+ assert.NoError(t, tsp.ConsumeTraces(context.Background(), batch))
+ }
+
+ for i := range traceIds {
+ d, ok := tsp.idToTrace.Load(traceKey(traceIds[i].Bytes()))
+ require.True(t, ok, "Missing expected traceId")
+ v := d.(*sampling.TraceData)
+ require.Equal(t, int32(i+1), v.SpanCount, "Incorrect number of spans for entry %d", i)
+ }
+}
+
+func TestConcurrentTraceArrival(t *testing.T) {
+ traceIds, batches := generateIdsAndBatches(128)
+
+ var wg sync.WaitGroup
+ cfg := config.Config{
+ DecisionWait: defaultTestDecisionWait,
+ NumTraces: uint64(2 * len(traceIds)),
+ ExpectedNewTracesPerSec: 64,
+ PolicyCfgs: testPolicy,
+ }
+ sp, err := newTraceProcessor(zap.NewNop(), consumertest.NewNop(), cfg)
+ require.NoError(t, err)
+ tsp := sp.(*cascadingFilterSpanProcessor)
+ for _, batch := range batches {
+ // Add the same traceId twice.
+ wg.Add(2)
+ go func(td pdata.Traces) {
+ if err := tsp.ConsumeTraces(context.Background(), td); err != nil {
+ t.Errorf("Failed consuming traces: %v", err)
+ }
+ wg.Done()
+ }(batch)
+ go func(td pdata.Traces) {
+ if err := tsp.ConsumeTraces(context.Background(), td); err != nil {
+ t.Errorf("Failed consuming traces: %v", err)
+ }
+ wg.Done()
+ }(batch)
+ }
+
+ wg.Wait()
+
+ for i := range traceIds {
+ d, ok := tsp.idToTrace.Load(traceKey(traceIds[i].Bytes()))
+ require.True(t, ok, "Missing expected traceId")
+ v := d.(*sampling.TraceData)
+ require.Equal(t, int32(i+1)*2, v.SpanCount, "Incorrect number of spans for entry %d", i)
+ }
+}
+
+func TestSequentialTraceMapSize(t *testing.T) {
+ traceIds, batches := generateIdsAndBatches(210)
+ const maxSize = 100
+ cfg := config.Config{
+ DecisionWait: defaultTestDecisionWait,
+ NumTraces: uint64(maxSize),
+ ExpectedNewTracesPerSec: 64,
+ PolicyCfgs: testPolicy,
+ }
+ sp, err := newTraceProcessor(zap.NewNop(), consumertest.NewNop(), cfg)
+ require.NoError(t, err)
+ tsp := sp.(*cascadingFilterSpanProcessor)
+ for _, batch := range batches {
+ if err := tsp.ConsumeTraces(context.Background(), batch); err != nil {
+ t.Errorf("Failed consuming traces: %v", err)
+ }
+ }
+
+ // On sequential insertion it is possible to know exactly which traces should be still on the map.
+ for i := 0; i < len(traceIds)-maxSize; i++ {
+ _, ok := tsp.idToTrace.Load(traceKey(traceIds[i].Bytes()))
+ require.False(t, ok, "Found unexpected traceId[%d] still on map (id: %v)", i, traceIds[i])
+ }
+}
+
+func TestConcurrentTraceMapSize(t *testing.T) {
+ _, batches := generateIdsAndBatches(210)
+ const maxSize = 100
+ var wg sync.WaitGroup
+ cfg := config.Config{
+ DecisionWait: defaultTestDecisionWait,
+ NumTraces: uint64(maxSize),
+ ExpectedNewTracesPerSec: 64,
+ PolicyCfgs: testPolicy,
+ }
+ sp, err := newTraceProcessor(zap.NewNop(), consumertest.NewNop(), cfg)
+ require.NoError(t, err)
+ tsp := sp.(*cascadingFilterSpanProcessor)
+ for _, batch := range batches {
+ wg.Add(1)
+ go func(td pdata.Traces) {
+ if err := tsp.ConsumeTraces(context.Background(), td); err != nil {
+ t.Errorf("Failed consuming traces: %v", err)
+ }
+ wg.Done()
+ }(batch)
+ }
+
+ wg.Wait()
+
+ // Since we can't guarantee the order of insertion the only thing that can be checked is
+ // if the number of traces on the map matches the expected value.
+ cnt := 0
+ tsp.idToTrace.Range(func(_ interface{}, _ interface{}) bool {
+ cnt++
+ return true
+ })
+ require.Equal(t, maxSize, cnt, "Incorrect traces count on idToTrace")
+}
+
+func TestSamplingPolicyTypicalPath(t *testing.T) {
+ const maxSize = 100
+ const decisionWaitSeconds = 5
+ // For this test explicitly control the timer calls and batcher, and set a mock
+ // sampling policy evaluator.
+ msp := new(consumertest.TracesSink)
+ mpe := &mockPolicyEvaluator{}
+ mtt := &manualTTicker{}
+ tsp := &cascadingFilterSpanProcessor{
+ ctx: context.Background(),
+ nextConsumer: msp,
+ maxNumTraces: maxSize,
+ logger: zap.NewNop(),
+ decisionBatcher: newSyncIDBatcher(decisionWaitSeconds),
+ traceAcceptRules: []*TraceAcceptEvaluator{{Name: "mock-policy", Evaluator: mpe, ctx: context.TODO()}},
+ deleteChan: make(chan traceKey, maxSize),
+ policyTicker: mtt,
+ maxSpansPerSecond: 10000,
+ }
+
+ _, batches := generateIdsAndBatches(210)
+ currItem := 0
+ numSpansPerBatchWindow := 10
+ // First evaluations shouldn't have anything to evaluate, until decision wait time passed.
+ for evalNum := 0; evalNum < decisionWaitSeconds; evalNum++ {
+ for ; currItem < numSpansPerBatchWindow*(evalNum+1); currItem++ {
+ if err := tsp.ConsumeTraces(context.Background(), batches[currItem]); err != nil {
+ t.Errorf("Failed consuming traces: %v", err)
+ }
+ require.True(t, mtt.Started, "Time ticker was expected to have started")
+ }
+ tsp.samplingPolicyOnTick()
+ require.False(
+ t,
+ msp.SpanCount() != 0 || mpe.EvaluationCount != 0,
+ "policy for initial items was evaluated before decision wait period",
+ )
+ }
+
+ // Now the first batch that waited the decision period.
+ mpe.NextDecision = sampling.Sampled
+ tsp.samplingPolicyOnTick()
+ require.False(
+ t,
+ msp.SpanCount() == 0 || mpe.EvaluationCount == 0,
+ "policy should have been evaluated totalspans == %d and evaluationcount == %d",
+ msp.SpanCount(),
+ mpe.EvaluationCount,
+ )
+
+ require.Equal(t, numSpansPerBatchWindow, msp.SpanCount(), "not all spans of first window were accounted for")
+
+ // Late span of a sampled trace should be sent directly down the pipeline exporter
+ if err := tsp.ConsumeTraces(context.Background(), batches[0]); err != nil {
+ t.Errorf("Failed consuming traces: %v", err)
+ }
+ expectedNumWithLateSpan := numSpansPerBatchWindow + 1
+ require.Equal(t, expectedNumWithLateSpan, msp.SpanCount(), "late span was not accounted for")
+}
+
+func TestSamplingMultiplePolicies(t *testing.T) {
+ const maxSize = 100
+ const decisionWaitSeconds = 5
+ // For this test explicitly control the timer calls and batcher, and set a mock
+ // sampling policy evaluator.
+ msp := new(consumertest.TracesSink)
+ mpe1 := &mockPolicyEvaluator{}
+ mpe2 := &mockPolicyEvaluator{}
+ mtt := &manualTTicker{}
+ tsp := &cascadingFilterSpanProcessor{
+ ctx: context.Background(),
+ nextConsumer: msp,
+ maxNumTraces: maxSize,
+ logger: zap.NewNop(),
+ decisionBatcher: newSyncIDBatcher(decisionWaitSeconds),
+ traceAcceptRules: []*TraceAcceptEvaluator{
+ {
+ Name: "policy-1", Evaluator: mpe1, ctx: context.TODO(),
+ },
+ {
+ Name: "policy-2", Evaluator: mpe2, ctx: context.TODO(),
+ }},
+ deleteChan: make(chan traceKey, maxSize),
+ policyTicker: mtt,
+ maxSpansPerSecond: 10000,
+ }
+
+ _, batches := generateIdsAndBatches(210)
+ currItem := 0
+ numSpansPerBatchWindow := 10
+ // First evaluations shouldn't have anything to evaluate, until decision wait time passed.
+ for evalNum := 0; evalNum < decisionWaitSeconds; evalNum++ {
+ for ; currItem < numSpansPerBatchWindow*(evalNum+1); currItem++ {
+ if err := tsp.ConsumeTraces(context.Background(), batches[currItem]); err != nil {
+ t.Errorf("Failed consuming traces: %v", err)
+ }
+
+ require.True(t, mtt.Started, "Time ticker was expected to have started")
+ }
+ tsp.samplingPolicyOnTick()
+ require.False(
+ t,
+ msp.SpanCount() != 0 || mpe1.EvaluationCount != 0 || mpe2.EvaluationCount != 0,
+ "policy for initial items was evaluated before decision wait period",
+ )
+ }
+
+ // Single traceAcceptRules will decide to sample
+ mpe1.NextDecision = sampling.Sampled
+ mpe2.NextDecision = sampling.Unspecified
+ tsp.samplingPolicyOnTick()
+ require.False(
+ t,
+ msp.SpanCount() == 0 || mpe1.EvaluationCount == 0,
+ "policy should have been evaluated totalspans == %d and evaluationcount(1) == %d and evaluationcount(2) == %d",
+ msp.SpanCount(),
+ mpe1.EvaluationCount,
+ mpe2.EvaluationCount,
+ )
+
+ require.Equal(t, numSpansPerBatchWindow, msp.SpanCount(), "nextConsumer should've been called with exactly 1 batch of spans")
+
+ // Late span of a sampled trace should be sent directly down the pipeline exporter
+ if err := tsp.ConsumeTraces(context.Background(), batches[0]); err != nil {
+ t.Errorf("Failed consuming traces: %v", err)
+ }
+
+ expectedNumWithLateSpan := numSpansPerBatchWindow + 1
+ require.Equal(t, expectedNumWithLateSpan, msp.SpanCount(), "late span was not accounted for")
+}
+
+func TestSamplingPolicyDecisionNotSampled(t *testing.T) {
+ const maxSize = 100
+ const decisionWaitSeconds = 5
+ // For this test explicitly control the timer calls and batcher, and set a mock
+ // sampling policy evaluator.
+ msp := new(consumertest.TracesSink)
+ mpe := &mockPolicyEvaluator{}
+ mtt := &manualTTicker{}
+ tsp := &cascadingFilterSpanProcessor{
+ ctx: context.Background(),
+ nextConsumer: msp,
+ maxNumTraces: maxSize,
+ logger: zap.NewNop(),
+ decisionBatcher: newSyncIDBatcher(decisionWaitSeconds),
+ traceAcceptRules: []*TraceAcceptEvaluator{{Name: "mock-policy", Evaluator: mpe, ctx: context.TODO()}},
+ deleteChan: make(chan traceKey, maxSize),
+ policyTicker: mtt,
+ maxSpansPerSecond: 10000,
+ }
+
+ _, batches := generateIdsAndBatches(210)
+ currItem := 0
+ numSpansPerBatchWindow := 10
+ // First evaluations shouldn't have anything to evaluate, until decision wait time passed.
+ for evalNum := 0; evalNum < decisionWaitSeconds; evalNum++ {
+ for ; currItem < numSpansPerBatchWindow*(evalNum+1); currItem++ {
+ if err := tsp.ConsumeTraces(context.Background(), batches[currItem]); err != nil {
+ t.Errorf("Failed consuming traces: %v", err)
+ }
+ require.True(t, mtt.Started, "Time ticker was expected to have started")
+ }
+ tsp.samplingPolicyOnTick()
+ require.False(
+ t,
+ msp.SpanCount() != 0 || mpe.EvaluationCount != 0,
+ "policy for initial items was evaluated before decision wait period",
+ )
+ }
+
+ // Now the first batch that waited the decision period.
+ mpe.NextDecision = sampling.NotSampled
+ tsp.samplingPolicyOnTick()
+ require.EqualValues(t, 0, msp.SpanCount(), "exporter should have received zero spans")
+ require.EqualValues(t, 4, mpe.EvaluationCount, "policy should have been evaluated 4 times")
+
+ // Late span of a non-sampled trace should be ignored
+
+ if err := tsp.ConsumeTraces(context.Background(), batches[0]); err != nil {
+ t.Errorf("Failed consuming traces: %v", err)
+ }
+ require.Equal(t, 0, msp.SpanCount())
+
+ mpe.NextDecision = sampling.Unspecified
+ mpe.NextError = errors.New("mock policy error")
+ tsp.samplingPolicyOnTick()
+ require.EqualValues(t, 0, msp.SpanCount(), "exporter should have received zero spans")
+ require.EqualValues(t, 6, mpe.EvaluationCount, "policy should have been evaluated 6 times")
+
+ // Late span of a non-sampled trace should be ignored
+ if err := tsp.ConsumeTraces(context.Background(), batches[0]); err != nil {
+ t.Errorf("Failed consuming traces: %v", err)
+ }
+ require.Equal(t, 0, msp.SpanCount())
+}
+
+func TestSamplingPolicyDecisionDrop(t *testing.T) {
+ const maxSize = 100
+ const decisionWaitSeconds = 5
+ // For this test explicitly control the timer calls and batcher, and set a mock
+ // sampling policy evaluator.
+ msp := new(consumertest.TracesSink)
+ mpe := &mockPolicyEvaluator{}
+ mde := &mockDropEvaluator{}
+ mtt := &manualTTicker{}
+ tsp := &cascadingFilterSpanProcessor{
+ ctx: context.Background(),
+ nextConsumer: msp,
+ maxNumTraces: maxSize,
+ logger: zap.NewNop(),
+ decisionBatcher: newSyncIDBatcher(decisionWaitSeconds),
+ traceAcceptRules: []*TraceAcceptEvaluator{{Name: "mock-policy", Evaluator: mpe, ctx: context.TODO()}},
+ traceRejectRules: []*TraceRejectEvaluator{{Name: "mock-drop-eval", Evaluator: mde, ctx: context.TODO()}},
+ deleteChan: make(chan traceKey, maxSize),
+ policyTicker: mtt,
+ maxSpansPerSecond: 10000,
+ }
+
+ _, batches := generateIdsAndBatches(210)
+ currItem := 0
+ numSpansPerBatchWindow := 10
+ // First evaluations shouldn't have anything to evaluate, until decision wait time passed.
+ for evalNum := 0; evalNum < decisionWaitSeconds; evalNum++ {
+ for ; currItem < numSpansPerBatchWindow*(evalNum+1); currItem++ {
+ if err := tsp.ConsumeTraces(context.Background(), batches[currItem]); err != nil {
+ t.Errorf("Failed consuming traces: %v", err)
+ }
+ require.True(t, mtt.Started, "Time ticker was expected to have started")
+ }
+ tsp.samplingPolicyOnTick()
+ require.False(
+ t,
+ msp.SpanCount() != 0 || mpe.EvaluationCount != 0,
+ "policy for initial items was evaluated before decision wait period",
+ )
+ }
+
+ // Now the first batch that waited the decision period.
+ tsp.samplingPolicyOnTick()
+ require.EqualValues(t, 0, msp.SpanCount(), "exporter should have received zero spans since they were dropped")
+ require.EqualValues(t, 0, mpe.EvaluationCount, "policy should have been evaluated 0 times since it was dropped")
+}
+
+func TestSamplingPolicyDecisionNoLimitSet(t *testing.T) {
+ const maxSize = 100
+ const decisionWaitSeconds = 2
+ // For this test explicitly control the timer calls and batcher, and set a mock
+ // sampling policy evaluator.
+ msp := new(consumertest.TracesSink)
+ mtt := &manualTTicker{}
+ tsp := &cascadingFilterSpanProcessor{
+ ctx: context.Background(),
+ nextConsumer: msp,
+ maxNumTraces: maxSize,
+ logger: zap.NewNop(),
+ decisionBatcher: newSyncIDBatcher(decisionWaitSeconds),
+ deleteChan: make(chan traceKey, maxSize),
+ policyTicker: mtt,
+ maxSpansPerSecond: 0,
+ }
+
+ _, batches := generateIdsAndBatches(210)
+ currItem := 0
+ numSpansPerBatchWindow := 10
+
+ // First evaluations shouldn't have anything to evaluate, until decision wait time passed.
+ for evalNum := 0; evalNum < decisionWaitSeconds; evalNum++ {
+ for ; currItem < numSpansPerBatchWindow*(evalNum+1); currItem++ {
+ if err := tsp.ConsumeTraces(context.Background(), batches[currItem]); err != nil {
+ t.Errorf("Failed consuming traces: %v", err)
+ }
+ require.True(t, mtt.Started, "Time ticker was expected to have started")
+ }
+ tsp.samplingPolicyOnTick()
+ }
+
+ // Now the first batch that waited the decision period.
+ tsp.samplingPolicyOnTick()
+	// require.EqualValues(t, 210, msp.SpanCount(), "exporter should have received all spans since no rate limiting was applied")
+ require.EqualValues(t, 10, msp.SpanCount())
+}
+
+func TestMultipleBatchesAreCombinedIntoOne(t *testing.T) {
+ const maxSize = 100
+ const decisionWaitSeconds = 1
+ // For this test explicitly control the timer calls and batcher, and set a mock
+ // sampling policy evaluator.
+ msp := new(consumertest.TracesSink)
+ mpe := &mockPolicyEvaluator{}
+ mtt := &manualTTicker{}
+ tsp := &cascadingFilterSpanProcessor{
+ ctx: context.Background(),
+ nextConsumer: msp,
+ maxNumTraces: maxSize,
+ logger: zap.NewNop(),
+ decisionBatcher: newSyncIDBatcher(decisionWaitSeconds),
+ traceAcceptRules: []*TraceAcceptEvaluator{{Name: "mock-policy", Evaluator: mpe, ctx: context.TODO()}},
+ deleteChan: make(chan traceKey, maxSize),
+ policyTicker: mtt,
+ maxSpansPerSecond: 10000,
+ }
+
+ mpe.NextDecision = sampling.Sampled
+
+ traceIds, batches := generateIdsAndBatches(3)
+ for _, batch := range batches {
+ require.NoError(t, tsp.ConsumeTraces(context.Background(), batch))
+ }
+
+ tsp.samplingPolicyOnTick()
+ tsp.samplingPolicyOnTick()
+
+ require.EqualValues(t, 3, len(msp.AllTraces()), "There should be three batches, one for each trace")
+
+ expectedSpanIds := make(map[int][]pdata.SpanID)
+ expectedSpanIds[0] = []pdata.SpanID{
+ bigendianconverter.UInt64ToSpanID(uint64(1)),
+ }
+ expectedSpanIds[1] = []pdata.SpanID{
+ bigendianconverter.UInt64ToSpanID(uint64(2)),
+ bigendianconverter.UInt64ToSpanID(uint64(3)),
+ }
+ expectedSpanIds[2] = []pdata.SpanID{
+ bigendianconverter.UInt64ToSpanID(uint64(4)),
+ bigendianconverter.UInt64ToSpanID(uint64(5)),
+ bigendianconverter.UInt64ToSpanID(uint64(6)),
+ }
+
+ receivedTraces := msp.AllTraces()
+ for i, traceID := range traceIds {
+ trace := findTrace(receivedTraces, traceID)
+ require.NotNil(t, trace, "Trace was not received. TraceId %s", traceID.HexString())
+ require.EqualValues(t, i+1, trace.SpanCount(), "The trace should have all of its spans in a single batch")
+
+ expected := expectedSpanIds[i]
+ got := collectSpanIds(trace)
+
+ // might have received out of order, sort for comparison
+ sort.Slice(got, func(i, j int) bool {
+ a := bigendianconverter.SpanIDToUInt64(got[i])
+ b := bigendianconverter.SpanIDToUInt64(got[j])
+ return a < b
+ })
+
+ require.EqualValues(t, expected, got)
+ }
+}
+
+//nolint:unused
+func collectSpanIds(trace *pdata.Traces) []pdata.SpanID {
+ spanIDs := make([]pdata.SpanID, 0)
+
+ for i := 0; i < trace.ResourceSpans().Len(); i++ {
+ ilss := trace.ResourceSpans().At(i).InstrumentationLibrarySpans()
+
+ for j := 0; j < ilss.Len(); j++ {
+ ils := ilss.At(j)
+
+ for k := 0; k < ils.Spans().Len(); k++ {
+ span := ils.Spans().At(k)
+ spanIDs = append(spanIDs, span.SpanID())
+ }
+ }
+ }
+
+ return spanIDs
+}
+
+//nolint:unused
+func findTrace(a []pdata.Traces, traceID pdata.TraceID) *pdata.Traces {
+ for _, batch := range a {
+ id := batch.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(0).TraceID()
+ if traceID.Bytes() == id.Bytes() {
+ return &batch
+ }
+ }
+ return nil
+}
+
+func generateIdsAndBatches(numIds int) ([]pdata.TraceID, []pdata.Traces) {
+ traceIds := make([]pdata.TraceID, numIds)
+ spanID := 0
+ var tds []pdata.Traces
+ for i := 0; i < numIds; i++ {
+ traceIds[i] = bigendianconverter.UInt64ToTraceID(1, uint64(i+1))
+ // Send each span in a separate batch
+ for j := 0; j <= i; j++ {
+ td := simpleTraces()
+ span := td.ResourceSpans().At(0).InstrumentationLibrarySpans().At(0).Spans().At(0)
+ span.SetTraceID(traceIds[i])
+
+ spanID++
+ span.SetSpanID(bigendianconverter.UInt64ToSpanID(uint64(spanID)))
+ tds = append(tds, td)
+ }
+ }
+
+ return traceIds, tds
+}
+
+//nolint:unused
+func simpleTraces() pdata.Traces {
+ return simpleTracesWithID(pdata.NewTraceID([16]byte{1, 2, 3, 4}))
+}
+
+//nolint:unused
+func simpleTracesWithID(traceID pdata.TraceID) pdata.Traces {
+ traces := pdata.NewTraces()
+ rs := traces.ResourceSpans().AppendEmpty()
+
+ ils := rs.InstrumentationLibrarySpans().AppendEmpty()
+ span := ils.Spans().AppendEmpty()
+ span.SetTraceID(traceID)
+
+ return traces
+}
+
+type mockPolicyEvaluator struct {
+ NextDecision sampling.Decision
+ NextError error
+ EvaluationCount int
+ OnDroppedSpanCount int
+}
+
+type mockDropEvaluator struct{}
+
+var _ sampling.PolicyEvaluator = (*mockPolicyEvaluator)(nil)
+var _ sampling.DropTraceEvaluator = (*mockDropEvaluator)(nil)
+
+func (m *mockPolicyEvaluator) Evaluate(_ pdata.TraceID, _ *sampling.TraceData) sampling.Decision {
+ m.EvaluationCount++
+ return m.NextDecision
+}
+
+func (d *mockDropEvaluator) ShouldDrop(_ pdata.TraceID, _ *sampling.TraceData) bool {
+ return true
+}
+
+type manualTTicker struct {
+ Started bool
+}
+
+var _ tTicker = (*manualTTicker)(nil)
+
+func (t *manualTTicker) Start(time.Duration) {
+ t.Started = true
+}
+
+func (t *manualTTicker) OnTick() {
+}
+
+func (t *manualTTicker) Stop() {
+}
+
+type syncIDBatcher struct {
+ sync.Mutex
+ openBatch idbatcher.Batch
+ batchPipe chan idbatcher.Batch
+}
+
+var _ idbatcher.Batcher = (*syncIDBatcher)(nil)
+
+func newSyncIDBatcher(numBatches uint64) idbatcher.Batcher {
+ batches := make(chan idbatcher.Batch, numBatches)
+ for i := uint64(0); i < numBatches; i++ {
+ batches <- nil
+ }
+ return &syncIDBatcher{
+ batchPipe: batches,
+ }
+}
+
+func (s *syncIDBatcher) AddToCurrentBatch(id pdata.TraceID) {
+ s.Lock()
+ s.openBatch = append(s.openBatch, id)
+ s.Unlock()
+}
+
+func (s *syncIDBatcher) CloseCurrentAndTakeFirstBatch() (idbatcher.Batch, bool) {
+ s.Lock()
+ defer s.Unlock()
+ firstBatch := <-s.batchPipe
+ s.batchPipe <- s.openBatch
+ s.openBatch = nil
+ return firstBatch, true
+}
+
+func (s *syncIDBatcher) Stop() {
+}
diff --git a/processor/cascadingfilterprocessor/sampling/always_sample_test.go b/processor/cascadingfilterprocessor/sampling/always_sample_test.go
new file mode 100644
index 000000000000..308c92235b7a
--- /dev/null
+++ b/processor/cascadingfilterprocessor/sampling/always_sample_test.go
@@ -0,0 +1,40 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sampling
+
+import (
+ "math"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "go.opentelemetry.io/collector/model/pdata"
+ "go.uber.org/zap"
+)
+
+func newAlwaysSample() *policyEvaluator {
+ return &policyEvaluator{
+ logger: zap.NewNop(),
+ maxSpansPerSecond: math.MaxInt32,
+ }
+}
+
+func TestEvaluate_AlwaysSample(t *testing.T) {
+ filter := newAlwaysSample()
+ decision := filter.Evaluate(
+ pdata.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}),
+ newTraceStringAttrs(map[string]pdata.AttributeValue{}, "example", "value"),
+ )
+ assert.Equal(t, decision, Sampled)
+}
diff --git a/processor/cascadingfilterprocessor/sampling/doc.go b/processor/cascadingfilterprocessor/sampling/doc.go
new file mode 100644
index 000000000000..c41ee2577889
--- /dev/null
+++ b/processor/cascadingfilterprocessor/sampling/doc.go
@@ -0,0 +1,17 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package sampling contains the interfaces and data types used to implement
+// the various sampling policies.
+package sampling
diff --git a/processor/cascadingfilterprocessor/sampling/drop_trace_factory.go b/processor/cascadingfilterprocessor/sampling/drop_trace_factory.go
new file mode 100644
index 000000000000..16acc95b1d55
--- /dev/null
+++ b/processor/cascadingfilterprocessor/sampling/drop_trace_factory.go
@@ -0,0 +1,127 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sampling
+
+import (
+ "regexp"
+
+ "go.opentelemetry.io/collector/model/pdata"
+ "go.uber.org/zap"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/processor/cascadingfilterprocessor/config"
+)
+
+type dropTraceEvaluator struct {
+ numericAttr *numericAttributeFilter
+ stringAttr *stringAttributeFilter
+ operationRe *regexp.Regexp
+
+ logger *zap.Logger
+}
+
+var _ DropTraceEvaluator = (*dropTraceEvaluator)(nil)
+
+	// NewDropTraceEvaluator creates a drop trace evaluator that checks whether a trace should be dropped
+func NewDropTraceEvaluator(logger *zap.Logger, cfg config.TraceRejectCfg) (DropTraceEvaluator, error) {
+ numericAttrFilter := createNumericAttributeFilter(cfg.NumericAttributeCfg)
+ stringAttrFilter := createStringAttributeFilter(cfg.StringAttributeCfg)
+
+ var operationRe *regexp.Regexp
+ var err error
+
+ if cfg.NamePattern != nil {
+ operationRe, err = regexp.Compile(*cfg.NamePattern)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return &dropTraceEvaluator{
+ stringAttr: stringAttrFilter,
+ numericAttr: numericAttrFilter,
+ operationRe: operationRe,
+ logger: logger,
+ }, nil
+}
+
+	// ShouldDrop checks whether the trace should be dropped
+func (dte *dropTraceEvaluator) ShouldDrop(_ pdata.TraceID, trace *TraceData) bool {
+ trace.Lock()
+ batches := trace.ReceivedBatches
+ trace.Unlock()
+
+ matchingOperationFound := false
+ matchingStringAttrFound := false
+ matchingNumericAttrFound := false
+
+ for _, batch := range batches {
+ rs := batch.ResourceSpans()
+
+ for i := 0; i < rs.Len(); i++ {
+ if dte.stringAttr != nil || dte.numericAttr != nil {
+ res := rs.At(i).Resource()
+ if !matchingStringAttrFound && dte.stringAttr != nil {
+ matchingStringAttrFound = checkIfStringAttrFound(res.Attributes(), dte.stringAttr)
+ }
+ if !matchingNumericAttrFound && dte.numericAttr != nil {
+ matchingNumericAttrFound = checkIfNumericAttrFound(res.Attributes(), dte.numericAttr)
+ }
+ }
+
+ ils := rs.At(i).InstrumentationLibrarySpans()
+ for j := 0; j < ils.Len(); j++ {
+ spans := ils.At(j).Spans()
+ for k := 0; k < spans.Len(); k++ {
+ span := spans.At(k)
+
+ if dte.stringAttr != nil || dte.numericAttr != nil {
+ if !matchingStringAttrFound && dte.stringAttr != nil {
+ matchingStringAttrFound = checkIfStringAttrFound(span.Attributes(), dte.stringAttr)
+ }
+ if !matchingNumericAttrFound && dte.numericAttr != nil {
+ matchingNumericAttrFound = checkIfNumericAttrFound(span.Attributes(), dte.numericAttr)
+ }
+ }
+
+ if dte.operationRe != nil && !matchingOperationFound {
+ if dte.operationRe.MatchString(span.Name()) {
+ matchingOperationFound = true
+ }
+ }
+ }
+ }
+ }
+ }
+
+ conditionMet := struct {
+ operationName, stringAttr, numericAttr bool
+ }{
+ operationName: true,
+ stringAttr: true,
+ numericAttr: true,
+ }
+
+ if dte.operationRe != nil {
+ conditionMet.operationName = matchingOperationFound
+ }
+ if dte.numericAttr != nil {
+ conditionMet.numericAttr = matchingNumericAttrFound
+ }
+ if dte.stringAttr != nil {
+ conditionMet.stringAttr = matchingStringAttrFound
+ }
+
+ return conditionMet.operationName && conditionMet.numericAttr && conditionMet.stringAttr
+}
diff --git a/processor/cascadingfilterprocessor/sampling/empty_test.go b/processor/cascadingfilterprocessor/sampling/empty_test.go
new file mode 100644
index 000000000000..28080c37f33c
--- /dev/null
+++ b/processor/cascadingfilterprocessor/sampling/empty_test.go
@@ -0,0 +1,15 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sampling
diff --git a/processor/cascadingfilterprocessor/sampling/numeric_tag_filter_test.go b/processor/cascadingfilterprocessor/sampling/numeric_tag_filter_test.go
new file mode 100644
index 000000000000..c824e1a43646
--- /dev/null
+++ b/processor/cascadingfilterprocessor/sampling/numeric_tag_filter_test.go
@@ -0,0 +1,105 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sampling
+
+import (
+ "math"
+ "testing"
+
+ "github.com/google/uuid"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.opentelemetry.io/collector/model/pdata"
+ "go.uber.org/zap"
+)
+
+func newNumericAttributeFilter(minValue int64, maxValue int64) *policyEvaluator {
+ return &policyEvaluator{
+ logger: zap.NewNop(),
+ numericAttr: &numericAttributeFilter{
+ key: "example",
+ minValue: minValue,
+ maxValue: maxValue,
+ },
+ maxSpansPerSecond: math.MaxInt32,
+ }
+}
+
+func TestNumericTagFilter(t *testing.T) {
+ var empty = map[string]pdata.AttributeValue{}
+ filter := newNumericAttributeFilter(math.MinInt32, math.MaxInt32)
+
+ resAttr := map[string]pdata.AttributeValue{}
+ resAttr["example"] = pdata.NewAttributeValueInt(8)
+
+ cases := []struct {
+ Desc string
+ Trace *TraceData
+ Decision Decision
+ }{
+ {
+ Desc: "nonmatching span attribute",
+ Trace: newTraceIntAttrs(empty, "non_matching", math.MinInt32),
+ Decision: NotSampled,
+ },
+ {
+ Desc: "span attribute with lower limit",
+ Trace: newTraceIntAttrs(empty, "example", math.MinInt32),
+ Decision: Sampled,
+ },
+ {
+ Desc: "span attribute with upper limit",
+ Trace: newTraceIntAttrs(empty, "example", math.MaxInt32),
+ Decision: Sampled,
+ },
+ {
+ Desc: "span attribute below min limit",
+ Trace: newTraceIntAttrs(empty, "example", math.MinInt32-1),
+ Decision: NotSampled,
+ },
+ {
+ Desc: "span attribute above max limit",
+ Trace: newTraceIntAttrs(empty, "example", math.MaxInt32+1),
+ Decision: NotSampled,
+ },
+ }
+
+ for _, c := range cases {
+ t.Run(c.Desc, func(t *testing.T) {
+ u, err := uuid.NewRandom()
+ require.NoError(t, err)
+ decision := filter.Evaluate(pdata.NewTraceID(u), c.Trace)
+ assert.Equal(t, decision, c.Decision)
+ })
+ }
+}
+
+func newTraceIntAttrs(nodeAttrs map[string]pdata.AttributeValue, spanAttrKey string, spanAttrValue int64) *TraceData {
+ var traceBatches []pdata.Traces
+ traces := pdata.NewTraces()
+ rs := traces.ResourceSpans().AppendEmpty()
+ rs.Resource().Attributes().InitFromMap(nodeAttrs)
+ ils := rs.InstrumentationLibrarySpans().AppendEmpty()
+ span := ils.Spans().AppendEmpty()
+ span.SetTraceID(pdata.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}))
+ span.SetSpanID(pdata.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8}))
+ attributes := make(map[string]pdata.AttributeValue)
+ attributes[spanAttrKey] = pdata.NewAttributeValueInt(spanAttrValue)
+ span.Attributes().InitFromMap(attributes)
+ traceBatches = append(traceBatches, traces)
+ return &TraceData{
+ ReceivedBatches: traceBatches,
+ }
+}
diff --git a/processor/cascadingfilterprocessor/sampling/policy.go b/processor/cascadingfilterprocessor/sampling/policy.go
new file mode 100644
index 000000000000..8f6d23122f3d
--- /dev/null
+++ b/processor/cascadingfilterprocessor/sampling/policy.go
@@ -0,0 +1,77 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sampling
+
+import (
+ "sync"
+ "time"
+
+ "go.opentelemetry.io/collector/model/pdata"
+)
+
+// TraceData stores the sampling related trace data.
+type TraceData struct {
+ sync.Mutex
+ // Decisions gives the current status of the sampling decision for each policy.
+ Decisions []Decision
+ // FinalDecision describes the ultimate fate of the trace
+ FinalDecision Decision
+	// SelectedByProbabilisticFilter indicates whether this trace was selected by the probabilistic filter
+ SelectedByProbabilisticFilter bool
+	// ArrivalTime is the time the first span for the trace was received.
+ ArrivalTime time.Time
+	// DecisionTime is the time when the sampling decision was taken.
+ DecisionTime time.Time
+	// SpanCount tracks the number of spans on the trace.
+ SpanCount int32
+ // ReceivedBatches stores all the batches received for the trace.
+ ReceivedBatches []pdata.Traces
+}
+
+// Decision gives the status of sampling decision.
+type Decision int32
+
+const (
+ // Unspecified indicates that the status of the decision was not set yet.
+ Unspecified Decision = iota
+ // Pending indicates that the policy was not evaluated yet.
+ Pending
+ // Sampled is used to indicate that the decision was already taken
+ // to sample the data.
+ Sampled
+ // SecondChance is a special category that allows to make a final decision
+ // after all batches are processed. It should be converted to Sampled or NotSampled
+ SecondChance
+ // NotSampled is used to indicate that the decision was already taken
+ // to not sample the data.
+ NotSampled
+ // Dropped is used when data needs to be purged before the sampling policy
+ // had a chance to evaluate it.
+ Dropped
+)
+
+// PolicyEvaluator implements a cascading policy evaluator,
+// which makes a sampling decision for a given trace when requested.
+type PolicyEvaluator interface {
+ // Evaluate looks at the trace data and returns a corresponding SamplingDecision.
+ Evaluate(traceID pdata.TraceID, trace *TraceData) Decision
+}
+
+// DropTraceEvaluator implements a cascading policy evaluator,
+	// which checks whether a trace should be dropped entirely before any other processing takes place
+type DropTraceEvaluator interface {
+	// ShouldDrop checks whether the trace should be dropped
+ ShouldDrop(traceID pdata.TraceID, trace *TraceData) bool
+}
diff --git a/processor/cascadingfilterprocessor/sampling/policy_factory.go b/processor/cascadingfilterprocessor/sampling/policy_factory.go
new file mode 100644
index 000000000000..a3d0ab20a6e1
--- /dev/null
+++ b/processor/cascadingfilterprocessor/sampling/policy_factory.go
@@ -0,0 +1,133 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sampling
+
+import (
+ "errors"
+ "regexp"
+ "time"
+
+ "go.uber.org/zap"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/processor/cascadingfilterprocessor/config"
+)
+
+type numericAttributeFilter struct {
+ key string
+ minValue, maxValue int64
+}
+
+type stringAttributeFilter struct {
+ key string
+ values map[string]struct{}
+}
+
+type policyEvaluator struct {
+ numericAttr *numericAttributeFilter
+ stringAttr *stringAttributeFilter
+
+ operationRe *regexp.Regexp
+ minDuration *time.Duration
+ minNumberOfSpans *int
+ minNumberOfErrors *int
+
+ currentSecond int64
+ maxSpansPerSecond int32
+ spansInCurrentSecond int32
+
+ invertMatch bool
+
+ logger *zap.Logger
+}
+
+var _ PolicyEvaluator = (*policyEvaluator)(nil)
+
+func createNumericAttributeFilter(cfg *config.NumericAttributeCfg) *numericAttributeFilter {
+ if cfg == nil {
+ return nil
+ }
+
+ return &numericAttributeFilter{
+ key: cfg.Key,
+ minValue: cfg.MinValue,
+ maxValue: cfg.MaxValue,
+ }
+}
+
+func createStringAttributeFilter(cfg *config.StringAttributeCfg) *stringAttributeFilter {
+ if cfg == nil {
+ return nil
+ }
+
+ valuesMap := make(map[string]struct{})
+ for _, value := range cfg.Values {
+ if value != "" {
+ valuesMap[value] = struct{}{}
+ }
+ }
+
+ return &stringAttributeFilter{
+ key: cfg.Key,
+ values: valuesMap,
+ }
+}
+
+// NewProbabilisticFilter creates a policy evaluator intended for selecting samples probabilistically
+func NewProbabilisticFilter(logger *zap.Logger, maxSpanRate int32) (PolicyEvaluator, error) {
+ return &policyEvaluator{
+ logger: logger,
+ currentSecond: 0,
+ spansInCurrentSecond: 0,
+ maxSpansPerSecond: maxSpanRate,
+ }, nil
+}
+
+// NewFilter creates a policy evaluator that samples all traces with the specified criteria
+func NewFilter(logger *zap.Logger, cfg *config.TraceAcceptCfg) (PolicyEvaluator, error) {
+ numericAttrFilter := createNumericAttributeFilter(cfg.NumericAttributeCfg)
+ stringAttrFilter := createStringAttributeFilter(cfg.StringAttributeCfg)
+
+ var operationRe *regexp.Regexp
+ var err error
+
+ if cfg.PropertiesCfg.NamePattern != nil {
+ operationRe, err = regexp.Compile(*cfg.PropertiesCfg.NamePattern)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if cfg.PropertiesCfg.MinDuration != nil && *cfg.PropertiesCfg.MinDuration < 0*time.Second {
+ return nil, errors.New("minimum span duration must be a non-negative number")
+ }
+
+ if cfg.PropertiesCfg.MinNumberOfSpans != nil && *cfg.PropertiesCfg.MinNumberOfSpans < 1 {
+ return nil, errors.New("minimum number of spans must be a positive number")
+ }
+
+ return &policyEvaluator{
+ stringAttr: stringAttrFilter,
+ numericAttr: numericAttrFilter,
+ operationRe: operationRe,
+ minDuration: cfg.PropertiesCfg.MinDuration,
+ minNumberOfSpans: cfg.PropertiesCfg.MinNumberOfSpans,
+ minNumberOfErrors: cfg.PropertiesCfg.MinNumberOfErrors,
+ logger: logger,
+ currentSecond: 0,
+ spansInCurrentSecond: 0,
+ maxSpansPerSecond: cfg.SpansPerSecond,
+ invertMatch: cfg.InvertMatch,
+ }, nil
+}
diff --git a/processor/cascadingfilterprocessor/sampling/policy_filter.go b/processor/cascadingfilterprocessor/sampling/policy_filter.go
new file mode 100644
index 000000000000..b78313ba16ea
--- /dev/null
+++ b/processor/cascadingfilterprocessor/sampling/policy_filter.go
@@ -0,0 +1,227 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sampling
+
+import (
+ "time"
+
+ "go.opentelemetry.io/collector/model/pdata"
+)
+
+func tsToMicros(ts pdata.Timestamp) int64 {
+ return int64(ts / 1000)
+}
+
+func checkIfNumericAttrFound(attrs pdata.AttributeMap, filter *numericAttributeFilter) bool {
+ if v, ok := attrs.Get(filter.key); ok {
+ value := v.IntVal()
+ if value >= filter.minValue && value <= filter.maxValue {
+ return true
+ }
+ }
+ return false
+}
+
+func checkIfStringAttrFound(attrs pdata.AttributeMap, filter *stringAttributeFilter) bool {
+ if v, ok := attrs.Get(filter.key); ok {
+ truncableStr := v.StringVal()
+ if len(truncableStr) > 0 {
+ if _, ok := filter.values[truncableStr]; ok {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// evaluateRules goes through the defined properties and checks if they are matched
+func (pe *policyEvaluator) evaluateRules(_ pdata.TraceID, trace *TraceData) Decision {
+ trace.Lock()
+ batches := trace.ReceivedBatches
+ trace.Unlock()
+
+ matchingOperationFound := false
+ matchingStringAttrFound := false
+ matchingNumericAttrFound := false
+
+ spanCount := 0
+ errorCount := 0
+ minStartTime := int64(0)
+ maxEndTime := int64(0)
+
+ for _, batch := range batches {
+ rs := batch.ResourceSpans()
+
+ for i := 0; i < rs.Len(); i++ {
+ if pe.stringAttr != nil || pe.numericAttr != nil {
+ res := rs.At(i).Resource()
+ if !matchingStringAttrFound && pe.stringAttr != nil {
+ matchingStringAttrFound = checkIfStringAttrFound(res.Attributes(), pe.stringAttr)
+ }
+ if !matchingNumericAttrFound && pe.numericAttr != nil {
+ matchingNumericAttrFound = checkIfNumericAttrFound(res.Attributes(), pe.numericAttr)
+ }
+ }
+
+ ils := rs.At(i).InstrumentationLibrarySpans()
+ for j := 0; j < ils.Len(); j++ {
+ spans := ils.At(j).Spans()
+ spanCount += spans.Len()
+ for k := 0; k < spans.Len(); k++ {
+ span := spans.At(k)
+
+ if pe.stringAttr != nil || pe.numericAttr != nil {
+ if !matchingStringAttrFound && pe.stringAttr != nil {
+ matchingStringAttrFound = checkIfStringAttrFound(span.Attributes(), pe.stringAttr)
+ }
+ if !matchingNumericAttrFound && pe.numericAttr != nil {
+ matchingNumericAttrFound = checkIfNumericAttrFound(span.Attributes(), pe.numericAttr)
+ }
+ }
+
+ if pe.operationRe != nil && !matchingOperationFound {
+ if pe.operationRe.MatchString(span.Name()) {
+ matchingOperationFound = true
+ }
+ }
+
+ if pe.minDuration != nil {
+ startTs := tsToMicros(span.StartTimestamp())
+ endTs := tsToMicros(span.EndTimestamp())
+
+ if minStartTime == 0 {
+ minStartTime = startTs
+ maxEndTime = endTs
+ } else {
+ if startTs < minStartTime {
+ minStartTime = startTs
+ }
+ if endTs > maxEndTime {
+ maxEndTime = endTs
+ }
+ }
+ }
+
+ if span.Status().Code() == pdata.StatusCodeError {
+ errorCount++
+ }
+ }
+ }
+ }
+ }
+
+ conditionMet := struct {
+ operationName, minDuration, minSpanCount, stringAttr, numericAttr, minErrorCount bool
+ }{
+ operationName: true,
+ minDuration: true,
+ minSpanCount: true,
+ stringAttr: true,
+ numericAttr: true,
+ minErrorCount: true,
+ }
+
+ if pe.operationRe != nil {
+ conditionMet.operationName = matchingOperationFound
+ }
+ if pe.minNumberOfSpans != nil {
+ conditionMet.minSpanCount = spanCount >= *pe.minNumberOfSpans
+ }
+ if pe.minDuration != nil {
+ conditionMet.minDuration = maxEndTime > minStartTime && maxEndTime-minStartTime >= pe.minDuration.Microseconds()
+ }
+ if pe.numericAttr != nil {
+ conditionMet.numericAttr = matchingNumericAttrFound
+ }
+ if pe.stringAttr != nil {
+ conditionMet.stringAttr = matchingStringAttrFound
+ }
+ if pe.minNumberOfErrors != nil {
+ conditionMet.minErrorCount = errorCount >= *pe.minNumberOfErrors
+ }
+
+ if conditionMet.minSpanCount &&
+ conditionMet.minDuration &&
+ conditionMet.operationName &&
+ conditionMet.numericAttr &&
+ conditionMet.stringAttr &&
+ conditionMet.minErrorCount {
+ if pe.invertMatch {
+ return NotSampled
+ }
+ return Sampled
+ }
+
+ if pe.invertMatch {
+ return Sampled
+ }
+ return NotSampled
+}
+
+func (pe *policyEvaluator) shouldConsider(currSecond int64, trace *TraceData) bool {
+ if pe.maxSpansPerSecond < 0 {
+ // This emits "second chance" traces
+ return true
+ } else if trace.SpanCount > pe.maxSpansPerSecond {
+ // This trace will never fit, there are more spans than max limit
+ return false
+ } else if pe.currentSecond == currSecond && trace.SpanCount > pe.maxSpansPerSecond-pe.spansInCurrentSecond {
+ // This trace will not fit in this second, no way
+ return false
+ } else {
+ // This has some chances
+ return true
+ }
+}
+
+func (pe *policyEvaluator) emitsSecondChance() bool {
+ return pe.maxSpansPerSecond < 0
+}
+
+func (pe *policyEvaluator) updateRate(currSecond int64, numSpans int32) Decision {
+ if pe.currentSecond != currSecond {
+ pe.currentSecond = currSecond
+ pe.spansInCurrentSecond = 0
+ }
+
+ spansInSecondIfSampled := pe.spansInCurrentSecond + numSpans
+ if spansInSecondIfSampled <= pe.maxSpansPerSecond {
+ pe.spansInCurrentSecond = spansInSecondIfSampled
+ return Sampled
+ }
+
+ return NotSampled
+}
+
+// Evaluate looks at the trace data and returns a corresponding SamplingDecision. Also takes into account
+// the usage of sampling rate budget
+func (pe *policyEvaluator) Evaluate(traceID pdata.TraceID, trace *TraceData) Decision {
+ currSecond := time.Now().Unix()
+
+ if !pe.shouldConsider(currSecond, trace) {
+ return NotSampled
+ }
+
+ decision := pe.evaluateRules(traceID, trace)
+ if decision != Sampled {
+ return decision
+ }
+
+ if pe.emitsSecondChance() {
+ return SecondChance
+ }
+
+ return pe.updateRate(currSecond, trace.SpanCount)
+}
diff --git a/processor/cascadingfilterprocessor/sampling/rate_limiting_test.go b/processor/cascadingfilterprocessor/sampling/rate_limiting_test.go
new file mode 100644
index 000000000000..e9524190c1d0
--- /dev/null
+++ b/processor/cascadingfilterprocessor/sampling/rate_limiting_test.go
@@ -0,0 +1,58 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sampling
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "go.opentelemetry.io/collector/model/pdata"
+ "go.uber.org/zap"
+)
+
+func newRateLimiterFilter(maxRate int32) *policyEvaluator {
+ return &policyEvaluator{
+ logger: zap.NewNop(),
+ maxSpansPerSecond: maxRate,
+ }
+}
+
+func TestRateLimiter(t *testing.T) {
+ var empty = map[string]pdata.AttributeValue{}
+
+ trace := newTraceStringAttrs(empty, "example", "value")
+ traceID := pdata.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16})
+ rateLimiter := newRateLimiterFilter(3)
+
+ // Trace span count greater than spans per second
+ trace.SpanCount = 10
+ decision := rateLimiter.Evaluate(traceID, trace)
+ assert.Equal(t, decision, NotSampled)
+
+ // Trace span count just above to spans per second
+ trace.SpanCount = 4
+ decision = rateLimiter.Evaluate(traceID, trace)
+ assert.Equal(t, decision, NotSampled)
+
+ // Trace span count equal spans per second
+ trace.SpanCount = 3
+ decision = rateLimiter.Evaluate(traceID, trace)
+ assert.Equal(t, decision, Sampled)
+
+ // Trace span count less than spans per second
+ trace.SpanCount = 0
+ decision = rateLimiter.Evaluate(traceID, trace)
+ assert.Equal(t, decision, Sampled)
+}
diff --git a/processor/cascadingfilterprocessor/sampling/span_properties_filter_test.go b/processor/cascadingfilterprocessor/sampling/span_properties_filter_test.go
new file mode 100644
index 000000000000..e2ce44f908df
--- /dev/null
+++ b/processor/cascadingfilterprocessor/sampling/span_properties_filter_test.go
@@ -0,0 +1,185 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sampling
+
+import (
+ "math"
+ "regexp"
+ "testing"
+ "time"
+
+ "github.com/google/uuid"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.opentelemetry.io/collector/model/pdata"
+ "go.uber.org/zap"
+)
+
+var (
+ operationNamePattern = "foo.*"
+ minDuration = 500 * time.Microsecond
+ minNumberOfSpans = 2
+ minNumberOfErrors = 1
+)
+
+func newSpanPropertiesFilter(t *testing.T, operationNamePattern *string, minDuration *time.Duration, minNumberOfSpans *int, minNumberOfErrors *int) policyEvaluator {
+ var operationRe *regexp.Regexp
+ var err error
+ if operationNamePattern != nil {
+ operationRe, err = regexp.Compile(*operationNamePattern)
+ require.NoError(t, err)
+ }
+ return policyEvaluator{
+ logger: zap.NewNop(),
+ operationRe: operationRe,
+ minNumberOfSpans: minNumberOfSpans,
+ minDuration: minDuration,
+ maxSpansPerSecond: math.MaxInt32,
+ minNumberOfErrors: minNumberOfErrors,
+ }
+}
+
+func evaluate(t *testing.T, evaluator policyEvaluator, traces *TraceData, expectedDecision Decision) {
+ u, err := uuid.NewRandom()
+ require.NoError(t, err)
+ decision := evaluator.Evaluate(pdata.NewTraceID(u), traces)
+ assert.Equal(t, expectedDecision, decision)
+}
+
+func TestPartialSpanPropertiesFilter(t *testing.T) {
+ opFilter := newSpanPropertiesFilter(t, &operationNamePattern, nil, nil, nil)
+ durationFilter := newSpanPropertiesFilter(t, nil, &minDuration, nil, nil)
+ spansFilter := newSpanPropertiesFilter(t, nil, nil, &minNumberOfSpans, nil)
+ errorsFilter := newSpanPropertiesFilter(t, nil, nil, nil, &minNumberOfErrors)
+
+ cases := []struct {
+ Desc string
+ Evaluator policyEvaluator
+ }{
+ {
+ Desc: "operation name filter",
+ Evaluator: opFilter,
+ },
+ {
+ Desc: "duration filter",
+ Evaluator: durationFilter,
+ },
+ {
+ Desc: "number of spans filter",
+ Evaluator: spansFilter,
+ },
+ {
+ Desc: "errors filter",
+ Evaluator: errorsFilter,
+ },
+ }
+
+ matchingTraces := newTraceAttrs("foobar", 1000*time.Microsecond, 100, 100)
+ nonMatchingTraces := newTraceAttrs("bar", 100*time.Microsecond, 1, 0)
+
+ for _, c := range cases {
+ t.Run(c.Desc, func(t *testing.T) {
+ c.Evaluator.invertMatch = false
+ evaluate(t, c.Evaluator, matchingTraces, Sampled)
+ evaluate(t, c.Evaluator, nonMatchingTraces, NotSampled)
+
+ c.Evaluator.invertMatch = true
+ evaluate(t, c.Evaluator, matchingTraces, NotSampled)
+ evaluate(t, c.Evaluator, nonMatchingTraces, Sampled)
+ })
+ }
+}
+
+func TestSpanPropertiesFilter(t *testing.T) {
+ cases := []struct {
+ Desc string
+ Trace *TraceData
+ Decision Decision
+ }{
+ {
+ Desc: "fully matching",
+ Trace: newTraceAttrs("foobar", 1000*time.Microsecond, 100, 100),
+ Decision: Sampled,
+ },
+ {
+ Desc: "nonmatching operation name",
+ Trace: newTraceAttrs("non_matching", 1000*time.Microsecond, 100, 100),
+ Decision: NotSampled,
+ },
+ {
+ Desc: "nonmatching duration",
+ Trace: newTraceAttrs("foobar", 100*time.Microsecond, 100, 100),
+ Decision: NotSampled,
+ },
+ {
+ Desc: "nonmatching number of spans",
+ Trace: newTraceAttrs("foobar", 1000*time.Microsecond, 1, 1),
+ Decision: NotSampled,
+ },
+ {
+ Desc: "nonmatching number of errors",
+ Trace: newTraceAttrs("foobar", 1000*time.Microsecond, 100, 0),
+ Decision: NotSampled,
+ },
+ }
+
+ for _, c := range cases {
+ t.Run(c.Desc, func(t *testing.T) {
+ // Regular match
+ filter := newSpanPropertiesFilter(t, &operationNamePattern, &minDuration, &minNumberOfSpans, &minNumberOfErrors)
+ evaluate(t, filter, c.Trace, c.Decision)
+
+ // Invert match
+ filter.invertMatch = true
+ invertDecision := Sampled
+ if c.Decision == Sampled {
+ invertDecision = NotSampled
+ }
+ evaluate(t, filter, c.Trace, invertDecision)
+ })
+ }
+}
+
+func newTraceAttrs(operationName string, duration time.Duration, numberOfSpans int, numberOfErrors int) *TraceData {
+ endTs := time.Now().UnixNano()
+ startTs := endTs - duration.Nanoseconds()
+
+ var traceBatches []pdata.Traces
+
+ traces := pdata.NewTraces()
+ rs := traces.ResourceSpans().AppendEmpty()
+ ils := rs.InstrumentationLibrarySpans().AppendEmpty()
+
+ spans := ils.Spans()
+ spans.EnsureCapacity(numberOfSpans)
+
+ for i := 0; i < numberOfSpans; i++ {
+ span := spans.AppendEmpty()
+ span.SetName(operationName)
+ span.SetStartTimestamp(pdata.Timestamp(startTs))
+ span.SetEndTimestamp(pdata.Timestamp(endTs))
+ }
+
+ for i := 0; i < numberOfErrors && i < numberOfSpans; i++ {
+ span := spans.At(i)
+ span.Status().SetCode(pdata.StatusCodeError)
+ }
+
+ traceBatches = append(traceBatches, traces)
+
+ return &TraceData{
+ ReceivedBatches: traceBatches,
+ }
+}
diff --git a/processor/cascadingfilterprocessor/sampling/string_tag_filter_test.go b/processor/cascadingfilterprocessor/sampling/string_tag_filter_test.go
new file mode 100644
index 000000000000..0ee9882980ca
--- /dev/null
+++ b/processor/cascadingfilterprocessor/sampling/string_tag_filter_test.go
@@ -0,0 +1,103 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sampling
+
+import (
+ "math"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "go.opentelemetry.io/collector/model/pdata"
+ "go.uber.org/zap"
+)
+
+func newStringAttributeFilter() *policyEvaluator {
+ return &policyEvaluator{
+ logger: zap.NewNop(),
+ stringAttr: &stringAttributeFilter{
+ key: "example",
+ values: map[string]struct{}{"value": {}},
+ },
+ maxSpansPerSecond: math.MaxInt32,
+ }
+}
+
+func TestStringTagFilter(t *testing.T) {
+
+ var empty = map[string]pdata.AttributeValue{}
+ filter := newStringAttributeFilter()
+
+ cases := []struct {
+ Desc string
+ Trace *TraceData
+ Decision Decision
+ }{
+ {
+ Desc: "nonmatching node attribute key",
+ Trace: newTraceStringAttrs(map[string]pdata.AttributeValue{"non_matching": pdata.NewAttributeValueString("value")}, "", ""),
+ Decision: NotSampled,
+ },
+ {
+ Desc: "nonmatching node attribute value",
+ Trace: newTraceStringAttrs(map[string]pdata.AttributeValue{"example": pdata.NewAttributeValueString("non_matching")}, "", ""),
+ Decision: NotSampled,
+ },
+ {
+ Desc: "matching node attribute",
+ Trace: newTraceStringAttrs(map[string]pdata.AttributeValue{"example": pdata.NewAttributeValueString("value")}, "", ""),
+ Decision: Sampled,
+ },
+ {
+ Desc: "nonmatching span attribute key",
+ Trace: newTraceStringAttrs(empty, "nonmatching", "value"),
+ Decision: NotSampled,
+ },
+ {
+ Desc: "nonmatching span attribute value",
+ Trace: newTraceStringAttrs(empty, "example", "nonmatching"),
+ Decision: NotSampled,
+ },
+ {
+ Desc: "matching span attribute",
+ Trace: newTraceStringAttrs(empty, "example", "value"),
+ Decision: Sampled,
+ },
+ }
+
+ for _, c := range cases {
+ t.Run(c.Desc, func(t *testing.T) {
+ decision := filter.Evaluate(pdata.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}), c.Trace)
+ assert.Equal(t, decision, c.Decision)
+ })
+ }
+}
+
+func newTraceStringAttrs(nodeAttrs map[string]pdata.AttributeValue, spanAttrKey string, spanAttrValue string) *TraceData {
+ var traceBatches []pdata.Traces
+ traces := pdata.NewTraces()
+ rs := traces.ResourceSpans().AppendEmpty()
+ rs.Resource().Attributes().InitFromMap(nodeAttrs)
+ ils := rs.InstrumentationLibrarySpans().AppendEmpty()
+ span := ils.Spans().AppendEmpty()
+ span.SetTraceID(pdata.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}))
+ span.SetSpanID(pdata.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8}))
+ attributes := make(map[string]pdata.AttributeValue)
+ attributes[spanAttrKey] = pdata.NewAttributeValueString(spanAttrValue)
+ span.Attributes().InitFromMap(attributes)
+ traceBatches = append(traceBatches, traces)
+ return &TraceData{
+ ReceivedBatches: traceBatches,
+ }
+}
diff --git a/processor/cascadingfilterprocessor/testdata/cascading_filter_config.yaml b/processor/cascadingfilterprocessor/testdata/cascading_filter_config.yaml
new file mode 100644
index 000000000000..55a6e5675e82
--- /dev/null
+++ b/processor/cascadingfilterprocessor/testdata/cascading_filter_config.yaml
@@ -0,0 +1,66 @@
+receivers:
+ nop:
+
+exporters:
+ nop:
+
+processors:
+ cascading_filter/1:
+ probabilistic_filtering_rate: 100
+ trace_reject_filters:
+ - name: healthcheck-rule
+ name_pattern: "health.*"
+ trace_accept_filters:
+ - name: include-errors
+ spans_per_second: 200
+ properties:
+ min_number_of_errors: 2
+ - name: include-long-traces
+ spans_per_second: 300
+ properties:
+ min_number_of_spans: 10
+ - name: include-high-latency
+ spans_per_second: 400
+ properties:
+ min_duration: 9s
+ cascading_filter/2:
+ decision_wait: 10s
+ num_traces: 100
+ expected_new_traces_per_sec: 10
+ spans_per_second: 1000
+ probabilistic_filtering_ratio: 0.1
+ trace_reject_filters:
+ - name: healthcheck-rule
+ name_pattern: "health.*"
+ - name: remove-all-traces-with-healthcheck-service
+ string_attribute: {key: service.name, values: [healthcheck]}
+ trace_accept_filters:
+ - name: test-policy-1
+ - name: test-policy-2
+ numeric_attribute: {key: key1, min_value: 50, max_value: 100}
+ - name: test-policy-3
+ string_attribute: {key: key2, values: [value1, value2]}
+ - name: test-policy-4
+ spans_per_second: 35
+ - name: test-policy-5
+ spans_per_second: 123
+ numeric_attribute: {key: key1, min_value: 50, max_value: 100}
+ invert_match: true
+ - name: test-policy-6
+ spans_per_second: 50
+ properties: {min_duration: 9s }
+ - name: test-policy-7
+ properties:
+ name_pattern: "foo.*"
+ min_number_of_spans: 10
+ min_number_of_errors: 2
+ min_duration: 9s
+ - name: everything_else
+ spans_per_second: -1
+
+service:
+ pipelines:
+ traces:
+ receivers: [nop]
+ processors: [cascading_filter/1, cascading_filter/2]
+ exporters: [nop]
diff --git a/processor/k8sprocessor/Makefile b/processor/k8sprocessor/Makefile
new file mode 100644
index 000000000000..c1496226e590
--- /dev/null
+++ b/processor/k8sprocessor/Makefile
@@ -0,0 +1 @@
+include ../../Makefile.Common
\ No newline at end of file
diff --git a/processor/k8sprocessor/README.md b/processor/k8sprocessor/README.md
new file mode 100644
index 000000000000..6e84eaf4ea00
--- /dev/null
+++ b/processor/k8sprocessor/README.md
@@ -0,0 +1,318 @@
+## Kubernetes Processor
+
+The `k8sprocessor` allows automatic tagging of spans with k8s metadata.
+
+It automatically discovers k8s resources (pods), extracts metadata from them and adds the extracted
+metadata to the relevant spans. The processor uses the kubernetes API to discover all pods running
+in a cluster, keeps a record of their IP addresses and interesting metadata. Upon receiving spans,
+the processor tries to identify the source IP address of the service that sent the spans and matches
+it with the in memory data. If a match is found, the cached metadata is added to the spans as attributes.
+
+### Config
+
+There are several top level sections of the processor config:
+
+- `passthrough` (default = false): when set to true, only annotates resources with the pod IP and
+does not try to extract any other metadata. It does not need access to the K8S cluster API.
+Agent/Collector must receive spans directly from services to be able to correctly detect the pod IPs.
+- `owner_lookup_enabled` (default = false): when set to true, fields such as `daemonSetName`,
+`replicaSetName`, `service`, etc. can be extracted, though it requires fetching additional data to traverse
+the `owner` relationship. See the [list of fields](#k8sprocessor-extract) for more information over
+which tags require the flag to be enabled.
+- `extract`: the section (see [below](#k8sprocessor-extract)) allows specifying extraction rules
+- `filter`: the section (see [below](#k8sprocessor-filter)) allows specifying filters when matching pods
+
+#### Extract section
+
+Allows specifying extraction rules to extract data from k8s pod specs.
+
+- `metadata` (default = empty): specifies a list of strings that denote extracted fields. Following fields
+can be extracted:
+ - `containerId`
+ - `containerName`
+ - `containerImage`
+ - `clusterName`
+ - `daemonSetName` _(`owner_lookup_enabled` must be set to `true`)_
+ - `deploymentName`
+ - `hostName`
+ - `namespace`
+ - `nodeName`
+ - `podId`
+ - `podName`
+ - `replicaSetName` _(`owner_lookup_enabled` must be set to `true`)_
+ - `serviceName` _(`owner_lookup_enabled` must be set to `true`)_ - in case more than one service is assigned
+ to the pod, they are comma-separated
+ - `startTime`
+ - `statefulSetName` _(`owner_lookup_enabled` must be set to `true`)_
+
+ Also, see [example config](#k8sprocessor-example).
+- `tags`: specifies an optional map of custom tag names to be used. By default, following names are being assigned:
+ - `clusterName `: `k8s.cluster.name`
+ - `containerID `: `k8s.container.id`
+ - `containerImage `: `k8s.container.image`
+ - `containerName `: `k8s.container.name`
+ - `daemonSetName `: `k8s.daemonset.name`
+ - `deploymentName `: `k8s.deployment.name`
+ - `hostName `: `k8s.pod.hostname`
+ - `namespaceName `: `k8s.namespace.name`
+ - `nodeName `: `k8s.node.name`
+ - `podID `: `k8s.pod.id`
+ - `podName `: `k8s.pod.name`
+ - `replicaSetName `: `k8s.replicaset.name`
+ - `serviceName `: `k8s.service.name`
+ - `statefulSetName`: `k8s.statefulset.name`
+ - `startTime `: `k8s.pod.startTime`
+
+ When custom value is specified, specified fields use provided names when being tagged, e.g.:
+ ```yaml
+ tags:
+ containerId: my-custom-tag-for-container-id
+ nodeName: node_name
+ ```
+ - `annotations` (default = empty): a list of rules for extraction and recording annotation data.
+See [field extract config](#k8sprocessor-field-extract) for an example on how to use it.
+- `labels` (default = empty): a list of rules for extraction and recording label data.
+See [field extract config](#k8sprocessor-field-extract) for an example on how to use it.
+- `namespace_labels` (default = empty): a list of rules for extraction and recording namespace label data.
+See [field extract config](#k8sprocessor-field-extract) for an example on how to use it.
+
+#### Field Extract Config
+
+Allows specifying an extraction rule to extract a value from exactly one field.
+
+The field accepts a list of maps with three keys: `tag_name`, `key` and `regex`
+
+- `tag_name`: represents the name of the tag that will be added to the span. When not specified
+a default tag name will be used of the format: `k8s.<annotation type>.<annotation key>`. For example, if
+`tag_name` is not specified and the key is `git_sha`, then the tag name will be `k8s.annotation.deployment.git_sha`
+
+- `key`: represents the annotation name. This must exactly match an annotation name. To capture
+all keys, `*` can be used
+
+- `regex`: is an optional field used to extract a sub-string from a complex field value.
+The supplied regular expression must contain one named parameter with the string "value"
+as the name. For example, if your pod spec contains the following annotation,
+`kubernetes.io/change-cause: 2019-08-28T18:34:33Z APP_NAME=my-app GIT_SHA=58a1e39 CI_BUILD=4120`
+and you'd like to extract the GIT_SHA and the CI_BUILD values as tags, then you must specify
+the following two extraction rules:
+
+ ```yaml
+ processors:
+ k8s-tagger:
+ annotations:
+ - tag_name: git.sha
+ key: kubernetes.io/change-cause
+ regex: GIT_SHA=(?P<value>\w+)
+ - tag_name: ci.build
+ key: kubernetes.io/change-cause
+ regex: CI_BUILD=(?P<value>[\w]+)
+ ```
+
+ this will add the `git.sha` and `ci.build` tags to the spans. It is also possible to generically fetch
+ all keys and fill them into a template. To substitute the original name, use `%s`. For example:
+
+ ```yaml
+ processors:
+ k8s-tagger:
+ annotations:
+ - tag_name: k8s.annotation/%s
+ key: "*"
+ ```
+
+#### Filter section
+
+FilterConfig section allows specifying filters to filter pods by labels, fields, namespaces, nodes, etc.
+
+- `node` (default = ""): represents a k8s node or host. If specified, any pods not running on the specified
+node will be ignored by the tagger.
+- `node_from_env_var` (default = ""): can be used to extract the node name from an environment variable.
+The value must be the name of the environment variable. This is useful when the node an Otel agent will
+run on cannot be predicted. In such cases, the Kubernetes downward API can be used to add the node name
+to each pod as an environment variable. K8s tagger can then read this value and filter pods by it.
+For example, node name can be passed to each agent with the downward API as follows
+
+ ```yaml
+ env:
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ ```
+
+ Then the NodeFromEnv field can be set to `K8S_NODE_NAME` to filter all pods by the node that the agent
+ is running on. More on downward API here:
+ https://kubernetes.io/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/
+- `namespace` (default = ""): filters all pods by the provided namespace. All other pods are ignored.
+- `fields` (default = empty): a list of maps accepting three keys: `key`, `value`, `op`. Allows to filter
+pods by generic k8s fields. Only the following operations (`op`) are supported: `equals`, `not-equals`.
+For example, to match pods having `key1=value1` and `key2<>value2` condition met for fields, one can specify:
+
+ ```yaml
+ fields:
+ - key: key1 # `op` defaults to "equals" when not specified
+ value: value1
+ - key: key2
+ value: value2
+ op: not-equals
+ ```
+
+- `labels` (default = empty): a list of maps accepting three keys: `key`, `value`, `op`. Allows to filter
+pods by generic k8s pod labels. Only the following operations (`op`) are supported: `equals`, `not-equals`,
+`exists`, `not-exists`. For example, to match pods where `label1` exists, one can specify
+
+ ```yaml
+ labels:
+ - key: label1
+ op: exists
+ ```
+
+#### Example config:
+
+```yaml
+processors:
+ k8s_tagger:
+ passthrough: false
+ owner_lookup_enabled: true # To enable fetching additional metadata using `owner` relationship
+ extract:
+ metadata:
+ # extract the following well-known metadata fields
+ - containerId
+ - containerName
+ - containerImage
+ - clusterName
+ - daemonSetName
+ - deploymentName
+ - hostName
+ - namespace
+ - nodeName
+ - podId
+ - podName
+ - replicaSetName
+ - serviceName
+ - startTime
+ - statefulSetName
+ tags:
+ # It is possible to provide your custom key names for each of the extracted metadata fields,
+ # e.g. to store podName as "pod_name" rather than the default "k8s.pod.name", use following:
+ podName: pod_name
+
+ annotations:
+ # Extract all annotations using a template
+ - tag_name: k8s.annotation.%s
+ key: "*"
+ labels:
+ - tag_name: l1 # extracts value of label with key `label1` and inserts it as a tag with key `l1`
+ key: label1
+ - tag_name: l2 # extracts value of label with key `label1` with regexp and inserts it as a tag with key `l2`
+ key: label2
+ regex: field=(?P<value>.+)
+
+ filter:
+ namespace: ns2 # only look for pods running in ns2 namespace
+ node: ip-111.us-west-2.compute.internal # only look for pods running on this node/host
+ node_from_env_var: K8S_NODE # only look for pods running on the node/host specified by the K8S_NODE environment variable
+ labels: # only consider pods that match the following labels
+ - key: key1 # match pods that have a label `key1=value1`. `op` defaults to "equals" when not specified
+ value: value1
+ - key: key2 # ignore pods that have a label `key2=value2`.
+ value: value2
+ op: not-equals
+ fields: # works the same way as labels but for fields instead (like annotations)
+ - key: key1
+ value: value1
+ - key: key2
+ value: value2
+ op: not-equals
+```
+
+### RBAC
+
+TODO: mention the required RBAC rules.
+
+### Deployment scenarios
+
+The processor supports running both in agent and collector mode.
+
+#### As an agent
+
+When running as an agent, the processor detects IP addresses of pods sending spans to the agent and uses this
+information to extract metadata from pods and add to spans. When running as an agent, it is important to apply
+a discovery filter so that the processor only discovers pods from the same host that it is running on. Not using
+such a filter can result in unnecessary resource usage especially on very large clusters. Once the filter is applied,
+each processor will only query the k8s API for pods running on its own node.
+
+Node filter can be applied by setting the `filter.node` config option to the name of a k8s node. While this works
+as expected, it cannot be used to automatically filter pods by the same node that the processor is running on in
+most cases as it is not known beforehand which node a pod will be scheduled on. Luckily, kubernetes has a solution
+for this called the downward API. To automatically filter pods by the node the processor is running on, you'll need
+to complete the following steps:
+
+1. Use the downward API to inject the node name as an environment variable.
+Add the following snippet under the pod env section of the OpenTelemetry container.
+
+ ```yaml
+ env:
+ - name: KUBE_NODE_NAME
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: spec.nodeName
+ ```
+
+ This will inject a new environment variable to the OpenTelemetry container with the value as the
+ name of the node the pod was scheduled to run on.
+
+2. Set "filter.node_from_env_var" to the name of the environment variable holding the node name.
+
+ ```yaml
+ k8s_tagger:
+ filter:
+ node_from_env_var: KUBE_NODE_NAME # this should be same as the var name used in previous step
+ ```
+
+ This will restrict each OpenTelemetry agent to query pods running on the same node only dramatically reducing
+ resource requirements for very large clusters.
+
+#### As a collector
+
+The processor can be deployed both as an agent or as a collector.
+
+When running as a collector, the processor cannot correctly detect the IP address of the pods generating
+the spans when it receives the spans from an agent instead of receiving them directly from the pods. To
+workaround this issue, agents deployed with the k8s_tagger processor can be configured to detect
+the IP addresses and forward them along with the span resources. Collector can then match this IP address
+with k8s pods and enrich the spans with the metadata. In order to set this up, you'll need to complete the
+following steps:
+
+1. Setup agents in passthrough mode
+Configure the agents' k8s_tagger processors to run in passthrough mode.
+
+ ```yaml
+ # k8s_tagger config for agent
+ k8s_tagger:
+ passthrough: true
+ ```
+ This will ensure that the agents detect the IP address and add it as an attribute to all span resources.
+ Agents will not make any k8s API calls, do any discovery of pods or extract any metadata.
+
+2. Configure the collector as usual
+No special configuration changes are needed to be made on the collector. It'll automatically detect
+the IP address of spans sent by the agents as well as directly by other services/pods.
+
+
+### Caveats
+
+There are some edge-cases and scenarios where k8s_tagger will not work properly.
+
+
+#### Host networking mode
+
+The processor cannot correctly identify pods running in the host network mode and
+enriching spans generated by such pods is not supported at the moment.
+
+#### As a sidecar
+
+The processor does not support detecting containers from the same pods when running
+as a sidecar. While this can be done, we think it is simpler to just use the kubernetes
+downward API to inject environment variables into the pods and directly use their values
+as tags.
diff --git a/processor/k8sprocessor/client_test.go b/processor/k8sprocessor/client_test.go
new file mode 100644
index 000000000000..6b1337a1d920
--- /dev/null
+++ b/processor/k8sprocessor/client_test.go
@@ -0,0 +1,75 @@
+// Copyright 2020 OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package k8sprocessor
+
+import (
+ "go.uber.org/zap"
+ "k8s.io/apimachinery/pkg/fields"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/kubernetes/fake"
+ "k8s.io/client-go/tools/cache"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sprocessor/kube"
+)
+
+// fakeClient is used as a replacement for WatchClient in test cases.
+// Pods serves as the in-memory pod store queried by GetPod; Informer is a
+// fake shared informer run by Start and stopped by closing StopCh.
+type fakeClient struct {
+	Pods         map[kube.PodIdentifier]*kube.Pod
+	Rules        kube.ExtractionRules
+	Filters      kube.Filters
+	Associations []kube.Association
+	Informer     cache.SharedInformer
+	StopCh       chan struct{}
+}
+
+// selectors returns the label and field selectors used to construct the
+// fake informer: a match-everything label selector and an AND of zero
+// field selectors (i.e. no field-based filtering).
+func selectors() (labels.Selector, fields.Selector) {
+	var selectors []fields.Selector
+	return labels.Everything(), fields.AndSelectors(selectors...)
+}
+
+// newFakeClient instantiates a new fakeClient and satisfies the
+// kube ClientProvider type. The logger, clientset provider, informer
+// provider and owner provider arguments are ignored; the informer is
+// always backed by a fake clientset.
+func newFakeClient(_ *zap.Logger, apiCfg k8sconfig.APIConfig, rules kube.ExtractionRules, filters kube.Filters, associations []kube.Association, _ kube.APIClientsetProvider, _ kube.InformerProvider, _ kube.OwnerProvider) (kube.Client, error) {
+	cs := fake.NewSimpleClientset()
+
+	// Match-everything selectors: the fake informer performs no filtering.
+	ls, fs := selectors()
+	return &fakeClient{
+		Pods:         map[kube.PodIdentifier]*kube.Pod{},
+		Rules:        rules,
+		Filters:      filters,
+		Associations: associations,
+		Informer:     kube.NewFakeInformer(cs, "", ls, fs),
+		StopCh:       make(chan struct{}),
+	}, nil
+}
+
+// GetPod looks up the fakeClient.Pods map by the provided identifier,
+// which might represent either an IP address or a Pod UID. The second
+// return value reports whether a pod was found.
+func (f *fakeClient) GetPod(identifier kube.PodIdentifier) (*kube.Pod, bool) {
+	p, ok := f.Pods[identifier]
+	return p, ok
+}
+
+// Start runs the fake informer, if one is set. Note that this is NOT a
+// no-op: cache.SharedInformer.Run blocks until StopCh is closed, so Start
+// only returns after Stop is called.
+func (f *fakeClient) Start() {
+	if f.Informer != nil {
+		f.Informer.Run(f.StopCh)
+	}
+}
+
+// Stop closes StopCh, which shuts down the informer started by Start.
+// Calling Stop more than once panics (double close of the channel).
+func (f *fakeClient) Stop() {
+	close(f.StopCh)
+}
diff --git a/processor/k8sprocessor/config.go b/processor/k8sprocessor/config.go
new file mode 100644
index 000000000000..1396678fbaf2
--- /dev/null
+++ b/processor/k8sprocessor/config.go
@@ -0,0 +1,221 @@
+// Copyright 2020 OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package k8sprocessor
+
+import (
+ "go.opentelemetry.io/collector/config"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig"
+)
+
+// Config defines configuration for k8s attributes processor.
+type Config struct {
+	config.ProcessorSettings `mapstructure:"-"`
+
+	k8sconfig.APIConfig `mapstructure:",squash"`
+
+	// Passthrough mode only annotates resources with the pod IP and
+	// does not try to extract any other metadata. It does not need
+	// access to the K8S cluster API. Agent/Collector must receive spans
+	// directly from services to be able to correctly detect the pod IPs.
+	Passthrough bool `mapstructure:"passthrough"`
+
+	// OwnerLookupEnabled enables pulling owner data, which triggers
+	// additional calls to Kubernetes API
+	OwnerLookupEnabled bool `mapstructure:"owner_lookup_enabled"`
+
+	// Extract section allows specifying extraction rules to extract
+	// data from k8s pod specs
+	Extract ExtractConfig `mapstructure:"extract"`
+
+	// Filter section allows specifying filters to filter
+	// pods by labels, fields, namespaces, nodes, etc.
+	Filter FilterConfig `mapstructure:"filter"`
+
+	// Association section allows to define rules for tagging spans, metrics,
+	// and logs with Pod metadata.
+	Association []PodAssociationConfig `mapstructure:"pod_association"`
+}
+
+// Validate checks the processor configuration; it delegates to the
+// embedded k8sconfig.APIConfig validation.
+func (cfg *Config) Validate() error {
+	return cfg.APIConfig.Validate()
+}
+
+// ExtractConfig section allows specifying extraction rules to extract
+// data from k8s pod specs.
+type ExtractConfig struct {
+	// Metadata allows extracting pod metadata from a list of metadata fields.
+	// The field accepts a list of strings.
+	//
+	// Metadata fields supported right now are,
+	// namespace, podName, podUID, deployment, cluster, node and startTime
+	//
+	// Specifying anything other than these values will result in an error.
+	// By default all of the fields are extracted and added to spans and metrics.
+	Metadata []string `mapstructure:"metadata"`
+
+	// Tags allows specifying the output name used for each of the kubernetes
+	// tags. The field accepts a map of string->string. It is optional and if
+	// no values are provided, defaults will be used.
+	Tags map[string]string `mapstructure:"tags"`
+
+	// Annotations allows extracting data from pod annotations and recording
+	// it as resource attributes.
+	// It is a list of FieldExtractConfig type. See FieldExtractConfig
+	// documentation for more details.
+	Annotations []FieldExtractConfig `mapstructure:"annotations"`
+
+	// Labels allows extracting data from pod labels and recording it
+	// as resource attributes.
+	// It is a list of FieldExtractConfig type. See FieldExtractConfig
+	// documentation for more details.
+	Labels []FieldExtractConfig `mapstructure:"labels"`
+
+	// NamespaceLabels allows extracting data from namespace labels and
+	// recording it as resource attributes.
+	// It is a list of FieldExtractConfig type. See FieldExtractConfig
+	// documentation for more details.
+	NamespaceLabels []FieldExtractConfig `mapstructure:"namespace_labels"`
+}
+
+// FieldExtractConfig allows specifying an extraction rule to extract a value from exactly one field.
+//
+// The field accepts a list of FieldExtractConfig maps. The map accepts three keys:
+// tag_name, key and regex
+//
+// - tag_name represents the name of the tag that will be added to the span.
+// When not specified a default tag name will be used of the format:
+// k8s.pod.annotations.
+// k8s.pod.labels.