diff --git a/.flake8 b/.flake8 deleted file mode 100644 index 5683cfc1f3..0000000000 --- a/.flake8 +++ /dev/null @@ -1,37 +0,0 @@ -[flake8] -ignore = - # line too long, defer to black - E501 - - # allow line breaks before binary ops - W503 - - # allow line breaks after binary ops - W504 - - # allow whitespace before ':' (https://github.com/psf/black#slices) - E203 - - # conflicts with black - E701 - E704 - -exclude = - .bzr - .git - .hg - .svn - .tox - CVS - .venv*/ - venv*/ - target - __pycache__ - exporter/opentelemetry-exporter-jaeger/src/opentelemetry/exporter/jaeger/gen/ - exporter/opentelemetry-exporter-prometheus-remote-write/src/opentelemetry/exporter/prometheus_remote_write/gen/ - exporter/opentelemetry-exporter-jaeger/build/* - docs/examples/opentelemetry-example-app/src/opentelemetry_example_app/grpc/gen/ - docs/examples/opentelemetry-example-app/build/* - opentelemetry-proto/build/* - opentelemetry-proto/src/opentelemetry/proto/ - scripts/* diff --git a/.github/workflows/generate_workflows.py b/.github/workflows/generate_workflows.py index dbd128bc43..bda8eee827 100644 --- a/.github/workflows/generate_workflows.py +++ b/.github/workflows/generate_workflows.py @@ -1,9 +1,9 @@ from pathlib import Path from generate_workflows_lib import ( - generate_test_workflow, generate_lint_workflow, - generate_misc_workflow + generate_misc_workflow, + generate_test_workflow, ) tox_ini_path = Path(__file__).parent.parent.parent.joinpath("tox.ini") diff --git a/.github/workflows/generate_workflows_lib/hatch_build.py b/.github/workflows/generate_workflows_lib/hatch_build.py index aedf360a35..aff625f20e 100644 --- a/.github/workflows/generate_workflows_lib/hatch_build.py +++ b/.github/workflows/generate_workflows_lib/hatch_build.py @@ -1,15 +1,17 @@ -from hatchling.builders.hooks.plugin.interface import BuildHookInterface from pathlib import Path +from hatchling.builders.hooks.plugin.interface import BuildHookInterface -class CustomBuildHook(BuildHookInterface): +class CustomBuildHook(BuildHookInterface): def initialize(self, version, build_data): - with open( Path(__file__).parent.parent.parent.parent.joinpath("tox.ini") ) as tox_ini_file_0: with open( - Path(__file__).parent.joinpath("src/generate_workflows_lib/tox.ini"), "w" + Path(__file__).parent.joinpath( + "src/generate_workflows_lib/tox.ini" + ), + "w", ) as tox_ini_file_1: tox_ini_file_1.write(tox_ini_file_0.read()) diff --git a/.github/workflows/generate_workflows_lib/src/generate_workflows_lib/__init__.py b/.github/workflows/generate_workflows_lib/src/generate_workflows_lib/__init__.py index 31f11062c4..0308fbe5f3 100644 --- a/.github/workflows/generate_workflows_lib/src/generate_workflows_lib/__init__.py +++ b/.github/workflows/generate_workflows_lib/src/generate_workflows_lib/__init__.py @@ -1,12 +1,12 @@ +from collections import defaultdict +from pathlib import Path from re import compile as re_compile + from jinja2 import Environment, FileSystemLoader -from pathlib import Path from tox.config.cli.parse import get_options -from tox.session.state import State from tox.config.sets import CoreConfigSet from tox.config.source.tox_ini import ToxIni -from collections import defaultdict - +from tox.session.state import State _tox_test_env_regex = re_compile( r"(?Ppy\w+)-test-" @@ -19,27 +19,23 @@ def get_tox_envs(tox_ini_path: Path) -> list: - tox_ini = ToxIni(tox_ini_path) conf = State(get_options(), []).conf tox_section = next(tox_ini.sections()) - core_config_set = ( - CoreConfigSet(conf, tox_section, tox_ini_path.parent, tox_ini_path) 
+ core_config_set = CoreConfigSet( + conf, tox_section, tox_ini_path.parent, tox_ini_path ) ( - core_config_set. - loaders. - extend( - tox_ini. - get_loaders( + core_config_set.loaders.extend( + tox_ini.get_loaders( tox_section, base=[], override_map=defaultdict(list, {}), - conf=core_config_set + conf=core_config_set, ) ) ) @@ -48,11 +44,7 @@ def get_tox_envs(tox_ini_path: Path) -> list: def get_test_job_datas(tox_envs: list, operating_systems: list) -> list: - - os_alias = { - "ubuntu-latest": "Ubuntu", - "windows-latest": "Windows" - } + os_alias = {"ubuntu-latest": "Ubuntu", "windows-latest": "Windows"} python_version_alias = { "pypy3": "pypy-3.8", @@ -67,7 +59,6 @@ def get_test_job_datas(tox_envs: list, operating_systems: list) -> list: for operating_system in operating_systems: for tox_env in tox_envs: - tox_test_env_match = _tox_test_env_regex.match(tox_env) if tox_test_env_match is None: @@ -75,9 +66,9 @@ def get_test_job_datas(tox_envs: list, operating_systems: list) -> list: groups = tox_test_env_match.groupdict() - aliased_python_version = ( - python_version_alias[groups["python_version"]] - ) + aliased_python_version = python_version_alias[ + groups["python_version"] + ] tox_env = tox_test_env_match.string test_requirements = groups["test_requirements"] @@ -99,20 +90,17 @@ def get_test_job_datas(tox_envs: list, operating_systems: list) -> list: ), "python_version": aliased_python_version, "tox_env": tox_env, - "os": operating_system + "os": operating_system, } - ) return test_job_datas def get_lint_job_datas(tox_envs: list) -> list: - lint_job_datas = [] for tox_env in tox_envs: - tox_lint_env_match = _tox_lint_env_regex.match(tox_env) if tox_lint_env_match is None: @@ -126,18 +114,15 @@ def get_lint_job_datas(tox_envs: list) -> list: "ui_name": f"{tox_lint_env_match.groupdict()['name']}", "tox_env": tox_env, } - ) return lint_job_datas def get_contrib_job_datas(tox_envs: list) -> list: - contrib_job_datas = [] for tox_env in tox_envs: - tox_contrib_env_match = _tox_contrib_env_regex.match(tox_env) if tox_contrib_env_match is None: @@ -157,30 +142,25 @@ def get_contrib_job_datas(tox_envs: list) -> list: contrib_job_datas.append( { - "ui_name": ( - f"{groups['name']}" - f"{contrib_requirements}" - ), + "ui_name": (f"{groups['name']}" f"{contrib_requirements}"), "tox_env": tox_env, } - ) return contrib_job_datas def get_misc_job_datas(tox_envs: list) -> list: - misc_job_datas = [] _tox_benchmark_env_regex = re_compile(r"benchmark.+") for tox_env in tox_envs: if ( - _tox_test_env_regex.match(tox_env) is not None or - _tox_lint_env_regex.match(tox_env) is not None or - _tox_contrib_env_regex.match(tox_env) is not None or - _tox_benchmark_env_regex.match(tox_env) is not None + _tox_test_env_regex.match(tox_env) is not None + or _tox_lint_env_regex.match(tox_env) is not None + or _tox_contrib_env_regex.match(tox_env) is not None + or _tox_benchmark_env_regex.match(tox_env) is not None ): continue @@ -192,41 +172,32 @@ def get_misc_job_datas(tox_envs: list) -> list: def _generate_workflow( job_datas: list, name: str, workflow_directory_path: Path ): - # Github seems to limit the amount of jobs in a workflow file, that is why # they are split in groups of 250 per workflow file. 
for file_number, job_datas in enumerate( [ - job_datas[index:index + 250] + job_datas[index : index + 250] for index in range(0, len(job_datas), 250) ] ): - with open( - workflow_directory_path.joinpath(f"{name}_{file_number}.yml"), - "w" + workflow_directory_path.joinpath(f"{name}_{file_number}.yml"), "w" ) as test_yml_file: - test_yml_file.write( - Environment( - loader=FileSystemLoader(Path(__file__).parent) - ).get_template(f"{name}.yml.j2").render( - job_datas=job_datas, file_number=file_number - ) + Environment(loader=FileSystemLoader(Path(__file__).parent)) + .get_template(f"{name}.yml.j2") + .render(job_datas=job_datas, file_number=file_number) ) test_yml_file.write("\n") def generate_test_workflow( - tox_ini_path: Path, - workflow_directory_path: Path, - *operating_systems + tox_ini_path: Path, workflow_directory_path: Path, *operating_systems ) -> None: - _generate_workflow( get_test_job_datas(get_tox_envs(tox_ini_path), operating_systems), "test", - workflow_directory_path + workflow_directory_path, ) @@ -234,24 +205,22 @@ def generate_lint_workflow( tox_ini_path: Path, workflow_directory_path: Path, ) -> None: - _generate_workflow( get_lint_job_datas(get_tox_envs(tox_ini_path)), "lint", - workflow_directory_path + workflow_directory_path, ) def generate_contrib_workflow( workflow_directory_path: Path, ) -> None: - _generate_workflow( get_contrib_job_datas( get_tox_envs(Path(__file__).parent.joinpath("tox.ini")) ), "contrib", - workflow_directory_path + workflow_directory_path, ) @@ -259,9 +228,8 @@ def generate_misc_workflow( tox_ini_path: Path, workflow_directory_path: Path, ) -> None: - _generate_workflow( get_misc_job_datas(get_tox_envs(tox_ini_path)), "misc", - workflow_directory_path + workflow_directory_path, ) diff --git a/.github/workflows/lint_0.yml b/.github/workflows/lint_0.yml index 2236dc422c..1fd3198785 100644 --- a/.github/workflows/lint_0.yml +++ b/.github/workflows/lint_0.yml @@ -16,6 +16,24 @@ env: jobs: + lint-instrumentation-openai-v2: + name: instrumentation-openai-v2 + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.12 + uses: actions/setup-python@v5 + with: + python-version: "3.12" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e lint-instrumentation-openai-v2 + lint-resource-detector-container: name: resource-detector-container runs-on: ubuntu-latest diff --git a/.github/workflows/misc_0.yml b/.github/workflows/misc_0.yml index edb96b60d1..e367048b72 100644 --- a/.github/workflows/misc_0.yml +++ b/.github/workflows/misc_0.yml @@ -132,3 +132,21 @@ jobs: - name: Run tests run: tox -e shellcheck + + ruff: + name: ruff + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.10 + uses: actions/setup-python@v5 + with: + python-version: "3.10" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e ruff diff --git a/.github/workflows/test_0.yml b/.github/workflows/test_0.yml index d251737227..df74e90021 100644 --- a/.github/workflows/test_0.yml +++ b/.github/workflows/test_0.yml @@ -16,6 +16,114 @@ env: jobs: + py38-test-instrumentation-openai-v2_ubuntu-latest: + name: instrumentation-openai-v2 3.8 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.8 + uses: actions/setup-python@v5 + with: + python-version: "3.8" + + - name: Install tox 
+ run: pip install tox + + - name: Run tests + run: tox -e py38-test-instrumentation-openai-v2 -- -ra + + py39-test-instrumentation-openai-v2_ubuntu-latest: + name: instrumentation-openai-v2 3.9 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.9 + uses: actions/setup-python@v5 + with: + python-version: "3.9" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py39-test-instrumentation-openai-v2 -- -ra + + py310-test-instrumentation-openai-v2_ubuntu-latest: + name: instrumentation-openai-v2 3.10 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.10 + uses: actions/setup-python@v5 + with: + python-version: "3.10" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py310-test-instrumentation-openai-v2 -- -ra + + py311-test-instrumentation-openai-v2_ubuntu-latest: + name: instrumentation-openai-v2 3.11 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py311-test-instrumentation-openai-v2 -- -ra + + py312-test-instrumentation-openai-v2_ubuntu-latest: + name: instrumentation-openai-v2 3.12 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.12 + uses: actions/setup-python@v5 + with: + python-version: "3.12" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py312-test-instrumentation-openai-v2 -- -ra + + pypy3-test-instrumentation-openai-v2_ubuntu-latest: + name: instrumentation-openai-v2 pypy-3.8 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python pypy-3.8 + uses: actions/setup-python@v5 + with: + python-version: "pypy-3.8" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e pypy3-test-instrumentation-openai-v2 -- -ra + py38-test-resource-detector-container_ubuntu-latest: name: resource-detector-container 3.8 Ubuntu runs-on: ubuntu-latest @@ -4407,111 +4515,3 @@ jobs: - name: Run tests run: tox -e py311-test-instrumentation-pymemcache-2 -- -ra - - py311-test-instrumentation-pymemcache-3_ubuntu-latest: - name: instrumentation-pymemcache-3 3.11 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.11 - uses: actions/setup-python@v5 - with: - python-version: "3.11" - - - name: Install tox - run: pip install tox - - - name: Run tests - run: tox -e py311-test-instrumentation-pymemcache-3 -- -ra - - py311-test-instrumentation-pymemcache-4_ubuntu-latest: - name: instrumentation-pymemcache-4 3.11 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.11 - uses: actions/setup-python@v5 - with: - python-version: "3.11" - - - name: Install tox - run: pip install tox - - - name: Run tests - run: tox -e py311-test-instrumentation-pymemcache-4 -- -ra - - py312-test-instrumentation-pymemcache-0_ubuntu-latest: - name: instrumentation-pymemcache-0 3.12 Ubuntu - runs-on: ubuntu-latest - 
steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.12 - uses: actions/setup-python@v5 - with: - python-version: "3.12" - - - name: Install tox - run: pip install tox - - - name: Run tests - run: tox -e py312-test-instrumentation-pymemcache-0 -- -ra - - py312-test-instrumentation-pymemcache-1_ubuntu-latest: - name: instrumentation-pymemcache-1 3.12 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.12 - uses: actions/setup-python@v5 - with: - python-version: "3.12" - - - name: Install tox - run: pip install tox - - - name: Run tests - run: tox -e py312-test-instrumentation-pymemcache-1 -- -ra - - py312-test-instrumentation-pymemcache-2_ubuntu-latest: - name: instrumentation-pymemcache-2 3.12 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.12 - uses: actions/setup-python@v5 - with: - python-version: "3.12" - - - name: Install tox - run: pip install tox - - - name: Run tests - run: tox -e py312-test-instrumentation-pymemcache-2 -- -ra - - py312-test-instrumentation-pymemcache-3_ubuntu-latest: - name: instrumentation-pymemcache-3 3.12 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.12 - uses: actions/setup-python@v5 - with: - python-version: "3.12" - - - name: Install tox - run: pip install tox - - - name: Run tests - run: tox -e py312-test-instrumentation-pymemcache-3 -- -ra diff --git a/.github/workflows/test_1.yml b/.github/workflows/test_1.yml index 30ca4e67d2..30e02c5634 100644 --- a/.github/workflows/test_1.yml +++ b/.github/workflows/test_1.yml @@ -16,6 +16,114 @@ env: jobs: + py311-test-instrumentation-pymemcache-3_ubuntu-latest: + name: instrumentation-pymemcache-3 3.11 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py311-test-instrumentation-pymemcache-3 -- -ra + + py311-test-instrumentation-pymemcache-4_ubuntu-latest: + name: instrumentation-pymemcache-4 3.11 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py311-test-instrumentation-pymemcache-4 -- -ra + + py312-test-instrumentation-pymemcache-0_ubuntu-latest: + name: instrumentation-pymemcache-0 3.12 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.12 + uses: actions/setup-python@v5 + with: + python-version: "3.12" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py312-test-instrumentation-pymemcache-0 -- -ra + + py312-test-instrumentation-pymemcache-1_ubuntu-latest: + name: instrumentation-pymemcache-1 3.12 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.12 + uses: actions/setup-python@v5 + with: + python-version: "3.12" + + - name: Install tox + run: pip install 
tox + + - name: Run tests + run: tox -e py312-test-instrumentation-pymemcache-1 -- -ra + + py312-test-instrumentation-pymemcache-2_ubuntu-latest: + name: instrumentation-pymemcache-2 3.12 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.12 + uses: actions/setup-python@v5 + with: + python-version: "3.12" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py312-test-instrumentation-pymemcache-2 -- -ra + + py312-test-instrumentation-pymemcache-3_ubuntu-latest: + name: instrumentation-pymemcache-3 3.12 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.12 + uses: actions/setup-python@v5 + with: + python-version: "3.12" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py312-test-instrumentation-pymemcache-3 -- -ra + py312-test-instrumentation-pymemcache-4_ubuntu-latest: name: instrumentation-pymemcache-4 3.12 Ubuntu runs-on: ubuntu-latest diff --git a/.isort.cfg b/.isort.cfg deleted file mode 100644 index afe42d3d41..0000000000 --- a/.isort.cfg +++ /dev/null @@ -1,19 +0,0 @@ -[settings] -include_trailing_comma=True -force_grid_wrap=0 -use_parentheses=True -line_length=79 -profile=black - -; 3 stands for Vertical Hanging Indent, e.g. -; from third_party import ( -; lib1, -; lib2, -; lib3, -; ) -; docs: https://github.com/timothycrosley/isort#multi-line-output-modes -multi_line_output=3 -skip=target -skip_glob=**/gen/*,.venv*/*,venv*/*,.tox/* -known_first_party=opentelemetry -known_third_party=psutil,pytest,redis,redis_opentracing diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b01b7ce4d7..bf0e8f7653 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,14 +1,10 @@ repos: - - repo: https://github.com/psf/black-pre-commit-mirror - rev: 24.3.0 - hooks: - - id: black - language_version: python3.12 - - repo: https://github.com/pycqa/isort - rev: 5.12.0 - hooks: - - id: isort - - repo: https://github.com/pycqa/flake8 - rev: '6.1.0' - hooks: - - id: flake8 +- repo: https://github.com/astral-sh/ruff-pre-commit + # Ruff version. + rev: v0.6.9 + hooks: + # Run the linter. + - id: ruff + args: ["--fix", "--show-fixes"] + # Run the formatter. + - id: ruff-format diff --git a/CHANGELOG.md b/CHANGELOG.md index 775043035f..a43ec36b00 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,17 +13,23 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added +- `opentelemetry-instrumentation-openai-v2` Instrumentation for OpenAI >= 0.27.0 + ([#2759](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/2759)) - `opentelemetry-instrumentation-fastapi` Add autoinstrumentation mechanism tests. 
([#2860](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/2860)) - `opentelemetry-instrumentation-aiokafka` Add instrumentor and auto instrumentation support for aiokafka ([#2082](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/2082)) - `opentelemetry-instrumentation-redis` Add additional attributes for methods create_index and search, rename those spans ([#2635](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/2635)) +- `opentelemetry-instrumentation` Add support for string based dotted module paths in unwrap + ([#2919](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/2919)) ### Fixed - `opentelemetry-instrumentation-aiokafka` Wrap `AIOKafkaConsumer.getone()` instead of `AIOKafkaConsumer.__anext__` ([#2874](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/2874)) +- `opentelemetry-instrumentation-confluent-kafka` Fix to allow `topic` to be extracted from `kwargs` in `produce()` + ([#2901])(https://github.com/open-telemetry/opentelemetry-python-contrib/pull/2901) ### Breaking changes @@ -76,6 +82,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ([#2753](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/2753)) - `opentelemetry-instrumentation-grpc` Fix grpc supported version ([#2845](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/2845)) +- `opentelemetry-instrumentation-asyncio` fix `AttributeError` in + `AsyncioInstrumentor.trace_to_thread` when `func` is a `functools.partial` instance + ([#2911](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/2911)) ## Version 1.26.0/0.47b0 (2024-07-23) @@ -234,7 +243,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ([#2420](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/2420)) - `opentelemetry-instrumentation-elasticsearch` Disabling instrumentation with native OTel support enabled ([#2524](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/2524)) -- `opentelemetry-instrumentation-asyncio` Check for __name__ attribute in the coroutine +- `opentelemetry-instrumentation-asyncio` Check for **name** attribute in the coroutine ([#2521](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/2521)) - `opentelemetry-instrumentation-requests` Fix wrong time unit for duration histogram ([#2553](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/2553)) @@ -249,6 +258,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ([#2146](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/2146)) ### Fixed + - `opentelemetry-instrumentation-celery` Allow Celery instrumentation to be installed multiple times ([#2342](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/2342)) - Align gRPC span status codes to OTEL specification @@ -266,8 +276,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - AwsLambdaInstrumentor sets `cloud.account.id` span attribute ([#2367](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/2367)) - ### Added + - `opentelemetry-instrumentation-fastapi` Add support for configuring header extraction via runtime constructor parameters ([#2241](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/2241)) @@ -278,7 +288,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - 
`opentelemetry-resource-detector-azure` Added 10s timeout to VM Resource Detector ([#2119](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/2119)) - `opentelemetry-instrumentation-asyncpg` Allow AsyncPGInstrumentor to be instantiated multiple times -([#1791](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1791)) + ([#1791](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1791)) - `opentelemetry-instrumentation-confluent-kafka` Add support for higher versions until 2.3.0 of confluent_kafka ([#2132](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/2132)) - `opentelemetry-resource-detector-azure` Changed timeout to 4 seconds due to [timeout bug](https://github.com/open-telemetry/opentelemetry-python/issues/3644) @@ -362,6 +372,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ([#152](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/2013)) ## Version 1.19.0/0.40b0 (2023-07-13) + - `opentelemetry-instrumentation-asgi` Add `http.server.request.size` metric ([#1867](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1867)) @@ -406,8 +417,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ([#1879](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1879)) - Add optional distro and configurator selection for auto-instrumentation ([#1823](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1823)) +- `opentelemetry-instrumentation-django` - Add option to add Opentelemetry middleware at specific position in middleware chain + ([#2912]https://github.com/open-telemetry/opentelemetry-python-contrib/pull/2912) ### Added + - `opentelemetry-instrumentation-kafka-python` Add instrumentation to `consume` method ([#1786](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1786)) @@ -458,6 +472,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ([#1692](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1692)) ### Changed + - Update HTTP server/client instrumentation span names to comply with spec ([#1759](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1759)) @@ -495,7 +510,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added - Support `aio_pika` 9.x (([#1670](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1670]) -- `opentelemetry-instrumentation-redis` Add `sanitize_query` config option to allow query sanitization. ([#1572](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1572)) +- `opentelemetry-instrumentation-redis` Add `sanitize_query` config option to allow query sanitization. ([#1572](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1572)) - `opentelemetry-instrumentation-elasticsearch` Add optional db.statement query sanitization. ([#1598](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1598)) - `opentelemetry-instrumentation-celery` Record exceptions as events on the span. @@ -519,7 +534,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ([#1575](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1575)) - Fix SQLAlchemy uninstrumentation ([#1581](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1581)) -- `opentelemetry-instrumentation-grpc` Fix code()/details() of _OpentelemetryServicerContext. 
+- `opentelemetry-instrumentation-grpc` Fix code()/details() of \_OpentelemetryServicerContext. ([#1578](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1578)) - Fix aiopg instrumentation to work with aiopg < 2.0.0 ([#1473](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1473)) @@ -571,7 +586,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ([#1430](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1430)) - `opentelemetry-instrumentation-aiohttp-client` Allow overriding of status in response hook. ([#1394](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1394)) -- `opentelemetry-instrumentation-pymysql` Fix dbapi connection instrument wrapper has no _sock member. +- `opentelemetry-instrumentation-pymysql` Fix dbapi connection instrument wrapper has no \_sock member. ([#1424](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1424)) - `opentelemetry-instrumentation-dbapi` Fix the check for the connection already being instrumented in instrument_connection(). ([#1424](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1424)) @@ -656,7 +671,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Add metric instrumentation in starlette ([#1327](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1327)) - ### Fixed - `opentelemetry-instrumentation-kafka-python`: wait for metadata @@ -669,7 +683,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ([#1208](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1208)) - `opentelemetry-instrumentation-aiohttp-client` Fix producing additional spans with each newly created ClientSession - ([#1246](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1246)) -- Add _is_opentelemetry_instrumented check in _InstrumentedFastAPI class +- Add \_is_opentelemetry_instrumented check in \_InstrumentedFastAPI class ([#1313](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1313)) - Fix uninstrumentation of existing app instances in FastAPI ([#1258](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1258)) @@ -688,6 +702,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ([#1203](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1203)) ### Added + - `opentelemetry-instrumentation-redis` add support to instrument RedisCluster clients ([#1177](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1177)) - `opentelemetry-instrumentation-sqlalchemy` Added span for the connection phase ([#1133](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/1133)) @@ -700,11 +715,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [1.12.0rc2-0.32b0](https://github.com/open-telemetry/opentelemetry-python/releases/tag/v1.12.0rc2-0.32b0) - 2022-07-01 - - Pyramid: Only categorize 500s server exceptions as errors ([#1037](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/1037)) ### Fixed + - Fix bug in system metrics by checking their configuration ([#1129](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1129)) - Adding escape call to fix [auto-instrumentation not producing spans on Windows](https://github.com/open-telemetry/opentelemetry-python/issues/2703). 
@@ -717,8 +732,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - fixed typo in `system.network.io` metric configuration ([#1135](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1135)) - ### Added + - `opentelemetry-instrumentation-aiohttp-client` Add support for optional custom trace_configs argument. ([1079](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1079)) - `opentelemetry-instrumentation-sqlalchemy` add support to instrument multiple engines @@ -742,10 +757,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Integrated sqlcommenter plugin into opentelemetry-instrumentation-django ([#896](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/896)) - ## Version 1.12.0rc1/0.31b0 (2022-05-17) ### Fixed + - `opentelemetry-instrumentation-aiohttp-client` make span attributes available to sampler ([#1072](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1072)) - `opentelemetry-instrumentation-aws-lambda` Fixed an issue - in some rare cases (API GW proxy integration test) @@ -758,6 +773,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `opentelemetry-sdk-extension-aws` change timeout for AWS EC2 and EKS metadata requests from 1000 seconds and 2000 seconds to 1 second ### Added + - `opentelemetry-instrument` and `opentelemetry-bootstrap` now include a `--version` flag ([#1065](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1065)) - `opentelemetry-instrumentation-redis` now instruments asynchronous Redis clients, if the installed redis-py includes async support (>=4.2.0). @@ -765,22 +781,23 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `opentelemetry-instrumentation-boto3sqs` added AWS's SQS instrumentation. ([#1081](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1081)) - ## Version 1.11.1/0.30b1 (2022-04-21) ### Added + - `opentelemetry-instrumentation-starlette` Capture custom request/response headers in span attributes ([#1046](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1046)) ### Fixed + - Prune autoinstrumentation sitecustomize module directory from PYTHONPATH immediately ([#1066](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1066)) - ## Version 1.11.0/0.30b0 (2022-04-18) ### Fixed -- `opentelemetry-instrumentation-pyramid` Fixed which package is the correct caller in _traced_init. + +- `opentelemetry-instrumentation-pyramid` Fixed which package is the correct caller in \_traced_init. ([#830](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/830)) - `opentelemetry-instrumentation-tornado` Fix Tornado errors mapping to 500 ([#1048](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1048)) @@ -814,7 +831,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `opentelemetry-instrumentation-pyramid` Pyramid: Capture custom request/response headers in span attributes ([#1022](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1022)) - ## Version 1.10.0/0.29b0 (2022-03-10) - `opentelemetry-instrumentation-wsgi` Capture custom request/response headers in span attributes @@ -828,7 +844,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `opentelemetry-instrumentation-aws-lambda` `SpanKind.SERVER` by default, add more cases for `SpanKind.CONSUMER` services. 
([#926](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/926)) - `opentelemetry-instrumentation-sqlalchemy` added experimental sql commenter capability - ([#924](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/924)) + ([#924](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/924)) - `opentelemetry-contrib-instrumentations` added new meta-package that installs all contrib instrumentations. ([#681](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/681)) - `opentelemetry-instrumentation-dbapi` add experimental sql commenter capability @@ -867,12 +883,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## Version 1.9.0/0.28b0 (2022-01-26) - ### Added - `opentelemetry-instrumentation-pyramid` Pyramid: Conditionally create SERVER spans ([#869](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/869)) -- `opentelemetry-instrumentation-grpc` added `trailing_metadata` to _OpenTelemetryServicerContext. +- `opentelemetry-instrumentation-grpc` added `trailing_metadata` to \_OpenTelemetryServicerContext. ([#871](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/871)) - `opentelemetry-instrumentation-asgi` now returns a `traceresponse` response header. ([#817](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/817)) @@ -906,12 +921,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `opentelemetry-instrumentation-aiohttp-client` aiohttp: Remove `span_name` from docs ([#857](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/857)) - ## Version 1.8.0/0.27b0 (2021-12-17) ### Added -- `opentelemetry-instrumentation-aws-lambda` Adds support for configurable flush timeout via `OTEL_INSTRUMENTATION_AWS_LAMBDA_FLUSH_TIMEOUT` property. ([#825](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/825)) +- `opentelemetry-instrumentation-aws-lambda` Adds support for configurable flush timeout via `OTEL_INSTRUMENTATION_AWS_LAMBDA_FLUSH_TIMEOUT` property. ([#825](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/825)) - `opentelemetry-instrumentation-pika` Adds support for versions between `0.12.0` to `1.0.0`. ([#837](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/837)) ### Fixed @@ -981,13 +995,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ([#755](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/755)) ### Added + - `opentelemetry-instrumentation-pika` Add `publish_hook` and `consume_hook` callbacks passed as arguments to the instrument method ([#763](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/763)) - ## Version 1.6.1/0.25b1 (2021-10-18) ### Changed + - `opentelemetry-util-http` no longer contains an instrumentation entrypoint and will not be loaded automatically by the auto instrumentor. 
([#745](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/745)) @@ -1001,7 +1016,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ([#760](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/760)) ## Version 1.6.0/0.25b0 (2021-10-13) + ### Added + - `opentelemetry-sdk-extension-aws` Release AWS Python SDK Extension as 1.0.0 ([#667](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/667)) - `opentelemetry-instrumentation-urllib3`, `opentelemetry-instrumentation-requests` @@ -1028,6 +1045,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ([#391](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/391)) ### Changed + - `opentelemetry-instrumentation-flask` Fix `RuntimeError: Working outside of request context` ([#734](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/734)) - `opentelemetry-propagators-aws-xray` Rename `AwsXRayFormat` to `AwsXRayPropagator` @@ -1058,6 +1076,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## Version 1.5.0/0.24b0 (2021-08-26) ### Added + - `opentelemetry-sdk-extension-aws` Add AWS resource detectors to extension package ([#586](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/586)) - `opentelemetry-instrumentation-asgi`, `opentelemetry-instrumentation-aiohttp-client`, `openetelemetry-instrumentation-fastapi`, @@ -1076,10 +1095,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## Version 1.4.0/0.23b0 (2021-07-21) ### Removed + - Move `opentelemetry-instrumentation` to the core repo. ([#595](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/595)) ### Changed + - `opentelemetry-instrumentation-falcon` added support for Falcon 3. ([#607](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/607)) - `opentelemetry-instrumentation-tornado` properly instrument work done in tornado on_finish method. @@ -1127,12 +1148,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ([#568](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/568)) ### Added + - `opentelemetry-instrumentation-httpx` Add `httpx` instrumentation ([#461](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/461)) ## Version 1.3.0/0.22b0 (2021-06-01) ### Changed + - `opentelemetry-bootstrap` not longer forcibly removes and re-installs libraries and their instrumentations. This means running bootstrap will not auto-upgrade existing dependencies and as a result not cause dependency conflicts. @@ -1149,6 +1172,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ([#488](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/488)) ### Added + - `opentelemetry-instrumentation-botocore` now supports context propagation for lambda invoke via Payload embedded headers. ([#458](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/458)) @@ -1158,6 +1182,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## Version 1.2.0/0.21b0 (2021-05-11) ### Changed + - Instrumentation packages don't specify the libraries they instrument as dependencies anymore. Instead, they verify the correct version of libraries are installed at runtime. 
([#475](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/475)) @@ -1709,6 +1734,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `opentelemetry-ext-http-requests` Updates for core library changes - `Added support for PyPy3` Initial release + ## [#1033](https://github.com/open-telemetryopentelemetry-python-contrib/issues/1033) ## Version 0.1a0 (2019-09-30) @@ -1723,7 +1749,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `opentelemetry-resource-detector-azure` Added 10s timeout to VM Resource Detector ([#2119](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/2119)) - `opentelemetry-instrumentation-asyncpg` Allow AsyncPGInstrumentor to be instantiated multiple times -([#1791](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1791)) + ([#1791](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1791)) - `opentelemetry-instrumentation-confluent-kafka` Add support for higher versions until 2.3.0 of confluent_kafka ([#2132](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/2132)) - `opentelemetry-resource-detector-azure` Changed timeout to 4 seconds due to [timeout bug](https://github.com/open-telemetry/opentelemetry-python/issues/3644) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 61f261f001..8d72683692 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -67,20 +67,15 @@ You can run `tox` with the following arguments: Python version * `tox -e spellcheck` to run a spellcheck on all the code * `tox -e lint-some-package` to run lint checks on `some-package` +* `tox -e ruff` to run ruff linter and formatter checks against the entire codebase -`black` and `isort` are executed when `tox -e lint` is run. The reported errors can be tedious to fix manually. -An easier way to do so is: - -1. Run `.tox/lint/bin/black .` -2. Run `.tox/lint/bin/isort .` - -Or you can call formatting and linting in one command by [pre-commit](https://pre-commit.com/): +`ruff check` and `ruff format` are executed when `tox -e ruff` is run. We strongly recommend you to configure [pre-commit](https://pre-commit.com/) locally to run `ruff` automatically before each commit by installing it as git hooks. You just need to [install pre-commit](https://pre-commit.com/#install) in your environment: ```console -$ pre-commit +$ pip install pre-commit -c dev-requirements.txt ``` -You can also configure it to run lint tools automatically before committing with: +and run this command inside the git repository: ```console $ pre-commit install diff --git a/_template/pyproject.toml b/_template/pyproject.toml index 514b537f42..b180d32ad8 100644 --- a/_template/pyproject.toml +++ b/_template/pyproject.toml @@ -35,7 +35,7 @@ dependencies = [ [project.entry-points.opentelemetry_instrumentor] # REPLACE ME: the entrypoint for the instrumentor e.g # sqlalchemy = "opentelemetry.instrumentation.sqlalchemy:SQLAlchemyInstrumentor" - = "opentelemetry.instrumentation." +REPLACE_ME = "opentelemetry.instrumentation." 
[project.urls] # url of the instrumentation e.g diff --git a/dev-requirements.txt b/dev-requirements.txt index 3289650ac8..70464ffdd7 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -1,7 +1,4 @@ pylint==3.0.2 -flake8==6.1.0 -isort==5.12.0 -black==24.3.0 httpretty==1.1.4 mypy==0.931 sphinx==7.1.2 @@ -19,3 +16,4 @@ ruamel.yaml==0.17.21 flaky==3.7.0 pre-commit==3.7.0; python_version >= '3.9' pre-commit==3.5.0; python_version < '3.9' +ruff==0.6.9 diff --git a/eachdist.ini b/eachdist.ini index 950b751685..c04fb571ce 100644 --- a/eachdist.ini +++ b/eachdist.ini @@ -50,6 +50,7 @@ packages= opentelemetry-resource-detector-azure opentelemetry-sdk-extension-aws opentelemetry-propagator-aws-xray + opentelemetry-instrumentation-openai-v2 opentelemetry-instrumentation-test [lintroots] diff --git a/exporter/opentelemetry-exporter-prometheus-remote-write/src/opentelemetry/exporter/prometheus_remote_write/__init__.py b/exporter/opentelemetry-exporter-prometheus-remote-write/src/opentelemetry/exporter/prometheus_remote_write/__init__.py index 652e5eae8d..78b8516a46 100644 --- a/exporter/opentelemetry-exporter-prometheus-remote-write/src/opentelemetry/exporter/prometheus_remote_write/__init__.py +++ b/exporter/opentelemetry-exporter-prometheus-remote-write/src/opentelemetry/exporter/prometheus_remote_write/__init__.py @@ -29,14 +29,14 @@ Sample, TimeSeries, ) -from opentelemetry.sdk.metrics import Counter -from opentelemetry.sdk.metrics import Histogram as ClientHistogram from opentelemetry.sdk.metrics import ( + Counter, ObservableCounter, ObservableGauge, ObservableUpDownCounter, UpDownCounter, ) +from opentelemetry.sdk.metrics import Histogram as ClientHistogram from opentelemetry.sdk.metrics.export import ( AggregationTemporality, Gauge, diff --git a/gen-requirements.txt b/gen-requirements.txt index b2d5c4f695..074806f30f 100644 --- a/gen-requirements.txt +++ b/gen-requirements.txt @@ -2,8 +2,7 @@ astor==0.8.1 jinja2==3.1.4 markupsafe==2.0.1 -isort -black +ruff==0.6.9 requests tomli tomli_w diff --git a/instrumentation/README.md b/instrumentation/README.md index 6bb47f6f9c..db437fe518 100644 --- a/instrumentation/README.md +++ b/instrumentation/README.md @@ -29,6 +29,7 @@ | [opentelemetry-instrumentation-logging](./opentelemetry-instrumentation-logging) | logging | No | experimental | [opentelemetry-instrumentation-mysql](./opentelemetry-instrumentation-mysql) | mysql-connector-python >= 8.0, < 10.0 | No | experimental | [opentelemetry-instrumentation-mysqlclient](./opentelemetry-instrumentation-mysqlclient) | mysqlclient < 3 | No | experimental +| [opentelemetry-instrumentation-openai-v2](./opentelemetry-instrumentation-openai-v2) | openai >= 1.26.0 | No | experimental | [opentelemetry-instrumentation-pika](./opentelemetry-instrumentation-pika) | pika >= 0.12.0 | No | experimental | [opentelemetry-instrumentation-psycopg](./opentelemetry-instrumentation-psycopg) | psycopg >= 3.1.0 | No | experimental | [opentelemetry-instrumentation-psycopg2](./opentelemetry-instrumentation-psycopg2) | psycopg2 >= 2.7.3.1 | No | experimental diff --git a/instrumentation/opentelemetry-instrumentation-aio-pika/src/opentelemetry/instrumentation/aio_pika/aio_pika_instrumentor.py b/instrumentation/opentelemetry-instrumentation-aio-pika/src/opentelemetry/instrumentation/aio_pika/aio_pika_instrumentor.py index caf0e5b1a9..48a936dc61 100644 --- a/instrumentation/opentelemetry-instrumentation-aio-pika/src/opentelemetry/instrumentation/aio_pika/aio_pika_instrumentor.py +++ 
b/instrumentation/opentelemetry-instrumentation-aio-pika/src/opentelemetry/instrumentation/aio_pika/aio_pika_instrumentor.py @@ -40,7 +40,7 @@ async def wrapper(wrapped, instance, args, kwargs): async def consume( callback: Callable[[AbstractIncomingMessage], Any], *fargs, - **fkwargs + **fkwargs, ): decorated_callback = CallbackDecorator( tracer, instance diff --git a/instrumentation/opentelemetry-instrumentation-aio-pika/src/opentelemetry/instrumentation/aio_pika/span_builder.py b/instrumentation/opentelemetry-instrumentation-aio-pika/src/opentelemetry/instrumentation/aio_pika/span_builder.py index c62b1ea9bf..dd5433756c 100644 --- a/instrumentation/opentelemetry-instrumentation-aio-pika/src/opentelemetry/instrumentation/aio_pika/span_builder.py +++ b/instrumentation/opentelemetry-instrumentation-aio-pika/src/opentelemetry/instrumentation/aio_pika/span_builder.py @@ -69,23 +69,27 @@ def set_channel(self, channel: AbstractChannel): def set_message(self, message: AbstractMessage): properties = message.properties if properties.message_id: - self._attributes[ - SpanAttributes.MESSAGING_MESSAGE_ID - ] = properties.message_id + self._attributes[SpanAttributes.MESSAGING_MESSAGE_ID] = ( + properties.message_id + ) if properties.correlation_id: - self._attributes[ - SpanAttributes.MESSAGING_CONVERSATION_ID - ] = properties.correlation_id + self._attributes[SpanAttributes.MESSAGING_CONVERSATION_ID] = ( + properties.correlation_id + ) def build(self) -> Optional[Span]: if not is_instrumentation_enabled(): return None if self._operation: - self._attributes[SpanAttributes.MESSAGING_OPERATION] = self._operation.value + self._attributes[SpanAttributes.MESSAGING_OPERATION] = ( + self._operation.value + ) else: self._attributes[SpanAttributes.MESSAGING_TEMP_DESTINATION] = True span = self._tracer.start_span( - self._generate_span_name(), kind=self._kind, attributes=self._attributes + self._generate_span_name(), + kind=self._kind, + attributes=self._attributes, ) return span diff --git a/instrumentation/opentelemetry-instrumentation-aiohttp-client/tests/test_aiohttp_client_integration.py b/instrumentation/opentelemetry-instrumentation-aiohttp-client/tests/test_aiohttp_client_integration.py index 9ebb180de1..33b08fc0b6 100644 --- a/instrumentation/opentelemetry-instrumentation-aiohttp-client/tests/test_aiohttp_client_integration.py +++ b/instrumentation/opentelemetry-instrumentation-aiohttp-client/tests/test_aiohttp_client_integration.py @@ -71,7 +71,6 @@ async def do_request(): class TestAioHttpIntegration(TestBase): - _test_status_codes = ( (HTTPStatus.OK, StatusCode.UNSET), (HTTPStatus.TEMPORARY_REDIRECT, StatusCode.UNSET), diff --git a/instrumentation/opentelemetry-instrumentation-aiohttp-server/tests/test_aiohttp_server_integration.py b/instrumentation/opentelemetry-instrumentation-aiohttp-server/tests/test_aiohttp_server_integration.py index e9dfb11389..57eb6234a5 100644 --- a/instrumentation/opentelemetry-instrumentation-aiohttp-server/tests/test_aiohttp_server_integration.py +++ b/instrumentation/opentelemetry-instrumentation-aiohttp-server/tests/test_aiohttp_server_integration.py @@ -92,7 +92,6 @@ async def fixture_server_fixture(tracer, aiohttp_server, suppress): def test_checking_instrumentor_pkg_installed(): - (instrumentor_entrypoint,) = entry_points( group="opentelemetry_instrumentor", name="aiohttp-server" ) diff --git a/instrumentation/opentelemetry-instrumentation-aiokafka/src/opentelemetry/instrumentation/aiokafka/__init__.py 
b/instrumentation/opentelemetry-instrumentation-aiokafka/src/opentelemetry/instrumentation/aiokafka/__init__.py index 7d994be622..507206f4f2 100644 --- a/instrumentation/opentelemetry-instrumentation-aiokafka/src/opentelemetry/instrumentation/aiokafka/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-aiokafka/src/opentelemetry/instrumentation/aiokafka/__init__.py @@ -67,6 +67,7 @@ async def async_consume_hook(span, record, args, kwargs): API ___ """ + from asyncio import iscoroutinefunction from typing import Collection diff --git a/instrumentation/opentelemetry-instrumentation-aiopg/src/opentelemetry/instrumentation/aiopg/aiopg_integration.py b/instrumentation/opentelemetry-instrumentation-aiopg/src/opentelemetry/instrumentation/aiopg/aiopg_integration.py index a4bde482db..4e6257fbb1 100644 --- a/instrumentation/opentelemetry-instrumentation-aiopg/src/opentelemetry/instrumentation/aiopg/aiopg_integration.py +++ b/instrumentation/opentelemetry-instrumentation-aiopg/src/opentelemetry/instrumentation/aiopg/aiopg_integration.py @@ -102,7 +102,7 @@ async def traced_execution( cursor, query_method: typing.Callable[..., typing.Any], *args: typing.Tuple[typing.Any, typing.Any], - **kwargs: typing.Dict[typing.Any, typing.Any] + **kwargs: typing.Dict[typing.Any, typing.Any], ): name = "" if args: diff --git a/instrumentation/opentelemetry-instrumentation-aiopg/src/opentelemetry/instrumentation/aiopg/wrappers.py b/instrumentation/opentelemetry-instrumentation-aiopg/src/opentelemetry/instrumentation/aiopg/wrappers.py index c4252615b8..06098ee7a0 100644 --- a/instrumentation/opentelemetry-instrumentation-aiopg/src/opentelemetry/instrumentation/aiopg/wrappers.py +++ b/instrumentation/opentelemetry-instrumentation-aiopg/src/opentelemetry/instrumentation/aiopg/wrappers.py @@ -29,6 +29,7 @@ API --- """ + import logging import typing diff --git a/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py b/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py index bc45eacaa4..725532bc15 100644 --- a/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py @@ -301,9 +301,7 @@ def keys(self, carrier: dict) -> typing.List[str]: class ASGISetter(Setter[dict]): - def set( - self, carrier: dict, key: str, value: str - ) -> None: # pylint: disable=no-self-use + def set(self, carrier: dict, key: str, value: str) -> None: # pylint: disable=no-self-use """Sets response header values on an ASGI scope according to `the spec `_. 
Args: diff --git a/instrumentation/opentelemetry-instrumentation-asyncio/src/opentelemetry/instrumentation/asyncio/__init__.py b/instrumentation/opentelemetry-instrumentation-asyncio/src/opentelemetry/instrumentation/asyncio/__init__.py index e83f384a8c..2d1b063dfd 100644 --- a/instrumentation/opentelemetry-instrumentation-asyncio/src/opentelemetry/instrumentation/asyncio/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-asyncio/src/opentelemetry/instrumentation/asyncio/__init__.py @@ -76,7 +76,9 @@ def func(): API --- """ + import asyncio +import functools import sys from asyncio import futures from timeit import default_timer @@ -163,7 +165,6 @@ def instrument_method_with_coroutine(self, method_name: str): """ def wrap_coro_or_future(method, instance, args, kwargs): - # If the first argument is a coroutine or future, # we decorate it with a span and return the task. if args and len(args) > 0: @@ -231,14 +232,15 @@ def wrap_taskgroup_create_task(method, instance, args, kwargs) -> None: def trace_to_thread(self, func: callable): """Trace a function.""" start = default_timer() + func_name = getattr(func, "__name__", None) + if func_name is None and isinstance(func, functools.partial): + func_name = func.func.__name__ span = ( - self._tracer.start_span( - f"{ASYNCIO_PREFIX} to_thread-" + func.__name__ - ) - if func.__name__ in self._to_thread_name_to_trace + self._tracer.start_span(f"{ASYNCIO_PREFIX} to_thread-" + func_name) + if func_name in self._to_thread_name_to_trace else None ) - attr = {"type": "to_thread", "name": func.__name__} + attr = {"type": "to_thread", "name": func_name} exception = None try: attr["state"] = "finished" diff --git a/instrumentation/opentelemetry-instrumentation-asyncio/src/opentelemetry/instrumentation/asyncio/environment_variables.py b/instrumentation/opentelemetry-instrumentation-asyncio/src/opentelemetry/instrumentation/asyncio/environment_variables.py index 7420ea362f..9f324d60f4 100644 --- a/instrumentation/opentelemetry-instrumentation-asyncio/src/opentelemetry/instrumentation/asyncio/environment_variables.py +++ b/instrumentation/opentelemetry-instrumentation-asyncio/src/opentelemetry/instrumentation/asyncio/environment_variables.py @@ -15,6 +15,7 @@ """ Enter the names of the coroutines to be traced through the environment variable below, separated by commas. 
""" + OTEL_PYTHON_ASYNCIO_COROUTINE_NAMES_TO_TRACE = ( "OTEL_PYTHON_ASYNCIO_COROUTINE_NAMES_TO_TRACE" ) diff --git a/instrumentation/opentelemetry-instrumentation-asyncio/tests/test_asyncio_anext.py b/instrumentation/opentelemetry-instrumentation-asyncio/tests/test_asyncio_anext.py index 5241b3f2cc..f964044fc7 100644 --- a/instrumentation/opentelemetry-instrumentation-asyncio/tests/test_asyncio_anext.py +++ b/instrumentation/opentelemetry-instrumentation-asyncio/tests/test_asyncio_anext.py @@ -56,7 +56,7 @@ async def async_gen(): yield it async_gen_instance = async_gen() - agen = anext(async_gen_instance) + agen = anext(async_gen_instance) # noqa: F821 return await asyncio.create_task(agen) ret = asyncio.run(main()) diff --git a/instrumentation/opentelemetry-instrumentation-asyncio/tests/test_asyncio_to_thread.py b/instrumentation/opentelemetry-instrumentation-asyncio/tests/test_asyncio_to_thread.py index 3d795d8ae7..35191d3d03 100644 --- a/instrumentation/opentelemetry-instrumentation-asyncio/tests/test_asyncio_to_thread.py +++ b/instrumentation/opentelemetry-instrumentation-asyncio/tests/test_asyncio_to_thread.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import asyncio +import functools import sys from unittest import skipIf from unittest.mock import patch @@ -72,3 +73,37 @@ async def to_thread(): for point in metric.data.data_points: self.assertEqual(point.attributes["type"], "to_thread") self.assertEqual(point.attributes["name"], "multiply") + + @skipIf( + sys.version_info < (3, 9), "to_thread is only available in Python 3.9+" + ) + def test_to_thread_partial_func(self): + def multiply(x, y): + return x * y + + double = functools.partial(multiply, 2) + + async def to_thread(): + result = await asyncio.to_thread(double, 3) + assert result == 6 + + with self._tracer.start_as_current_span("root"): + asyncio.run(to_thread()) + spans = self.memory_exporter.get_finished_spans() + + self.assertEqual(len(spans), 2) + assert spans[0].name == "asyncio to_thread-multiply" + for metric in ( + self.memory_metrics_reader.get_metrics_data() + .resource_metrics[0] + .scope_metrics[0] + .metrics + ): + if metric.name == "asyncio.process.duration": + for point in metric.data.data_points: + self.assertEqual(point.attributes["type"], "to_thread") + self.assertEqual(point.attributes["name"], "multiply") + if metric.name == "asyncio.process.created": + for point in metric.data.data_points: + self.assertEqual(point.attributes["type"], "to_thread") + self.assertEqual(point.attributes["name"], "multiply") diff --git a/instrumentation/opentelemetry-instrumentation-asyncpg/src/opentelemetry/instrumentation/asyncpg/__init__.py b/instrumentation/opentelemetry-instrumentation-asyncpg/src/opentelemetry/instrumentation/asyncpg/__init__.py index ba76254aa8..306f8b15c0 100644 --- a/instrumentation/opentelemetry-instrumentation-asyncpg/src/opentelemetry/instrumentation/asyncpg/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-asyncpg/src/opentelemetry/instrumentation/asyncpg/__init__.py @@ -96,7 +96,6 @@ def _hydrate_span_from_args(connection, query, parameters) -> dict: class AsyncPGInstrumentor(BaseInstrumentor): - _leading_comment_remover = re.compile(r"^/\*.*?\*/") _tracer = None diff --git a/instrumentation/opentelemetry-instrumentation-aws-lambda/src/opentelemetry/instrumentation/aws_lambda/__init__.py b/instrumentation/opentelemetry-instrumentation-aws-lambda/src/opentelemetry/instrumentation/aws_lambda/__init__.py index 
fb5da8ce48..68db87ca30 100644 --- a/instrumentation/opentelemetry-instrumentation-aws-lambda/src/opentelemetry/instrumentation/aws_lambda/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-aws-lambda/src/opentelemetry/instrumentation/aws_lambda/__init__.py @@ -258,13 +258,11 @@ def _instrument( tracer_provider: TracerProvider = None, meter_provider: MeterProvider = None, ): - # pylint: disable=too-many-locals # pylint: disable=too-many-statements def _instrumented_lambda_handler_call( # noqa pylint: disable=too-many-branches call_wrapped, instance, args, kwargs ): - orig_handler_name = ".".join( [wrapped_module_name, wrapped_function_name] ) diff --git a/instrumentation/opentelemetry-instrumentation-aws-lambda/tests/test_aws_lambda_instrumentation_manual.py b/instrumentation/opentelemetry-instrumentation-aws-lambda/tests/test_aws_lambda_instrumentation_manual.py index 7f805c327c..4ac1e9c873 100644 --- a/instrumentation/opentelemetry-instrumentation-aws-lambda/tests/test_aws_lambda_instrumentation_manual.py +++ b/instrumentation/opentelemetry-instrumentation-aws-lambda/tests/test_aws_lambda_instrumentation_manual.py @@ -70,9 +70,7 @@ def __init__(self, aws_request_id, invoked_function_arn): SpanAttributes.FAAS_INVOCATION_ID: MOCK_LAMBDA_CONTEXT.aws_request_id, ResourceAttributes.CLOUD_ACCOUNT_ID: MOCK_LAMBDA_CONTEXT.invoked_function_arn.split( ":" - )[ - 4 - ], + )[4], } MOCK_XRAY_TRACE_ID = 0x5FB7331105E8BB83207FA31D4D9CDB4C diff --git a/instrumentation/opentelemetry-instrumentation-boto3sqs/src/opentelemetry/instrumentation/boto3sqs/__init__.py b/instrumentation/opentelemetry-instrumentation-boto3sqs/src/opentelemetry/instrumentation/boto3sqs/__init__.py index c0231f81e4..83e5ed70bc 100644 --- a/instrumentation/opentelemetry-instrumentation-boto3sqs/src/opentelemetry/instrumentation/boto3sqs/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-boto3sqs/src/opentelemetry/instrumentation/boto3sqs/__init__.py @@ -28,6 +28,7 @@ --- """ + import logging from typing import Any, Collection, Dict, Generator, List, Mapping, Optional diff --git a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/_messaging.py b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/_messaging.py index 271a8475e6..fdb1c7f8a5 100644 --- a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/_messaging.py +++ b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/_messaging.py @@ -35,7 +35,7 @@ def set(self, carrier: CarrierT, key: str, value: str): def inject_propagation_context( - carrier: MutableMapping[str, Any] + carrier: MutableMapping[str, Any], ) -> MutableMapping[str, Any]: if carrier is None: carrier = {} diff --git a/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/__init__.py b/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/__init__.py index 3d1cc79c93..aaf906d118 100644 --- a/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/__init__.py @@ -97,6 +97,7 @@ def instrument_consumer(consumer: Consumer, tracer_provider=None) ___ """ + from typing import 
Collection import confluent_kafka @@ -123,9 +124,7 @@ def instrument_consumer(consumer: Consumer, tracer_provider=None) class AutoInstrumentedProducer(Producer): # This method is deliberately implemented in order to allow wrapt to wrap this function - def produce( - self, topic, value=None, *args, **kwargs - ): # pylint: disable=keyword-arg-before-vararg,useless-super-delegation + def produce(self, topic, value=None, *args, **kwargs): # pylint: disable=keyword-arg-before-vararg,useless-super-delegation super().produce(topic, value, *args, **kwargs) @@ -139,9 +138,7 @@ def poll(self, timeout=-1): # pylint: disable=useless-super-delegation return super().poll(timeout) # This method is deliberately implemented in order to allow wrapt to wrap this function - def consume( - self, *args, **kwargs - ): # pylint: disable=useless-super-delegation + def consume(self, *args, **kwargs): # pylint: disable=useless-super-delegation return super().consume(*args, **kwargs) # This method is deliberately implemented in order to allow wrapt to wrap this function @@ -163,9 +160,7 @@ def poll(self, timeout=-1): def purge(self, in_queue=True, in_flight=True, blocking=True): self._producer.purge(in_queue, in_flight, blocking) - def produce( - self, topic, value=None, *args, **kwargs - ): # pylint: disable=keyword-arg-before-vararg + def produce(self, topic, value=None, *args, **kwargs): # pylint: disable=keyword-arg-before-vararg new_kwargs = kwargs.copy() new_kwargs["topic"] = topic new_kwargs["value"] = value @@ -205,9 +200,7 @@ def consume(self, *args, **kwargs): kwargs, ) - def get_watermark_offsets( - self, partition, timeout=-1, *args, **kwargs - ): # pylint: disable=keyword-arg-before-vararg + def get_watermark_offsets(self, partition, timeout=-1, *args, **kwargs): # pylint: disable=keyword-arg-before-vararg return self._consumer.get_watermark_offsets( partition, timeout, *args, **kwargs ) @@ -220,9 +213,7 @@ def poll(self, timeout=-1): self._consumer.poll, self, self._tracer, [timeout], {} ) - def subscribe( - self, topics, on_assign=lambda *args: None, *args, **kwargs - ): # pylint: disable=keyword-arg-before-vararg + def subscribe(self, topics, on_assign=lambda *args: None, *args, **kwargs): # pylint: disable=keyword-arg-before-vararg self._consumer.subscribe(topics, on_assign, *args, **kwargs) def original_consumer(self): @@ -363,7 +354,9 @@ def wrap_produce(func, instance, tracer, args, kwargs): headers = [] kwargs["headers"] = headers - topic = KafkaPropertiesExtractor.extract_produce_topic(args) + topic = KafkaPropertiesExtractor.extract_produce_topic( + args, kwargs + ) _enrich_span( span, topic, diff --git a/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/utils.py b/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/utils.py index 4769f2a88f..60dc13e675 100644 --- a/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/utils.py +++ b/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/utils.py @@ -25,11 +25,9 @@ def _extract_argument(key, position, default_value, args, kwargs): return kwargs.get(key, default_value) @staticmethod - def extract_produce_topic(args): + def extract_produce_topic(args, kwargs): """extract topic from `produce` method arguments in Producer class""" - if len(args) > 0: - return args[0] - return "unknown" + return kwargs.get("topic") 
or (args[0] if args else "unknown") @staticmethod def extract_produce_headers(args, kwargs): diff --git a/instrumentation/opentelemetry-instrumentation-confluent-kafka/tests/test_instrumentation.py b/instrumentation/opentelemetry-instrumentation-confluent-kafka/tests/test_instrumentation.py index 27653d6777..986116900d 100644 --- a/instrumentation/opentelemetry-instrumentation-confluent-kafka/tests/test_instrumentation.py +++ b/instrumentation/opentelemetry-instrumentation-confluent-kafka/tests/test_instrumentation.py @@ -284,6 +284,15 @@ def _compare_spans(self, spans, expected_spans): expected_attribute_value, span.attributes[attribute_key] ) + def _assert_topic(self, span, expected_topic: str) -> None: + self.assertEqual( + span.attributes[SpanAttributes.MESSAGING_DESTINATION], + expected_topic, + ) + + def _assert_span_count(self, span_list, expected_count: int) -> None: + self.assertEqual(len(span_list), expected_count) + def test_producer_poll(self) -> None: instrumentation = ConfluentKafkaInstrumentor() message_queue = [] @@ -299,6 +308,9 @@ def test_producer_poll(self) -> None: producer.produce(topic="topic-1", key="key-1", value="value-1") msg = producer.poll() self.assertIsNotNone(msg) + span_list = self.memory_exporter.get_finished_spans() + self._assert_span_count(span_list, 1) + self._assert_topic(span_list[0], "topic-1") def test_producer_flush(self) -> None: instrumentation = ConfluentKafkaInstrumentor() @@ -315,3 +327,6 @@ def test_producer_flush(self) -> None: producer.produce(topic="topic-1", key="key-1", value="value-1") msg = producer.flush() self.assertIsNotNone(msg) + span_list = self.memory_exporter.get_finished_spans() + self._assert_span_count(span_list, 1) + self._assert_topic(span_list[0], "topic-1") diff --git a/instrumentation/opentelemetry-instrumentation-confluent-kafka/tests/utils.py b/instrumentation/opentelemetry-instrumentation-confluent-kafka/tests/utils.py index 92e11798f6..f87dbd6576 100644 --- a/instrumentation/opentelemetry-instrumentation-confluent-kafka/tests/utils.py +++ b/instrumentation/opentelemetry-instrumentation-confluent-kafka/tests/utils.py @@ -8,9 +8,7 @@ def __init__(self, queue, config): self._queue = queue super().__init__(config) - def consume( - self, num_messages=1, *args, **kwargs - ): # pylint: disable=keyword-arg-before-vararg + def consume(self, num_messages=1, *args, **kwargs): # pylint: disable=keyword-arg-before-vararg messages = self._queue[:num_messages] self._queue = self._queue[num_messages:] return messages @@ -62,9 +60,7 @@ def __init__(self, queue, config): self._queue = queue super().__init__(config) - def produce( - self, *args, **kwargs - ): # pylint: disable=keyword-arg-before-vararg + def produce(self, *args, **kwargs): # pylint: disable=keyword-arg-before-vararg self._queue.append( MockedMessage( topic=kwargs.get("topic"), diff --git a/instrumentation/opentelemetry-instrumentation-django/src/opentelemetry/instrumentation/django/__init__.py b/instrumentation/opentelemetry-instrumentation-django/src/opentelemetry/instrumentation/django/__init__.py index d37c45993c..e5851a17c2 100644 --- a/instrumentation/opentelemetry-instrumentation-django/src/opentelemetry/instrumentation/django/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-django/src/opentelemetry/instrumentation/django/__init__.py @@ -285,6 +285,30 @@ def _get_django_middleware_setting() -> str: return "MIDDLEWARE" +def _get_django_otel_middleware_position( + middleware_length, default_middleware_position=0 +): + otel_position = 
environ.get("OTEL_PYTHON_DJANGO_MIDDLEWARE_POSITION") + try: + middleware_position = int(otel_position) + except (ValueError, TypeError): + _logger.debug( + "Invalid OTEL_PYTHON_DJANGO_MIDDLEWARE_POSITION value: (%s). Using default position: %d.", + otel_position, + default_middleware_position, + ) + middleware_position = default_middleware_position + + if middleware_position < 0 or middleware_position > middleware_length: + _logger.debug( + "Middleware position %d is out of range (0-%d). Using 0 as the position", + middleware_position, + middleware_length, + ) + middleware_position = 0 + return middleware_position + + class DjangoInstrumentor(BaseInstrumentor): """An instrumentor for Django @@ -388,10 +412,18 @@ def _instrument(self, **kwargs): is_sql_commentor_enabled = kwargs.pop("is_sql_commentor_enabled", None) + middleware_position = _get_django_otel_middleware_position( + len(settings_middleware), kwargs.pop("middleware_position", 0) + ) + if is_sql_commentor_enabled: - settings_middleware.insert(0, self._sql_commenter_middleware) + settings_middleware.insert( + middleware_position, self._sql_commenter_middleware + ) - settings_middleware.insert(0, self._opentelemetry_middleware) + settings_middleware.insert( + middleware_position, self._opentelemetry_middleware + ) setattr(settings, _middleware_setting, settings_middleware) diff --git a/instrumentation/opentelemetry-instrumentation-django/src/opentelemetry/instrumentation/django/middleware/otel_middleware.py b/instrumentation/opentelemetry-instrumentation-django/src/opentelemetry/instrumentation/django/middleware/otel_middleware.py index 667d6f1091..da807cc310 100644 --- a/instrumentation/opentelemetry-instrumentation-django/src/opentelemetry/instrumentation/django/middleware/otel_middleware.py +++ b/instrumentation/opentelemetry-instrumentation-django/src/opentelemetry/instrumentation/django/middleware/otel_middleware.py @@ -40,7 +40,10 @@ _start_internal_or_server_span, extract_attributes_from_object, ) -from opentelemetry.instrumentation.wsgi import add_response_attributes +from opentelemetry.instrumentation.wsgi import ( + add_response_attributes, + wsgi_getter, +) from opentelemetry.instrumentation.wsgi import ( collect_custom_request_headers_attributes as wsgi_collect_custom_request_headers_attributes, ) @@ -50,7 +53,6 @@ from opentelemetry.instrumentation.wsgi import ( collect_request_attributes as wsgi_collect_request_attributes, ) -from opentelemetry.instrumentation.wsgi import wsgi_getter from opentelemetry.semconv.attributes.http_attributes import HTTP_ROUTE from opentelemetry.semconv.trace import SpanAttributes from opentelemetry.trace import Span, SpanKind, use_span @@ -107,14 +109,17 @@ def __call__(self, request): # try/except block exclusive for optional ASGI imports. 
try: - from opentelemetry.instrumentation.asgi import asgi_getter, asgi_setter + from opentelemetry.instrumentation.asgi import ( + asgi_getter, + asgi_setter, + set_status_code, + ) from opentelemetry.instrumentation.asgi import ( collect_custom_headers_attributes as asgi_collect_custom_headers_attributes, ) from opentelemetry.instrumentation.asgi import ( collect_request_attributes as asgi_collect_request_attributes, ) - from opentelemetry.instrumentation.asgi import set_status_code _is_asgi_supported = True except ImportError: diff --git a/instrumentation/opentelemetry-instrumentation-django/tests/test_middleware.py b/instrumentation/opentelemetry-instrumentation-django/tests/test_middleware.py index 85ebbd747f..1c85935892 100644 --- a/instrumentation/opentelemetry-instrumentation-django/tests/test_middleware.py +++ b/instrumentation/opentelemetry-instrumentation-django/tests/test_middleware.py @@ -157,6 +157,46 @@ def tearDownClass(cls): super().tearDownClass() conf.settings = conf.LazySettings() + def test_middleware_added_at_position(self): + _django_instrumentor.uninstrument() + if DJANGO_2_0: + middleware = conf.settings.MIDDLEWARE + else: + middleware = conf.settings.MIDDLEWARE_CLASSES + # adding two dummy middlewares + temprory_middelware = "django.utils.deprecation.MiddlewareMixin" + middleware.append(temprory_middelware) + middleware.append(temprory_middelware) + + middleware_position = 1 + _django_instrumentor.instrument( + middleware_position=middleware_position + ) + self.assertEqual( + middleware[middleware_position], + "opentelemetry.instrumentation.django.middleware.otel_middleware._DjangoMiddleware", + ) + + def test_middleware_added_at_position_if_wrong_position(self): + _django_instrumentor.uninstrument() + if DJANGO_2_0: + middleware = conf.settings.MIDDLEWARE + else: + middleware = conf.settings.MIDDLEWARE_CLASSES + # adding middleware + temprory_middelware = "django.utils.deprecation.MiddlewareMixin" + middleware.append(temprory_middelware) + middleware_position = ( + 756 # wrong position out of bound of middleware length + ) + _django_instrumentor.instrument( + middleware_position=middleware_position + ) + self.assertEqual( + middleware[0], + "opentelemetry.instrumentation.django.middleware.otel_middleware._DjangoMiddleware", + ) + def test_templated_route_get(self): Client().get("/route/2020/template/") diff --git a/instrumentation/opentelemetry-instrumentation-django/tests/test_sqlcommenter.py b/instrumentation/opentelemetry-instrumentation-django/tests/test_sqlcommenter.py index f9b8ed5233..eec02d7a54 100644 --- a/instrumentation/opentelemetry-instrumentation-django/tests/test_sqlcommenter.py +++ b/instrumentation/opentelemetry-instrumentation-django/tests/test_sqlcommenter.py @@ -72,6 +72,37 @@ def test_middleware_added(self, sqlcommenter_middleware): in middleware ) + @patch( + "opentelemetry.instrumentation.django.middleware.sqlcommenter_middleware.SqlCommenter" + ) + def test_middleware_added_at_position(self, sqlcommenter_middleware): + _django_instrumentor.uninstrument() + if DJANGO_2_0: + middleware = conf.settings.MIDDLEWARE + else: + middleware = conf.settings.MIDDLEWARE_CLASSES + + # adding two dummy middlewares + temprory_middelware = "django.utils.deprecation.MiddlewareMixin" + middleware.append(temprory_middelware) + middleware.append(temprory_middelware) + + middleware_position = 1 + _django_instrumentor.instrument( + is_sql_commentor_enabled=True, + middleware_position=middleware_position, + ) + instance = sqlcommenter_middleware.return_value + 
instance.get_response = HttpResponse() + self.assertEqual( + middleware[middleware_position], + "opentelemetry.instrumentation.django.middleware.otel_middleware._DjangoMiddleware", + ) + self.assertEqual( + middleware[middleware_position + 1], + "opentelemetry.instrumentation.django.middleware.sqlcommenter_middleware.SqlCommenter", + ) + @patch( "opentelemetry.instrumentation.django.middleware.sqlcommenter_middleware._get_opentelemetry_values" ) diff --git a/instrumentation/opentelemetry-instrumentation-django/tests/views.py b/instrumentation/opentelemetry-instrumentation-django/tests/views.py index 6310664100..f2ede18b74 100644 --- a/instrumentation/opentelemetry-instrumentation-django/tests/views.py +++ b/instrumentation/opentelemetry-instrumentation-django/tests/views.py @@ -25,9 +25,7 @@ def excluded_noarg2(request): # pylint: disable=unused-argument return HttpResponse() -def route_span_name( - request, *args, **kwargs -): # pylint: disable=unused-argument +def route_span_name(request, *args, **kwargs): # pylint: disable=unused-argument return HttpResponse() @@ -49,9 +47,7 @@ async def async_traced(request): # pylint: disable=unused-argument return HttpResponse() -async def async_traced_template( - request, year -): # pylint: disable=unused-argument +async def async_traced_template(request, year): # pylint: disable=unused-argument return HttpResponse() @@ -71,9 +67,7 @@ async def async_excluded_noarg2(request): # pylint: disable=unused-argument return HttpResponse() -async def async_route_span_name( - request, *args, **kwargs -): # pylint: disable=unused-argument +async def async_route_span_name(request, *args, **kwargs): # pylint: disable=unused-argument return HttpResponse() diff --git a/instrumentation/opentelemetry-instrumentation-falcon/src/opentelemetry/instrumentation/falcon/__init__.py b/instrumentation/opentelemetry-instrumentation-falcon/src/opentelemetry/instrumentation/falcon/__init__.py index 2dce5f1ef5..28b394eaf0 100644 --- a/instrumentation/opentelemetry-instrumentation-falcon/src/opentelemetry/instrumentation/falcon/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-falcon/src/opentelemetry/instrumentation/falcon/__init__.py @@ -296,9 +296,7 @@ def __del__(self): if self in _InstrumentedFalconAPI._instrumented_falcon_apps: _InstrumentedFalconAPI._instrumented_falcon_apps.remove(self) - def _handle_exception( - self, arg1, arg2, arg3, arg4 - ): # pylint: disable=C0103 + def _handle_exception(self, arg1, arg2, arg3, arg4): # pylint: disable=C0103 # Falcon 3 does not execute middleware within the context of the exception # so we capture the exception here and save it into the env dict @@ -437,9 +435,7 @@ def process_resource(self, req, resp, resource, params): resource_name = resource.__class__.__name__ span.set_attribute("falcon.resource", resource_name) - def process_response( - self, req, resp, resource, req_succeeded=None - ): # pylint:disable=R0201,R0912 + def process_response(self, req, resp, resource, req_succeeded=None): # pylint:disable=R0201,R0912 span = req.env.get(_ENVIRON_SPAN_KEY) if not span or not span.is_recording(): diff --git a/instrumentation/opentelemetry-instrumentation-fastapi/tests/test_fastapi_instrumentation.py b/instrumentation/opentelemetry-instrumentation-fastapi/tests/test_fastapi_instrumentation.py index bde91ccfcf..fdbad4effb 100644 --- a/instrumentation/opentelemetry-instrumentation-fastapi/tests/test_fastapi_instrumentation.py +++ b/instrumentation/opentelemetry-instrumentation-fastapi/tests/test_fastapi_instrumentation.py 
@@ -1073,9 +1073,7 @@ def test_instruments_with_fastapi_installed(self, mock_version): self.assertEqual(ep.name, "fastapi") @patch("opentelemetry.instrumentation.dependencies.version") - def test_instruments_with_old_fastapi_installed( - self, mock_version - ): # pylint: disable=no-self-use + def test_instruments_with_old_fastapi_installed(self, mock_version): # pylint: disable=no-self-use mock_version.side_effect = mock_version_with_old_fastapi mock_distro = Mock() _load_instrumentors(mock_distro) @@ -1083,9 +1081,7 @@ def test_instruments_with_old_fastapi_installed( mock_distro.load_instrumentor.assert_not_called() @patch("opentelemetry.instrumentation.dependencies.version") - def test_instruments_without_fastapi_installed( - self, mock_version - ): # pylint: disable=no-self-use + def test_instruments_without_fastapi_installed(self, mock_version): # pylint: disable=no-self-use mock_version.side_effect = mock_version_without_fastapi mock_distro = Mock() _load_instrumentors(mock_distro) diff --git a/instrumentation/opentelemetry-instrumentation-fastapi/tests/test_fastapi_instrumentation_custom_headers.py b/instrumentation/opentelemetry-instrumentation-fastapi/tests/test_fastapi_instrumentation_custom_headers.py index e7adca735c..0a1b20155e 100644 --- a/instrumentation/opentelemetry-instrumentation-fastapi/tests/test_fastapi_instrumentation_custom_headers.py +++ b/instrumentation/opentelemetry-instrumentation-fastapi/tests/test_fastapi_instrumentation_custom_headers.py @@ -18,7 +18,6 @@ class MultiMapping(Mapping): - def __init__(self, *items: Tuple[str, str]): self._items = items diff --git a/instrumentation/opentelemetry-instrumentation-flask/src/opentelemetry/instrumentation/flask/__init__.py b/instrumentation/opentelemetry-instrumentation-flask/src/opentelemetry/instrumentation/flask/__init__.py index 761fa3660f..f80c0de808 100644 --- a/instrumentation/opentelemetry-instrumentation-flask/src/opentelemetry/instrumentation/flask/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-flask/src/opentelemetry/instrumentation/flask/__init__.py @@ -238,6 +238,7 @@ def response_hook(span: Span, status: str, response_headers: List): API --- """ + import weakref from logging import getLogger from time import time_ns diff --git a/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/__init__.py b/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/__init__.py index 717977146e..ff0fa93902 100644 --- a/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/__init__.py @@ -273,6 +273,7 @@ async def serve(): services ``GRPCTestServer`` and ``GRPCHealthServer``. 
""" + import os from typing import Callable, Collection, List, Union diff --git a/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/grpcext/_interceptor.py b/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/grpcext/_interceptor.py index 32cec6dee0..c7eec06c99 100644 --- a/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/grpcext/_interceptor.py +++ b/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/grpcext/_interceptor.py @@ -17,7 +17,6 @@ """Implementation of gRPC Python interceptors.""" - import collections import grpc diff --git a/instrumentation/opentelemetry-instrumentation-httpx/src/opentelemetry/instrumentation/httpx/__init__.py b/instrumentation/opentelemetry-instrumentation-httpx/src/opentelemetry/instrumentation/httpx/__init__.py index 8f47181080..2536f36596 100644 --- a/instrumentation/opentelemetry-instrumentation-httpx/src/opentelemetry/instrumentation/httpx/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-httpx/src/opentelemetry/instrumentation/httpx/__init__.py @@ -191,6 +191,7 @@ async def async_response_hook(span, request, response): API --- """ + import logging import typing from asyncio import iscoroutinefunction @@ -310,7 +311,7 @@ def _inject_propagation_headers(headers, args, kwargs): def _extract_response( response: typing.Union[ httpx.Response, typing.Tuple[int, Headers, httpx.SyncByteStream, dict] - ] + ], ) -> typing.Tuple[int, Headers, httpx.SyncByteStream, dict, str]: if isinstance(response, httpx.Response): status_code = response.status_code @@ -561,7 +562,9 @@ async def __aexit__( await self._transport.__aexit__(exc_type, exc_value, traceback) # pylint: disable=R0914 - async def handle_async_request(self, *args, **kwargs) -> typing.Union[ + async def handle_async_request( + self, *args, **kwargs + ) -> typing.Union[ typing.Tuple[int, "Headers", httpx.AsyncByteStream, dict], httpx.Response, ]: @@ -1043,7 +1046,7 @@ def instrument_client( @staticmethod def uninstrument_client( - client: typing.Union[httpx.Client, httpx.AsyncClient] + client: typing.Union[httpx.Client, httpx.AsyncClient], ): """Disables instrumentation for the given client instance diff --git a/instrumentation/opentelemetry-instrumentation-httpx/tests/test_httpx_integration.py b/instrumentation/opentelemetry-instrumentation-httpx/tests/test_httpx_integration.py index 823f8bc028..3efe081034 100644 --- a/instrumentation/opentelemetry-instrumentation-httpx/tests/test_httpx_integration.py +++ b/instrumentation/opentelemetry-instrumentation-httpx/tests/test_httpx_integration.py @@ -918,7 +918,6 @@ def test_instrument_client(self): self.assert_span(num_spans=1) def test_instrumentation_without_client(self): - HTTPXClientInstrumentor().instrument() results = [ httpx.get(self.URL), diff --git a/instrumentation/opentelemetry-instrumentation-kafka-python/src/opentelemetry/instrumentation/kafka/__init__.py b/instrumentation/opentelemetry-instrumentation-kafka-python/src/opentelemetry/instrumentation/kafka/__init__.py index b29990d6e3..9b0f4895f9 100644 --- a/instrumentation/opentelemetry-instrumentation-kafka-python/src/opentelemetry/instrumentation/kafka/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-kafka-python/src/opentelemetry/instrumentation/kafka/__init__.py @@ -67,6 +67,7 @@ def consume_hook(span, record, args, kwargs): API ___ """ + from importlib.metadata import PackageNotFoundError, distribution from 
typing import Collection diff --git a/instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/__init__.py b/instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/__init__.py index ce332d0113..35d202215d 100644 --- a/instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/__init__.py @@ -36,7 +36,7 @@ get_tracer_provider, ) -__doc__ = _MODULE_DOC +__doc__ = _MODULE_DOC # noqa: A001 LEVELS = { "debug": logging.DEBUG, diff --git a/instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/constants.py b/instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/constants.py index b18f93364f..5eb6798231 100644 --- a/instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/constants.py +++ b/instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/constants.py @@ -136,6 +136,4 @@ are not injected into the log record objects. This means any attempted log statements made after setting the logging format and before enabling this integration will result in KeyError exceptions. Such exceptions are automatically swallowed by the logging module and do not result in crashes but you may still lose out on important log messages. -""".format( - default_logging_format=DEFAULT_LOGGING_FORMAT -) +""".format(default_logging_format=DEFAULT_LOGGING_FORMAT) diff --git a/instrumentation/opentelemetry-instrumentation-logging/tests/test_logging.py b/instrumentation/opentelemetry-instrumentation-logging/tests/test_logging.py index c8b8744cf3..4045a44204 100644 --- a/instrumentation/opentelemetry-instrumentation-logging/tests/test_logging.py +++ b/instrumentation/opentelemetry-instrumentation-logging/tests/test_logging.py @@ -146,9 +146,7 @@ def test_custom_format_and_level_env(self, basic_config_mock): env_patch.stop() @mock.patch("logging.basicConfig") - def test_custom_format_and_level_api( - self, basic_config_mock - ): # pylint: disable=no-self-use + def test_custom_format_and_level_api(self, basic_config_mock): # pylint: disable=no-self-use LoggingInstrumentor().uninstrument() LoggingInstrumentor().instrument( set_logging_format=True, diff --git a/instrumentation/opentelemetry-instrumentation-openai-v2/LICENSE b/instrumentation/opentelemetry-instrumentation-openai-v2/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-openai-v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/instrumentation/opentelemetry-instrumentation-openai-v2/README.rst b/instrumentation/opentelemetry-instrumentation-openai-v2/README.rst new file mode 100644 index 0000000000..cd7e9b3922 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-openai-v2/README.rst @@ -0,0 +1,26 @@ +OpenTelemetry OpenAI Instrumentation +==================================== + +|pypi| + +.. |pypi| image:: https://badge.fury.io/py/opentelemetry-instrumentation-openai-v2.svg + :target: https://pypi.org/project/opentelemetry-instrumentation-openai-v2/ + +Instrumentation with OpenAI that supports the openai library and is +specified to trace_integration using 'OpenAI'. + + +Installation +------------ + +:: + + pip install opentelemetry-instrumentation-openai-v2 + + +References +---------- +* `OpenTelemetry OpenAI Instrumentation `_ +* `OpenTelemetry Project `_ +* `OpenTelemetry Python Examples `_ + diff --git a/instrumentation/opentelemetry-instrumentation-openai-v2/pyproject.toml b/instrumentation/opentelemetry-instrumentation-openai-v2/pyproject.toml new file mode 100644 index 0000000000..b342cdade7 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-openai-v2/pyproject.toml @@ -0,0 +1,54 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "opentelemetry-instrumentation-openai-v2" +dynamic = ["version"] +description = "OpenTelemetry Official OpenAI instrumentation" +readme = "README.rst" +license = "Apache-2.0" +requires-python = ">=3.8" +authors = [ + { name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" }, +] +classifiers = [ + "Development Status :: 4 - Beta", + "Intended Audience :: Developers", + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", +] +dependencies = [ + "opentelemetry-api ~= 1.12", + "opentelemetry-instrumentation ~= 0.48b0", + "opentelemetry-semantic-conventions ~= 0.48b0" +] + +[project.optional-dependencies] +instruments = [ + "openai >= 1.0.0", +] + +[project.entry-points.opentelemetry_instrumentor] +openai = "opentelemetry.instrumentation.openai_v2:OpenAIInstrumentor" + +[project.urls] +Homepage = "https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-openai-v2" + +[tool.hatch.version] +path = "src/opentelemetry/instrumentation/openai_v2/version.py" + +[tool.hatch.build.targets.sdist] +include = [ + "/src", + "/tests", +] + +[tool.hatch.build.targets.wheel] +packages = ["src/opentelemetry"] diff --git a/instrumentation/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/__init__.py b/instrumentation/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/__init__.py new file mode 100644 index 0000000000..347ddb70ff --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/__init__.py @@ -0,0 +1,78 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +OpenAI client instrumentation supporting `openai`, it can be enabled by +using ``OpenAIInstrumentor``. + +.. _openai: https://pypi.org/project/openai/ + +Usage +----- + +.. code:: python + + from openai import OpenAI + from opentelemetry.instrumentation.openai import OpenAIInstrumentor + + OpenAIInstrumentor().instrument() + + client = OpenAI() + response = client.chat.completions.create( + model="gpt-4o-mini", + messages=[ + {"role": "user", "content": "Write a short poem on open telemetry."}, + ], + ) + +API +--- +""" + +from typing import Collection + +from wrapt import wrap_function_wrapper + +from opentelemetry.instrumentation.instrumentor import BaseInstrumentor +from opentelemetry.instrumentation.openai_v2.package import _instruments +from opentelemetry.instrumentation.utils import unwrap +from opentelemetry.semconv.schemas import Schemas +from opentelemetry.trace import get_tracer + +from .patch import chat_completions_create + + +class OpenAIInstrumentor(BaseInstrumentor): + def instrumentation_dependencies(self) -> Collection[str]: + return _instruments + + def _instrument(self, **kwargs): + """Enable OpenAI instrumentation.""" + tracer_provider = kwargs.get("tracer_provider") + tracer = get_tracer( + __name__, + "", + tracer_provider, + schema_url=Schemas.V1_27_0.value, + ) + wrap_function_wrapper( + module="openai.resources.chat.completions", + name="Completions.create", + wrapper=chat_completions_create(tracer), + ) + + def _uninstrument(self, **kwargs): + import openai + + unwrap(openai.resources.chat.completions.Completions, "create") diff --git a/instrumentation/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/package.py b/instrumentation/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/package.py new file mode 100644 index 0000000000..b53e25f7df --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/package.py @@ -0,0 +1,16 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
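The new OpenAIInstrumentor above wraps openai's Completions.create with wrapt and removes the wrapper again in _uninstrument(). A minimal wiring sketch, assuming the OpenTelemetry SDK is installed; only OpenAIInstrumentor and its tracer_provider keyword come from the diff (the import path matches the pyproject entry point above), and the exporter setup is illustrative:

    from opentelemetry import trace
    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import (
        BatchSpanProcessor,
        ConsoleSpanExporter,
    )

    from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor

    provider = TracerProvider()
    provider.add_span_processor(BatchSpanProcessor(ConsoleSpanExporter()))
    trace.set_tracer_provider(provider)

    # Without the keyword, the global tracer provider is used instead.
    OpenAIInstrumentor().instrument(tracer_provider=provider)

    # ... every client.chat.completions.create(...) call is now traced by
    # chat_completions_create() and emits a CLIENT span ...

    # Restores the original method via opentelemetry.instrumentation.utils.unwrap.
    OpenAIInstrumentor().uninstrument()
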
+ + +_instruments = ("openai >= 1.26.0",) diff --git a/instrumentation/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/patch.py b/instrumentation/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/patch.py new file mode 100644 index 0000000000..ddc54cad19 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/patch.py @@ -0,0 +1,315 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import json + +from opentelemetry.semconv._incubating.attributes import ( + gen_ai_attributes as GenAIAttributes, +) +from opentelemetry.semconv.attributes import ( + error_attributes as ErrorAttributes, +) +from opentelemetry.trace import Span, SpanKind, Tracer +from opentelemetry.trace.status import Status, StatusCode + +from .utils import ( + extract_content, + extract_tools_prompt, + get_llm_request_attributes, + is_streaming, + set_event_completion, + set_event_prompt, + set_span_attribute, + silently_fail, +) + + +def chat_completions_create(tracer: Tracer): + """Wrap the `create` method of the `ChatCompletion` class to trace it.""" + + def traced_method(wrapped, instance, args, kwargs): + llm_prompts = [] + + for item in kwargs.get("messages", []): + tools_prompt = extract_tools_prompt(item) + llm_prompts.append(tools_prompt if tools_prompt else item) + + span_attributes = {**get_llm_request_attributes(kwargs)} + span_name = f"{span_attributes[GenAIAttributes.GEN_AI_OPERATION_NAME]} {span_attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL]}" + + span = tracer.start_span( + name=span_name, kind=SpanKind.CLIENT, attributes=span_attributes + ) + if span.is_recording(): + _set_input_attributes(span, span_attributes) + set_event_prompt(span, json.dumps(llm_prompts)) + + try: + result = wrapped(*args, **kwargs) + if is_streaming(kwargs): + return StreamWrapper( + result, + span, + function_call=kwargs.get("functions") is not None, + tool_calls=kwargs.get("tools") is not None, + ) + else: + if span.is_recording(): + _set_response_attributes(span, result) + span.end() + return result + + except Exception as error: + span.set_status(Status(StatusCode.ERROR, str(error))) + if span.is_recording(): + span.set_attribute( + ErrorAttributes.ERROR_TYPE, type(error).__qualname__ + ) + span.end() + raise + + return traced_method + + +@silently_fail +def _set_input_attributes(span, attributes): + for field, value in attributes.items(): + set_span_attribute(span, field, value) + + +@silently_fail +def _set_response_attributes(span, result): + set_span_attribute( + span, GenAIAttributes.GEN_AI_RESPONSE_MODEL, result.model + ) + if getattr(result, "choices", None): + choices = result.choices + responses = [ + { + "role": ( + choice.message.role + if choice.message and choice.message.role + else "assistant" + ), + "content": extract_content(choice), + **( + { + "content_filter_results": choice[ + "content_filter_results" + ] + } + if "content_filter_results" 
in choice + else {} + ), + } + for choice in choices + ] + finish_reasons = [] + for choice in choices: + finish_reasons.append(choice.finish_reason or "error") + + set_span_attribute( + span, + GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS, + finish_reasons, + ) + set_event_completion(span, responses) + + if getattr(result, "id", None): + set_span_attribute(span, GenAIAttributes.GEN_AI_RESPONSE_ID, result.id) + + # Get the usage + if getattr(result, "usage", None): + set_span_attribute( + span, + GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS, + result.usage.prompt_tokens, + ) + set_span_attribute( + span, + GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS, + result.usage.completion_tokens, + ) + + +class StreamWrapper: + span: Span + response_id: str = "" + response_model: str = "" + + def __init__( + self, + stream, + span, + prompt_tokens=0, + function_call=False, + tool_calls=False, + ): + self.stream = stream + self.span = span + self.prompt_tokens = prompt_tokens + self.function_call = function_call + self.tool_calls = tool_calls + self.result_content = [] + self.completion_tokens = 0 + self._span_started = False + self.setup() + + def setup(self): + if not self._span_started: + self._span_started = True + + def cleanup(self): + if self._span_started: + if self.response_model: + set_span_attribute( + self.span, + GenAIAttributes.GEN_AI_RESPONSE_MODEL, + self.response_model, + ) + + if self.response_id: + set_span_attribute( + self.span, + GenAIAttributes.GEN_AI_RESPONSE_ID, + self.response_id, + ) + + set_span_attribute( + self.span, + GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS, + self.prompt_tokens, + ) + set_span_attribute( + self.span, + GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS, + self.completion_tokens, + ) + set_event_completion( + self.span, + [ + { + "role": "assistant", + "content": "".join(self.result_content), + } + ], + ) + + self.span.end() + self._span_started = False + + def __enter__(self): + self.setup() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + try: + if exc_type is not None: + self.span.set_status(Status(StatusCode.ERROR, str(exc_val))) + self.span.set_attribute( + ErrorAttributes.ERROR_TYPE, exc_type.__qualname__ + ) + finally: + self.cleanup() + return False # Propagate the exception + + def __iter__(self): + return self + + def __next__(self): + try: + chunk = next(self.stream) + self.process_chunk(chunk) + return chunk + except StopIteration: + self.cleanup() + raise + except Exception as error: + self.span.set_status(Status(StatusCode.ERROR, str(error))) + self.span.set_attribute( + ErrorAttributes.ERROR_TYPE, type(error).__qualname__ + ) + self.cleanup() + raise + + def set_response_model(self, chunk): + if self.response_model: + return + + if getattr(chunk, "model", None): + self.response_model = chunk.model + + def set_response_id(self, chunk): + if self.response_id: + return + + if getattr(chunk, "id", None): + self.response_id = chunk.id + + def build_streaming_response(self, chunk): + if getattr(chunk, "choices", None) is None: + return + + choices = chunk.choices + content = [] + if not self.function_call and not self.tool_calls: + for choice in choices: + if choice.delta and choice.delta.content is not None: + content = [choice.delta.content] + + elif self.function_call: + for choice in choices: + if ( + choice.delta + and choice.delta.function_call is not None + and choice.delta.function_call.arguments is not None + ): + content = [choice.delta.function_call.arguments] + + elif self.tool_calls: + for choice in choices: + if 
choice.delta and choice.delta.tool_calls is not None: + toolcalls = choice.delta.tool_calls + content = [] + for tool_call in toolcalls: + if ( + tool_call + and tool_call.function is not None + and tool_call.function.arguments is not None + ): + content.append(tool_call.function.arguments) + + finish_reasons = [] + for choice in choices: + finish_reasons.append(choice.finish_reason or "error") + + set_span_attribute( + self.span, + GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS, + finish_reasons, + ) + if content: + self.result_content.append(content[0]) + + def set_usage(self, chunk): + if getattr(chunk, "usage", None): + self.completion_tokens = chunk.usage.completion_tokens + self.prompt_tokens = chunk.usage.prompt_tokens + + def process_chunk(self, chunk): + self.set_response_id(chunk) + self.set_response_model(chunk) + self.build_streaming_response(chunk) + self.set_usage(chunk) diff --git a/instrumentation/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/utils.py b/instrumentation/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/utils.py new file mode 100644 index 0000000000..ba2301cc5d --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/utils.py @@ -0,0 +1,164 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import logging +from typing import Optional, Union + +from openai import NOT_GIVEN + +from opentelemetry.semconv._incubating.attributes import ( + gen_ai_attributes as GenAIAttributes, +) + + +def silently_fail(func): + """ + A decorator that catches exceptions thrown by the decorated function and logs them as warnings. 
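When stream=True is requested, the traced method above returns a StreamWrapper instead of ending the span immediately; the span stays open while chunks are consumed and is closed in cleanup(). A rough sketch of the consumer side, assuming the instrumentor is active and an OpenAI API key is configured; the model and messages are illustrative:

    from openai import OpenAI

    client = OpenAI()
    stream = client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": "Say this is a test"}],
        stream=True,
        stream_options={"include_usage": True},
    )
    # Each chunk passes through StreamWrapper.__next__, which records the
    # response id/model, accumulates the delta content, and captures token
    # usage from the final chunk.
    for chunk in stream:
        pass
    # On StopIteration (or when a `with stream:` block exits), cleanup() sets
    # the completion event and token-usage attributes and ends the span.
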
+ """ + + logger = logging.getLogger(func.__module__) + + def wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + except Exception as exception: + logger.warning( + "Failed to execute %s, error: %s", + func.__name__, + str(exception), + ) + + return wrapper + + +def extract_content(choice): + if getattr(choice, "message", None) is None: + return "" + + # Check if choice.message exists and has a content attribute + message = choice.message + if getattr(message, "content", None): + return choice.message.content + + # Check if choice.message has tool_calls and extract information accordingly + elif getattr(message, "tool_calls", None): + result = [ + { + "id": tool_call.id, + "type": tool_call.type, + "function": { + "name": tool_call.function.name, + "arguments": tool_call.function.arguments, + }, + } + for tool_call in choice.message.tool_calls + ] + return result + + # Check if choice.message has a function_call and extract information accordingly + elif getattr(message, "function_call", None): + return { + "name": choice.message.function_call.name, + "arguments": choice.message.function_call.arguments, + } + + # Return an empty string if none of the above conditions are met + else: + return "" + + +def extract_tools_prompt(item): + tool_calls = getattr(item, "tool_calls", None) + if tool_calls is None: + return + + calls = [] + for tool_call in tool_calls: + tool_call_dict = { + "id": getattr(tool_call, "id", ""), + "type": getattr(tool_call, "type", ""), + } + + if hasattr(tool_call, "function"): + tool_call_dict["function"] = { + "name": getattr(tool_call.function, "name", ""), + "arguments": getattr(tool_call.function, "arguments", ""), + } + calls.append(tool_call_dict) + return calls + + +def set_event_prompt(span, prompt): + span.add_event( + name="gen_ai.content.prompt", + attributes={ + GenAIAttributes.GEN_AI_PROMPT: prompt, + }, + ) + + +def set_span_attributes(span, attributes: dict): + for field, value in attributes.model_dump(by_alias=True).items(): + set_span_attribute(span, field, value) + + +def set_event_completion(span, result_content): + span.add_event( + name="gen_ai.content.completion", + attributes={ + GenAIAttributes.GEN_AI_COMPLETION: json.dumps(result_content), + }, + ) + + +def set_span_attribute(span, name, value): + if non_numerical_value_is_set(value) is False: + return + + span.set_attribute(name, value) + + +def is_streaming(kwargs): + return non_numerical_value_is_set(kwargs.get("stream")) + + +def non_numerical_value_is_set(value: Optional[Union[bool, str]]): + return bool(value) and value != NOT_GIVEN + + +def get_llm_request_attributes( + kwargs, + operation_name=GenAIAttributes.GenAiOperationNameValues.CHAT.value, +): + attributes = { + GenAIAttributes.GEN_AI_OPERATION_NAME: operation_name, + GenAIAttributes.GEN_AI_SYSTEM: GenAIAttributes.GenAiSystemValues.OPENAI.value, + GenAIAttributes.GEN_AI_REQUEST_MODEL: kwargs.get( + "model", "gpt-3.5-turbo" + ), + GenAIAttributes.GEN_AI_REQUEST_TEMPERATURE: kwargs.get("temperature"), + GenAIAttributes.GEN_AI_REQUEST_TOP_P: kwargs.get("p") + or kwargs.get("top_p"), + GenAIAttributes.GEN_AI_REQUEST_MAX_TOKENS: kwargs.get("max_tokens"), + GenAIAttributes.GEN_AI_REQUEST_PRESENCE_PENALTY: kwargs.get( + "presence_penalty" + ), + GenAIAttributes.GEN_AI_REQUEST_FREQUENCY_PENALTY: kwargs.get( + "frequency_penalty" + ), + } + + # filter out None values + return {k: v for k, v in attributes.items() if v is not None} diff --git 
a/instrumentation/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/version.py b/instrumentation/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/version.py new file mode 100644 index 0000000000..0e95b73df6 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/version.py @@ -0,0 +1,15 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__version__ = "2.0.0.dev" diff --git a/instrumentation/opentelemetry-instrumentation-openai-v2/test-requirements.txt b/instrumentation/opentelemetry-instrumentation-openai-v2/test-requirements.txt new file mode 100644 index 0000000000..198226dcfa --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-openai-v2/test-requirements.txt @@ -0,0 +1,11 @@ +openai==1.26.0 +pydantic==2.8.2 +Deprecated==1.2.14 +importlib-metadata==6.11.0 +packaging==24.0 +pytest==7.4.4 +pytest-vcr==1.0.2 +wrapt==1.16.0 + +-e opentelemetry-instrumentation +-e instrumentation/opentelemetry-instrumentation-openai-v2 diff --git a/instrumentation/opentelemetry-instrumentation-openai-v2/tests/__init__.py b/instrumentation/opentelemetry-instrumentation-openai-v2/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/instrumentation/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_chat_completion.yaml b/instrumentation/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_chat_completion.yaml new file mode 100644 index 0000000000..3bfd94e415 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_chat_completion.yaml @@ -0,0 +1,92 @@ +interactions: +- request: + body: '{"messages": [{"role": "user", "content": "Say this is a test"}], "model": + "gpt-4", "stream": false}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '100' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.47.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.47.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.5 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-AC6ajKKHHpvf6x2Qm35t6m3QE8qli\",\n \"object\": + \"chat.completion\",\n \"created\": 1727448637,\n \"model\": \"gpt-4-0613\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"This is a test.\",\n \"refusal\": + null\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n + \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 12,\n \"completion_tokens\": + 5,\n \"total_tokens\": 17,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": + 0\n }\n },\n \"system_fingerprint\": null\n}\n" + headers: + CF-Cache-Status: + - 
DYNAMIC + CF-RAY: + - 8c9c4e9b7fb674d8-PMO + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Fri, 27 Sep 2024 14:50:37 GMT + Server: + - cloudflare + Set-Cookie: test_set_cookie + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + content-length: + - '551' + openai-organization: test_organization + openai-processing-ms: + - '434' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '1000000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '999977' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 1ms + x-request-id: + - req_5f2690abaf909a9f047488694d44495e + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_chat_completion_streaming.yaml b/instrumentation/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_chat_completion_streaming.yaml new file mode 100644 index 0000000000..c61133739b --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_chat_completion_streaming.yaml @@ -0,0 +1,113 @@ +interactions: +- request: + body: '{"messages": [{"role": "user", "content": "Say this is a test"}], "model": + "gpt-4", "stream": true, "stream_options": {"include_usage": true}}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '142' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.47.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.47.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.5 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: 'data: {"id":"chatcmpl-AC6akONKCxc8HS63qZ08HyjeTSq6p","object":"chat.completion.chunk","created":1727448638,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}],"usage":null} + + + data: {"id":"chatcmpl-AC6akONKCxc8HS63qZ08HyjeTSq6p","object":"chat.completion.chunk","created":1727448638,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"This"},"logprobs":null,"finish_reason":null}],"usage":null} + + + data: {"id":"chatcmpl-AC6akONKCxc8HS63qZ08HyjeTSq6p","object":"chat.completion.chunk","created":1727448638,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" + is"},"logprobs":null,"finish_reason":null}],"usage":null} + + + data: {"id":"chatcmpl-AC6akONKCxc8HS63qZ08HyjeTSq6p","object":"chat.completion.chunk","created":1727448638,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" + a"},"logprobs":null,"finish_reason":null}],"usage":null} + + + data: {"id":"chatcmpl-AC6akONKCxc8HS63qZ08HyjeTSq6p","object":"chat.completion.chunk","created":1727448638,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" + test"},"logprobs":null,"finish_reason":null}],"usage":null} + + + data: 
{"id":"chatcmpl-AC6akONKCxc8HS63qZ08HyjeTSq6p","object":"chat.completion.chunk","created":1727448638,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}],"usage":null} + + + data: {"id":"chatcmpl-AC6akONKCxc8HS63qZ08HyjeTSq6p","object":"chat.completion.chunk","created":1727448638,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}],"usage":null} + + + data: {"id":"chatcmpl-AC6akONKCxc8HS63qZ08HyjeTSq6p","object":"chat.completion.chunk","created":1727448638,"model":"gpt-4-0613","system_fingerprint":null,"choices":[],"usage":{"prompt_tokens":12,"completion_tokens":5,"total_tokens":17,"completion_tokens_details":{"reasoning_tokens":0}}} + + + data: [DONE] + + + ' + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 8c9c4ea489d57948-PMO + Connection: + - keep-alive + Content-Type: + - text/event-stream; charset=utf-8 + Date: + - Fri, 27 Sep 2024 14:50:38 GMT + Server: + - cloudflare + Set-Cookie: test_set_cookie + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + openai-organization: test_organization + openai-processing-ms: + - '161' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '1000000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '999977' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 1ms + x-request-id: + - req_3fa9ac9f3693c712e4c377e26d203e58 + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation/opentelemetry-instrumentation-openai-v2/tests/conftest.py b/instrumentation/opentelemetry-instrumentation-openai-v2/tests/conftest.py new file mode 100644 index 0000000000..976d58d7d8 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-openai-v2/tests/conftest.py @@ -0,0 +1,70 @@ +"""Unit tests configuration module.""" + +import os + +import pytest +from openai import OpenAI + +from opentelemetry import trace +from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import SimpleSpanProcessor +from opentelemetry.sdk.trace.export.in_memory_span_exporter import ( + InMemorySpanExporter, +) + + +@pytest.fixture(scope="session") +def exporter(): + exporter = InMemorySpanExporter() + processor = SimpleSpanProcessor(exporter) + + provider = TracerProvider() + provider.add_span_processor(processor) + trace.set_tracer_provider(provider) + + return exporter + + +@pytest.fixture(autouse=True) +def clear_exporter(exporter): + exporter.clear() + + +@pytest.fixture(autouse=True) +def environment(): + if not os.getenv("OPENAI_API_KEY"): + os.environ["OPENAI_API_KEY"] = "test-api-key" + + +@pytest.fixture +def openai_client(): + return OpenAI() + + +@pytest.fixture(scope="module") +def vcr_config(): + return { + "filter_headers": ["authorization", "api-key"], + "decode_compressed_response": True, + "before_record_response": scrub_response_headers, + } + + +@pytest.fixture(scope="session", autouse=True) +def instrument(): + OpenAIInstrumentor().instrument() + + +@pytest.fixture(scope="session", autouse=True) +def uninstrument(): + OpenAIInstrumentor().uninstrument() + + +def scrub_response_headers(response): + """ + This scrubs sensitive response headers. 
Note they are case-sensitive! + """ + response["headers"]["openai-organization"] = "test_organization" + response["headers"]["Set-Cookie"] = "test_set_cookie" + return response diff --git a/instrumentation/opentelemetry-instrumentation-openai-v2/tests/test_chat_completions.py b/instrumentation/opentelemetry-instrumentation-openai-v2/tests/test_chat_completions.py new file mode 100644 index 0000000000..2b0f6a7f8c --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-openai-v2/tests/test_chat_completions.py @@ -0,0 +1,157 @@ +import json + +import pytest + +from opentelemetry.semconv._incubating.attributes import ( + gen_ai_attributes as GenAIAttributes, +) + + +@pytest.mark.vcr() +def test_chat_completion(exporter, openai_client): + llm_model_value = "gpt-4" + messages_value = [{"role": "user", "content": "Say this is a test"}] + + kwargs = { + "model": llm_model_value, + "messages": messages_value, + "stream": False, + } + + response = openai_client.chat.completions.create(**kwargs) + spans = exporter.get_finished_spans() + chat_completion_span = spans[0] + # assert that the span name is correct + assert chat_completion_span.name == f"chat {llm_model_value}" + + attributes = chat_completion_span.attributes + operation_name = attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] + system = attributes[GenAIAttributes.GEN_AI_SYSTEM] + request_model = attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL] + response_model = attributes[GenAIAttributes.GEN_AI_RESPONSE_MODEL] + response_id = attributes[GenAIAttributes.GEN_AI_RESPONSE_ID] + input_tokens = attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] + output_tokens = attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS] + # assert that the attributes are correct + assert ( + operation_name == GenAIAttributes.GenAiOperationNameValues.CHAT.value + ) + assert system == GenAIAttributes.GenAiSystemValues.OPENAI.value + assert request_model == llm_model_value + assert response_model == response.model + assert response_id == response.id + assert input_tokens == response.usage.prompt_tokens + assert output_tokens == response.usage.completion_tokens + + events = chat_completion_span.events + + # assert that the prompt and completion events are present + prompt_event = list( + filter( + lambda event: event.name == "gen_ai.content.prompt", + events, + ) + ) + completion_event = list( + filter( + lambda event: event.name == "gen_ai.content.completion", + events, + ) + ) + + assert prompt_event + assert completion_event + + # assert that the prompt and completion events have the correct attributes + assert prompt_event[0].attributes[ + GenAIAttributes.GEN_AI_PROMPT + ] == json.dumps(messages_value) + + assert ( + json.loads( + completion_event[0].attributes[GenAIAttributes.GEN_AI_COMPLETION] + )[0]["content"] + == response.choices[0].message.content + ) + + +@pytest.mark.vcr() +def test_chat_completion_streaming(exporter, openai_client): + llm_model_value = "gpt-4" + messages_value = [{"role": "user", "content": "Say this is a test"}] + + kwargs = { + "model": llm_model_value, + "messages": messages_value, + "stream": True, + "stream_options": {"include_usage": True}, + } + + response_stream_usage = None + response_stream_model = None + response_stream_id = None + response_stream_result = "" + response = openai_client.chat.completions.create(**kwargs) + for chunk in response: + if chunk.choices: + response_stream_result += chunk.choices[0].delta.content or "" + + # get the last chunk + if getattr(chunk, "usage", None): + response_stream_usage = 
chunk.usage + response_stream_model = chunk.model + response_stream_id = chunk.id + + spans = exporter.get_finished_spans() + streaming_span = spans[0] + + assert streaming_span.name == f"chat {llm_model_value}" + attributes = streaming_span.attributes + + operation_name = attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] + system = attributes[GenAIAttributes.GEN_AI_SYSTEM] + request_model = attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL] + response_model = attributes[GenAIAttributes.GEN_AI_RESPONSE_MODEL] + response_id = attributes[GenAIAttributes.GEN_AI_RESPONSE_ID] + input_tokens = attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] + output_tokens = attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS] + assert ( + operation_name == GenAIAttributes.GenAiOperationNameValues.CHAT.value + ) + assert system == GenAIAttributes.GenAiSystemValues.OPENAI.value + assert request_model == llm_model_value + assert response_model == response_stream_model + assert response_id == response_stream_id + assert input_tokens == response_stream_usage.prompt_tokens + assert output_tokens == response_stream_usage.completion_tokens + + events = streaming_span.events + + # assert that the prompt and completion events are present + prompt_event = list( + filter( + lambda event: event.name == "gen_ai.content.prompt", + events, + ) + ) + completion_event = list( + filter( + lambda event: event.name == "gen_ai.content.completion", + events, + ) + ) + + assert prompt_event + assert completion_event + + # assert that the prompt and completion events have the correct attributes + assert prompt_event[0].attributes[ + GenAIAttributes.GEN_AI_PROMPT + ] == json.dumps(messages_value) + + assert ( + json.loads( + completion_event[0].attributes[GenAIAttributes.GEN_AI_COMPLETION] + )[0]["content"] + == response_stream_result + ) diff --git a/instrumentation/opentelemetry-instrumentation-psycopg/src/opentelemetry/instrumentation/psycopg/__init__.py b/instrumentation/opentelemetry-instrumentation-psycopg/src/opentelemetry/instrumentation/psycopg/__init__.py index 4f61713b29..e986ec0d46 100644 --- a/instrumentation/opentelemetry-instrumentation-psycopg/src/opentelemetry/instrumentation/psycopg/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-psycopg/src/opentelemetry/instrumentation/psycopg/__init__.py @@ -185,10 +185,12 @@ def _uninstrument(self, **kwargs): """ "Disable Psycopg instrumentation""" dbapi.unwrap_connect(psycopg, "connect") # pylint: disable=no-member dbapi.unwrap_connect( - psycopg.Connection, "connect" # pylint: disable=no-member + psycopg.Connection, + "connect", # pylint: disable=no-member ) dbapi.unwrap_connect( - psycopg.AsyncConnection, "connect" # pylint: disable=no-member + psycopg.AsyncConnection, + "connect", # pylint: disable=no-member ) # TODO(owais): check if core dbapi can do this for all dbapi implementations e.g, pymysql and mysql diff --git a/instrumentation/opentelemetry-instrumentation-pymemcache/tests/test_pymemcache.py b/instrumentation/opentelemetry-instrumentation-pymemcache/tests/test_pymemcache.py index 4e29091217..f888009017 100644 --- a/instrumentation/opentelemetry-instrumentation-pymemcache/tests/test_pymemcache.py +++ b/instrumentation/opentelemetry-instrumentation-pymemcache/tests/test_pymemcache.py @@ -52,9 +52,7 @@ ) -class PymemcacheClientTestCase( - TestBase -): # pylint: disable=too-many-public-methods +class PymemcacheClientTestCase(TestBase): # pylint: disable=too-many-public-methods """Tests for a patched pymemcache.client.base.Client.""" def setUp(self): 
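The files added above wire the new OpenAI instrumentation end to end: utils.py builds the gen_ai.* request attributes, conftest.py installs OpenAIInstrumentor for the test session, and test_chat_completions.py asserts the resulting "chat {model}" spans and content events. Below is a minimal usage sketch, not part of the patch itself, assuming the opentelemetry-instrumentation-openai-v2 package is installed, a TracerProvider is already configured, and an OpenAI API key is available (the test suite substitutes a dummy key plus VCR cassettes):

# Illustrative sketch only -- not part of this diff. Assumes an installed
# opentelemetry-instrumentation-openai-v2 package, a configured TracerProvider,
# and OPENAI_API_KEY in the environment.
from openai import OpenAI

from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor

# Wrap chat-completions calls; resulting spans are named "chat {request_model}"
# and carry gen_ai.* attributes (operation name, system, request/response model,
# response id, token usage), as asserted in test_chat_completions.py.
OpenAIInstrumentor().instrument()

client = OpenAI()
client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Say this is a test"}],
)

# Remove the wrappers when tracing of OpenAI calls is no longer wanted.
OpenAIInstrumentor().uninstrument()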
diff --git a/instrumentation/opentelemetry-instrumentation-pymongo/src/opentelemetry/instrumentation/pymongo/__init__.py b/instrumentation/opentelemetry-instrumentation-pymongo/src/opentelemetry/instrumentation/pymongo/__init__.py index f55aa2be33..e0721f2f2d 100644 --- a/instrumentation/opentelemetry-instrumentation-pymongo/src/opentelemetry/instrumentation/pymongo/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-pymongo/src/opentelemetry/instrumentation/pymongo/__init__.py @@ -74,6 +74,7 @@ def failed_hook(span, event): collection.find_one() """ + from logging import getLogger from typing import Callable, Collection diff --git a/instrumentation/opentelemetry-instrumentation-pyramid/src/opentelemetry/instrumentation/pyramid/__init__.py b/instrumentation/opentelemetry-instrumentation-pyramid/src/opentelemetry/instrumentation/pyramid/__init__.py index 7d3c8a334a..6136d55558 100644 --- a/instrumentation/opentelemetry-instrumentation-pyramid/src/opentelemetry/instrumentation/pyramid/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-pyramid/src/opentelemetry/instrumentation/pyramid/__init__.py @@ -184,6 +184,7 @@ API --- """ + import platform from typing import Collection diff --git a/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/__init__.py b/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/__init__.py index 1d3b8b8a87..e81beb6f3d 100644 --- a/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/__init__.py @@ -90,6 +90,7 @@ def response_hook(span, instance, response): API --- """ + import typing from typing import Any, Collection diff --git a/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/util.py b/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/util.py index 24ca387861..aa26ee7d1c 100644 --- a/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/util.py +++ b/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/util.py @@ -15,6 +15,7 @@ """ Some utils used by the redis integration """ + from opentelemetry.semconv.trace import ( DbSystemValues, NetTransportValues, diff --git a/instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py b/instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py index 56e544edcd..9f09168c6f 100644 --- a/instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py @@ -43,6 +43,7 @@ def multiply(x, y): multiply.send(43, 51) """ + from typing import Collection from remoulade import Middleware, broker diff --git a/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/__init__.py b/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/__init__.py index 2107bc3e23..9889e18b5a 100644 --- a/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/__init__.py +++ 
b/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/__init__.py @@ -94,6 +94,7 @@ API --- """ + from collections.abc import Sequence from typing import Collection diff --git a/instrumentation/opentelemetry-instrumentation-starlette/src/opentelemetry/instrumentation/starlette/__init__.py b/instrumentation/opentelemetry-instrumentation-starlette/src/opentelemetry/instrumentation/starlette/__init__.py index 474a942a98..50d2fb03d8 100644 --- a/instrumentation/opentelemetry-instrumentation-starlette/src/opentelemetry/instrumentation/starlette/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-starlette/src/opentelemetry/instrumentation/starlette/__init__.py @@ -169,6 +169,7 @@ def client_response_hook(span: Span, scope: dict[str, Any], message: dict[str, A API --- """ + from typing import Collection from starlette import applications diff --git a/instrumentation/opentelemetry-instrumentation-tornado/src/opentelemetry/instrumentation/tornado/__init__.py b/instrumentation/opentelemetry-instrumentation-tornado/src/opentelemetry/instrumentation/tornado/__init__.py index 0b5e06b526..3a19450433 100644 --- a/instrumentation/opentelemetry-instrumentation-tornado/src/opentelemetry/instrumentation/tornado/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-tornado/src/opentelemetry/instrumentation/tornado/__init__.py @@ -152,7 +152,6 @@ def client_response_hook(span, future): --- """ - from collections import namedtuple from functools import partial from logging import getLogger diff --git a/instrumentation/opentelemetry-instrumentation-tortoiseorm/src/opentelemetry/instrumentation/tortoiseorm/__init__.py b/instrumentation/opentelemetry-instrumentation-tortoiseorm/src/opentelemetry/instrumentation/tortoiseorm/__init__.py index cebcb81ced..da31287c83 100644 --- a/instrumentation/opentelemetry-instrumentation-tortoiseorm/src/opentelemetry/instrumentation/tortoiseorm/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-tortoiseorm/src/opentelemetry/instrumentation/tortoiseorm/__init__.py @@ -39,6 +39,7 @@ API --- """ + from typing import Collection import wrapt diff --git a/instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/__init__.py b/instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/__init__.py index d9072ba727..8b72a2f3db 100644 --- a/instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/__init__.py @@ -197,9 +197,7 @@ def _instrument(self, **kwargs): def _uninstrument(self, **kwargs): _uninstrument() - def uninstrument_opener( - self, opener: OpenerDirector - ): # pylint: disable=no-self-use + def uninstrument_opener(self, opener: OpenerDirector): # pylint: disable=no-self-use """uninstrument_opener a specific instance of urllib.request.OpenerDirector""" _uninstrument_from(opener, restore_as_bound_func=True) @@ -376,7 +374,6 @@ def _set_status_code_attribute( metric_attributes: dict = None, sem_conv_opt_in_mode: _HTTPStabilityMode = _HTTPStabilityMode.DEFAULT, ) -> None: - status_code_str = str(status_code) try: status_code = int(status_code) diff --git a/instrumentation/opentelemetry-instrumentation-urllib/tests/test_metrics_instrumentation.py b/instrumentation/opentelemetry-instrumentation-urllib/tests/test_metrics_instrumentation.py index 7a9bfd38f1..72fe6ef66f 100644 --- 
a/instrumentation/opentelemetry-instrumentation-urllib/tests/test_metrics_instrumentation.py +++ b/instrumentation/opentelemetry-instrumentation-urllib/tests/test_metrics_instrumentation.py @@ -404,7 +404,6 @@ def test_basic_metric_request_not_empty(self): ) def test_metric_uninstrument(self): with request.urlopen(self.URL): - self.assertEqual( len( ( @@ -452,7 +451,6 @@ def test_metric_uninstrument(self): ) with request.urlopen(self.URL): - self.assertEqual( len( ( @@ -502,7 +500,6 @@ def test_metric_uninstrument(self): URLLibInstrumentor().uninstrument() with request.urlopen(self.URL): - self.assertEqual( len( ( diff --git a/instrumentation/opentelemetry-instrumentation-urllib3/src/opentelemetry/instrumentation/urllib3/__init__.py b/instrumentation/opentelemetry-instrumentation-urllib3/src/opentelemetry/instrumentation/urllib3/__init__.py index 1c83f3f447..eda66bea37 100644 --- a/instrumentation/opentelemetry-instrumentation-urllib3/src/opentelemetry/instrumentation/urllib3/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-urllib3/src/opentelemetry/instrumentation/urllib3/__init__.py @@ -463,7 +463,6 @@ def _set_status_code_attribute( metric_attributes: dict = None, sem_conv_opt_in_mode: _HTTPStabilityMode = _HTTPStabilityMode.DEFAULT, ) -> None: - status_code_str = str(status_code) try: status_code = int(status_code) @@ -490,7 +489,6 @@ def _set_metric_attributes( method: str, sem_conv_opt_in_mode: _HTTPStabilityMode = _HTTPStabilityMode.DEFAULT, ) -> None: - _set_http_host_client( metric_attributes, instance.host, sem_conv_opt_in_mode ) diff --git a/instrumentation/opentelemetry-instrumentation-wsgi/tests/test_wsgi_middleware.py b/instrumentation/opentelemetry-instrumentation-wsgi/tests/test_wsgi_middleware.py index 777d19f41d..095e263732 100644 --- a/instrumentation/opentelemetry-instrumentation-wsgi/tests/test_wsgi_middleware.py +++ b/instrumentation/opentelemetry-instrumentation-wsgi/tests/test_wsgi_middleware.py @@ -696,9 +696,9 @@ def test_request_attributes_with_nonstandard_port_and_no_host(self): self.validate_url("http://127.0.0.1:443/", has_host=False) def test_request_attributes_with_conflicting_nonstandard_port(self): - self.environ[ - "HTTP_HOST" - ] += ":8080" # Note that we do not correct SERVER_PORT + self.environ["HTTP_HOST"] += ( + ":8080" # Note that we do not correct SERVER_PORT + ) expected = { SpanAttributes.HTTP_HOST: "127.0.0.1:8080", SpanAttributes.HTTP_URL: "http://127.0.0.1:8080/", diff --git a/opentelemetry-contrib-instrumentations/pyproject.toml b/opentelemetry-contrib-instrumentations/pyproject.toml index b34226b669..2b6b2cfedb 100644 --- a/opentelemetry-contrib-instrumentations/pyproject.toml +++ b/opentelemetry-contrib-instrumentations/pyproject.toml @@ -57,6 +57,7 @@ dependencies = [ "opentelemetry-instrumentation-logging==0.49b0.dev", "opentelemetry-instrumentation-mysql==0.49b0.dev", "opentelemetry-instrumentation-mysqlclient==0.49b0.dev", + "opentelemetry-instrumentation-openai-v2==2.0.0.dev", "opentelemetry-instrumentation-pika==0.49b0.dev", "opentelemetry-instrumentation-psycopg==0.49b0.dev", "opentelemetry-instrumentation-psycopg2==0.49b0.dev", diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/_load.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/_load.py index 7154238bb7..acc81c701c 100644 --- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/_load.py +++ 
b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/_load.py @@ -137,7 +137,9 @@ def _load_configurators(): configurator_name is None or configurator_name == entry_point.name ): - entry_point.load()().configure(auto_instrumentation_version=__version__) # type: ignore + entry_point.load()().configure( + auto_instrumentation_version=__version__ + ) # type: ignore configured = entry_point.name else: _logger.warning( diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap.py index 02926ea5c4..cc0ac68f1c 100644 --- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap.py +++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap.py @@ -22,12 +22,15 @@ SubprocessError, check_call, ) +from typing import Optional from packaging.requirements import Requirement from opentelemetry.instrumentation.bootstrap_gen import ( - default_instrumentations, - libraries, + default_instrumentations as gen_default_instrumentations, +) +from opentelemetry.instrumentation.bootstrap_gen import ( + libraries as gen_libraries, ) from opentelemetry.instrumentation.version import __version__ from opentelemetry.util._importlib_metadata import ( @@ -75,7 +78,7 @@ def _sys_pip_install(package): print(error) -def _pip_check(): +def _pip_check(libraries): """Ensures none of the instrumentations have dependency conflicts. Clean check reported as: 'No broken requirements found.' @@ -113,7 +116,7 @@ def _is_installed(req): return True -def _find_installed_libraries(): +def _find_installed_libraries(default_instrumentations, libraries): for lib in default_instrumentations: yield lib @@ -122,18 +125,25 @@ def _find_installed_libraries(): yield lib["instrumentation"] -def _run_requirements(): +def _run_requirements(default_instrumentations, libraries): logger.setLevel(logging.ERROR) - print("\n".join(_find_installed_libraries())) + print( + "\n".join( + _find_installed_libraries(default_instrumentations, libraries) + ) + ) -def _run_install(): - for lib in _find_installed_libraries(): +def _run_install(default_instrumentations, libraries): + for lib in _find_installed_libraries(default_instrumentations, libraries): _sys_pip_install(lib) - _pip_check() + _pip_check(libraries) -def run() -> None: +def run( + default_instrumentations: Optional[list] = None, + libraries: Optional[list] = None, +) -> None: action_install = "install" action_requirements = "requirements" @@ -163,8 +173,14 @@ def run() -> None: ) args = parser.parse_args() + if libraries is None: + libraries = gen_libraries + + if default_instrumentations is None: + default_instrumentations = gen_default_instrumentations + cmd = { action_install: _run_install, action_requirements: _run_requirements, }[args.action] - cmd() + cmd(default_instrumentations, libraries) diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py index ff6065d058..95d25026f9 100644 --- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py +++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py @@ -120,6 +120,10 @@ "library": "mysqlclient < 3", "instrumentation": "opentelemetry-instrumentation-mysqlclient==0.49b0.dev", }, + { + "library": "openai >= 1.0.0", + "instrumentation": "opentelemetry-instrumentation-openai-v2==2.0.0.dev", + }, { 
"library": "pika >= 0.12.0", "instrumentation": "opentelemetry-instrumentation-pika==0.49b0.dev", diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/distro.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/distro.py index 1bc847f988..1b450f2549 100644 --- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/distro.py +++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/distro.py @@ -32,7 +32,6 @@ class BaseDistro(ABC): _instance = None def __new__(cls, *args, **kwargs): - if cls._instance is None: cls._instance = object.__new__(cls, *args, **kwargs) diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/utils.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/utils.py index 73c000ee9c..a0d9ae18f9 100644 --- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/utils.py +++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/utils.py @@ -14,8 +14,9 @@ import urllib.parse from contextlib import contextmanager +from importlib import import_module from re import escape, sub -from typing import Dict, Iterable, Sequence +from typing import Dict, Iterable, Sequence, Union from wrapt import ObjectProxy @@ -80,13 +81,30 @@ def http_status_to_status_code( return StatusCode.ERROR -def unwrap(obj, attr: str): +def unwrap(obj: Union[object, str], attr: str): """Given a function that was wrapped by wrapt.wrap_function_wrapper, unwrap it + The object containing the function to unwrap may be passed as dotted module path string. + Args: - obj: Object that holds a reference to the wrapped function + obj: Object that holds a reference to the wrapped function or dotted import path as string attr (str): Name of the wrapped function """ + if isinstance(obj, str): + try: + module_path, class_name = obj.rsplit(".", 1) + except ValueError as exc: + raise ImportError( + f"Cannot parse '{obj}' as dotted import path" + ) from exc + module = import_module(module_path) + try: + obj = getattr(module, class_name) + except AttributeError as exc: + raise ImportError( + f"Cannot import '{class_name}' from '{module}'" + ) from exc + func = getattr(obj, attr, None) if func and isinstance(func, ObjectProxy) and hasattr(func, "__wrapped__"): setattr(obj, attr, func.__wrapped__) diff --git a/opentelemetry-instrumentation/tests/auto_instrumentation/test_load.py b/opentelemetry-instrumentation/tests/auto_instrumentation/test_load.py index 98bad3d9f9..2d8538b5b3 100644 --- a/opentelemetry-instrumentation/tests/auto_instrumentation/test_load.py +++ b/opentelemetry-instrumentation/tests/auto_instrumentation/test_load.py @@ -33,9 +33,7 @@ class TestLoad(TestCase): @patch( "opentelemetry.instrumentation.auto_instrumentation._load.entry_points" ) - def test_load_configurators( - self, iter_mock - ): # pylint: disable=no-self-use + def test_load_configurators(self, iter_mock): # pylint: disable=no-self-use # Add multiple entry points but only specify the 2nd in the environment variable. ep_mock1 = Mock() ep_mock1.name = "custom_configurator1" @@ -64,9 +62,7 @@ def test_load_configurators( @patch( "opentelemetry.instrumentation.auto_instrumentation._load.entry_points" ) - def test_load_configurators_no_ep( - self, iter_mock - ): # pylint: disable=no-self-use + def test_load_configurators_no_ep(self, iter_mock): # pylint: disable=no-self-use iter_mock.return_value = () # Confirm method does not crash if not entry points exist. 
_load._load_configurators() @@ -288,9 +284,7 @@ def test_load_instrumentors(self, iter_mock, dep_mock): @patch( "opentelemetry.instrumentation.auto_instrumentation._load.entry_points" ) - def test_load_instrumentors_dep_conflict( - self, iter_mock, dep_mock - ): # pylint: disable=no-self-use + def test_load_instrumentors_dep_conflict(self, iter_mock, dep_mock): # pylint: disable=no-self-use ep_mock1 = Mock() ep_mock1.name = "instr1" diff --git a/opentelemetry-instrumentation/tests/auto_instrumentation/test_run.py b/opentelemetry-instrumentation/tests/auto_instrumentation/test_run.py index 9fd3a21711..ec01e4089b 100644 --- a/opentelemetry-instrumentation/tests/auto_instrumentation/test_run.py +++ b/opentelemetry-instrumentation/tests/auto_instrumentation/test_run.py @@ -93,9 +93,7 @@ class TestExecl(TestCase): @patch("sys.argv", ["1", "2", "3"]) @patch("opentelemetry.instrumentation.auto_instrumentation.which") @patch("opentelemetry.instrumentation.auto_instrumentation.execl") - def test_execl( - self, mock_execl, mock_which - ): # pylint: disable=no-self-use + def test_execl(self, mock_execl, mock_which): # pylint: disable=no-self-use mock_which.configure_mock(**{"return_value": "python"}) auto_instrumentation.run() diff --git a/opentelemetry-instrumentation/tests/test_bootstrap.py b/opentelemetry-instrumentation/tests/test_bootstrap.py index 4807f0beb7..0ded8d37b1 100644 --- a/opentelemetry-instrumentation/tests/test_bootstrap.py +++ b/opentelemetry-instrumentation/tests/test_bootstrap.py @@ -19,7 +19,10 @@ from unittest.mock import call, patch from opentelemetry.instrumentation import bootstrap -from opentelemetry.instrumentation.bootstrap_gen import libraries +from opentelemetry.instrumentation.bootstrap_gen import ( + default_instrumentations, + libraries, +) def sample_packages(packages, rate): @@ -56,15 +59,15 @@ def setUpClass(cls): "opentelemetry.instrumentation.bootstrap._pip_check", ) - cls.pkg_patcher.start() - cls.mock_pip_install = cls.pip_install_patcher.start() - cls.mock_pip_check = cls.pip_check_patcher.start() + def setUp(self): + super().setUp() + self.mock_pip_check = self.pip_check_patcher.start() + self.mock_pip_install = self.pip_install_patcher.start() - @classmethod - def tearDownClass(cls): - cls.pip_check_patcher.start() - cls.pip_install_patcher.start() - cls.pkg_patcher.stop() + def tearDown(self): + super().tearDown() + self.pip_check_patcher.stop() + self.pip_install_patcher.stop() @patch("sys.argv", ["bootstrap", "-a", "pipenv"]) def test_run_unknown_cmd(self): @@ -73,18 +76,44 @@ def test_run_unknown_cmd(self): @patch("sys.argv", ["bootstrap", "-a", "requirements"]) def test_run_cmd_print(self): + self.pkg_patcher.start() with patch("sys.stdout", new=StringIO()) as fake_out: bootstrap.run() self.assertEqual( fake_out.getvalue(), "\n".join(self.installed_libraries) + "\n", ) + self.pkg_patcher.stop() @patch("sys.argv", ["bootstrap", "-a", "install"]) def test_run_cmd_install(self): + self.pkg_patcher.start() bootstrap.run() self.mock_pip_install.assert_has_calls( [call(i) for i in self.installed_libraries], any_order=True, ) - self.assertEqual(self.mock_pip_check.call_count, 1) + self.mock_pip_check.assert_called_once() + self.pkg_patcher.stop() + + @patch("sys.argv", ["bootstrap", "-a", "install"]) + def test_can_override_available_libraries(self): + bootstrap.run(libraries=[]) + self.mock_pip_install.assert_has_calls( + [call(i) for i in default_instrumentations], + any_order=True, + ) + self.mock_pip_check.assert_called_once() + + @patch("sys.argv", 
["bootstrap", "-a", "install"]) + def test_can_override_available_default_instrumentations(self): + with patch( + "opentelemetry.instrumentation.bootstrap._is_installed", + return_value=True, + ): + bootstrap.run(default_instrumentations=[]) + self.mock_pip_install.assert_has_calls( + [call(i) for i in self.installed_libraries], + any_order=True, + ) + self.mock_pip_check.assert_called_once() diff --git a/opentelemetry-instrumentation/tests/test_distro.py b/opentelemetry-instrumentation/tests/test_distro.py index 03a95614df..9801264cbe 100644 --- a/opentelemetry-instrumentation/tests/test_distro.py +++ b/opentelemetry-instrumentation/tests/test_distro.py @@ -32,9 +32,7 @@ def _uninstrument(self, **kwargs): class MockEntryPoint(EntryPoint): - def __init__( - self, name, value, group - ): # pylint: disable=super-init-not-called + def __init__(self, name, value, group): # pylint: disable=super-init-not-called pass def load(self, *args, **kwargs): # pylint: disable=signature-differs diff --git a/opentelemetry-instrumentation/tests/test_utils.py b/opentelemetry-instrumentation/tests/test_utils.py index d3807a1bdb..5ddd45d692 100644 --- a/opentelemetry-instrumentation/tests/test_utils.py +++ b/opentelemetry-instrumentation/tests/test_utils.py @@ -15,6 +15,8 @@ import unittest from http import HTTPStatus +from wrapt import ObjectProxy, wrap_function_wrapper + from opentelemetry.context import ( _SUPPRESS_HTTP_INSTRUMENTATION_KEY, _SUPPRESS_INSTRUMENTATION_KEY, @@ -29,10 +31,19 @@ is_instrumentation_enabled, suppress_http_instrumentation, suppress_instrumentation, + unwrap, ) from opentelemetry.trace import StatusCode +class WrappedClass: + def method(self): + pass + + def wrapper_method(self): + pass + + class TestUtils(unittest.TestCase): # See https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md#status def test_http_status_to_status_code(self): @@ -240,3 +251,75 @@ def test_suppress_http_instrumentation_key(self): self.assertTrue(get_value(_SUPPRESS_HTTP_INSTRUMENTATION_KEY)) self.assertIsNone(get_value(_SUPPRESS_HTTP_INSTRUMENTATION_KEY)) + + +class UnwrapTestCase(unittest.TestCase): + @staticmethod + def _wrap_method(): + return wrap_function_wrapper( + WrappedClass, "method", WrappedClass.wrapper_method + ) + + def test_can_unwrap_object_attribute(self): + self._wrap_method() + instance = WrappedClass() + self.assertTrue(isinstance(instance.method, ObjectProxy)) + + unwrap(WrappedClass, "method") + self.assertFalse(isinstance(instance.method, ObjectProxy)) + + def test_can_unwrap_object_attribute_as_string(self): + self._wrap_method() + instance = WrappedClass() + self.assertTrue(isinstance(instance.method, ObjectProxy)) + + unwrap("tests.test_utils.WrappedClass", "method") + self.assertFalse(isinstance(instance.method, ObjectProxy)) + + def test_raises_import_error_if_path_not_well_formed(self): + self._wrap_method() + instance = WrappedClass() + self.assertTrue(isinstance(instance.method, ObjectProxy)) + + with self.assertRaisesRegex( + ImportError, "Cannot parse '' as dotted import path" + ): + unwrap("", "method") + + unwrap(WrappedClass, "method") + self.assertFalse(isinstance(instance.method, ObjectProxy)) + + def test_raises_import_error_if_cannot_find_module(self): + self._wrap_method() + instance = WrappedClass() + self.assertTrue(isinstance(instance.method, ObjectProxy)) + + with self.assertRaisesRegex(ImportError, "No module named 'does'"): + unwrap("does.not.exist.WrappedClass", "method") + + unwrap(WrappedClass, 
"method") + self.assertFalse(isinstance(instance.method, ObjectProxy)) + + def test_raises_import_error_if_cannot_find_object(self): + self._wrap_method() + instance = WrappedClass() + self.assertTrue(isinstance(instance.method, ObjectProxy)) + + with self.assertRaisesRegex( + ImportError, "Cannot import 'NotWrappedClass' from" + ): + unwrap("tests.test_utils.NotWrappedClass", "method") + + unwrap(WrappedClass, "method") + self.assertFalse(isinstance(instance.method, ObjectProxy)) + + # pylint: disable=no-self-use + def test_does_nothing_if_cannot_find_attribute(self): + instance = WrappedClass() + unwrap(instance, "method_not_found") + + def test_does_nothing_if_attribute_is_not_from_wrapt(self): + instance = WrappedClass() + self.assertFalse(isinstance(instance.method, ObjectProxy)) + unwrap(WrappedClass, "method") + self.assertFalse(isinstance(instance.method, ObjectProxy)) diff --git a/propagator/opentelemetry-propagator-aws-xray/src/opentelemetry/propagators/aws/aws_xray_propagator.py b/propagator/opentelemetry-propagator-aws-xray/src/opentelemetry/propagators/aws/aws_xray_propagator.py index 295a5def9b..d9b99f35ca 100644 --- a/propagator/opentelemetry-propagator-aws-xray/src/opentelemetry/propagators/aws/aws_xray_propagator.py +++ b/propagator/opentelemetry-propagator-aws-xray/src/opentelemetry/propagators/aws/aws_xray_propagator.py @@ -340,7 +340,6 @@ def extract( context: typing.Optional[Context] = None, getter: Getter[CarrierT] = default_getter, ) -> Context: - xray_context = super().extract(carrier, context=context, getter=getter) if trace.get_current_span(context=context).get_span_context().is_valid: diff --git a/propagator/opentelemetry-propagator-aws-xray/tests/test_aws_xray_lambda_propagator.py b/propagator/opentelemetry-propagator-aws-xray/tests/test_aws_xray_lambda_propagator.py index 2d8937e1b3..231b5da55e 100644 --- a/propagator/opentelemetry-propagator-aws-xray/tests/test_aws_xray_lambda_propagator.py +++ b/propagator/opentelemetry-propagator-aws-xray/tests/test_aws_xray_lambda_propagator.py @@ -37,9 +37,7 @@ class AwsXRayLambdaPropagatorTest(TestCase): - def test_extract_no_environment_variable(self): - actual_context = get_current_span( AwsXRayLambdaPropagator().extract( {}, context=get_current(), getter=DefaultGetter() @@ -54,9 +52,7 @@ def test_extract_no_environment_variable(self): self.assertEqual(actual_context.trace_state, TraceState.get_default()) def test_extract_no_environment_variable_valid_context(self): - with use_span(NonRecordingSpan(SpanContext(1, 2, False))): - actual_context = get_current_span( AwsXRayLambdaPropagator().extract( {}, context=get_current(), getter=DefaultGetter() @@ -82,7 +78,6 @@ def test_extract_no_environment_variable_valid_context(self): }, ) def test_extract_from_environment_variable(self): - actual_context = get_current_span( AwsXRayLambdaPropagator().extract( {}, context=get_current(), getter=DefaultGetter() @@ -108,7 +103,6 @@ def test_extract_from_environment_variable(self): }, ) def test_add_link_from_environment_variable(self): - propagator = AwsXRayLambdaPropagator() default_getter = DefaultGetter() diff --git a/pyproject.toml b/pyproject.toml index c1a64c5240..fd5ee5716f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,11 +1,41 @@ -[tool.black] +[tool.ruff] +# https://docs.astral.sh/ruff/configuration/ +target-version = "py38" line-length = 79 -exclude = ''' -( - \.git - | \.tox - | venv - | build - | dist -) -''' +extend-exclude = [ + "_template", + "*_pb2*.py*", +] +output-format = "concise" + +[tool.ruff.lint] +# 
https://docs.astral.sh/ruff/linter/#rule-selection +# pylint: https://github.com/astral-sh/ruff/issues/970 +select = [ + "I", # isort + "F", # pyflakes + "E", # pycodestyle errors + "W", # pycodestyle warnings + "PLC", # pylint convention + "PLE", # pylint error + "Q", # flake8-quotes + "A", # flake8-builtins +] +ignore = [ + "E501", # line-too-long +] + +[tool.ruff.lint.per-file-ignores] +"docs/**/*.*" = ["A001"] + +[tool.ruff.lint.isort] +detect-same-package = false # to not consider instrumentation packages as first-party +known-first-party = ["opentelemetry"] +known-third-party = [ + "psutil", + "pytest", + "redis", + "redis_opentracing", + "opencensus", +] + diff --git a/scripts/eachdist.py b/scripts/eachdist.py index 57d98206b7..b82d16a8ec 100755 --- a/scripts/eachdist.py +++ b/scripts/eachdist.py @@ -238,7 +238,8 @@ def setup_instparser(instparser): ) fmtparser = subparsers.add_parser( - "format", help="Formats all source code with black and isort.", + "format", + help="Formats all source code with black and isort.", ) fmtparser.set_defaults(func=format_args) fmtparser.add_argument( @@ -248,7 +249,8 @@ def setup_instparser(instparser): ) versionparser = subparsers.add_parser( - "version", help="Get the version for a release", + "version", + help="Get the version for a release", ) versionparser.set_defaults(func=version_args) versionparser.add_argument( @@ -268,7 +270,8 @@ def setup_instparser(instparser): ) findparser = subparsers.add_parser( - "find-package", help="Find package path.", + "find-package", + help="Find package path.", ) findparser.set_defaults(func=find_package_args) findparser.add_argument( @@ -294,10 +297,7 @@ def find_targets_unordered(rootpath): continue if subdir.name.startswith(".") or subdir.name.startswith("venv"): continue - if any( - (subdir / marker).exists() - for marker in ("pyproject.toml",) - ): + if any((subdir / marker).exists() for marker in ("pyproject.toml",)): yield subdir else: yield from find_targets_unordered(subdir) @@ -520,23 +520,16 @@ def parse_subargs(parentargs, args): def lint_args(args): - rootdir = str(find_projectroot()) - runsubprocess( args.dry_run, - ("black", "--config", f"{rootdir}/pyproject.toml", ".") - + (("--diff", "--check") if args.check_only else ()), - cwd=rootdir, + ("ruff", "check") + (() if args.check_only else ("--fix",)), check=True, ) runsubprocess( args.dry_run, - ("isort", "--settings-path", f"{rootdir}/.isort.cfg", ".") - + (("--diff", "--check-only") if args.check_only else ()), - cwd=rootdir, + ("ruff", "format") + (("--check",) if args.check_only else ()), check=True, ) - runsubprocess(args.dry_run, ("flake8", "--config", f"{rootdir}/.flake8", rootdir), check=True) execute_args( parse_subargs( args, ("exec", "pylint {}", "--all", "--mode", "lintroots") @@ -545,7 +538,11 @@ def lint_args(args): execute_args( parse_subargs( args, - ("exec", "python scripts/check_for_valid_readme.py {}", "--all",), + ( + "exec", + "python scripts/check_for_valid_readme.py {}", + "--all", + ), ) ) @@ -585,9 +582,7 @@ def update_changelogs(version): ## [{version}](https://github.com/open-telemetry/opentelemetry-python/releases/tag/v{version}) - {today} -""".format( - version=version, today=today - ) +""".format(version=version, today=today) errors = False try: update_changelog("./CHANGELOG.md", version, new_entry) @@ -634,7 +629,10 @@ def update_version_files(targets, version, packages): print("updating version.py files") targets = filter_packages(targets, packages) update_files( - targets, "version.py", "__version__ .*", 
f'__version__ = "{version}"', + targets, + "version.py", + "__version__ .*", + f'__version__ = "{version}"', ) @@ -652,7 +650,7 @@ def update_dependencies(targets, version, packages): update_files( targets, "pyproject.toml", - fr"({package_name}.*)==(.*)", + rf"({package_name}.*)==(.*)", r"\1== " + version + '",', ) @@ -690,14 +688,18 @@ def release_args(args): updated_versions = [] excluded = cfg["exclude_release"]["packages"].split() - targets = [target for target in targets if basename(target) not in excluded] + targets = [ + target for target in targets if basename(target) not in excluded + ] for group in versions.split(","): mcfg = cfg[group] version = mcfg["version"] updated_versions.append(version) packages = None if "packages" in mcfg: - packages = [pkg for pkg in mcfg["packages"].split() if pkg not in excluded] + packages = [ + pkg for pkg in mcfg["packages"].split() if pkg not in excluded + ] print(f"update {group} packages to {version}") update_dependencies(targets, version, packages) update_version_files(targets, version, packages) @@ -724,16 +726,15 @@ def format_args(args): format_dir = str(find_projectroot()) if args.path: format_dir = os.path.join(format_dir, args.path) - root_dir = str(find_projectroot()) runsubprocess( args.dry_run, - ("black", "--config", f"{root_dir}/pyproject.toml", "."), + ("ruff", "check", "--fix"), cwd=format_dir, check=True, ) runsubprocess( args.dry_run, - ("isort", "--settings-path", f"{root_dir}/.isort.cfg", "--profile", "black", "."), + ("ruff", "format"), cwd=format_dir, check=True, ) @@ -763,6 +764,7 @@ def version_args(args): print("package not found") sys.exit(1) + def find_package_args(args): root = find_projectroot() for package in find_targets_unordered(root): @@ -774,6 +776,7 @@ def find_package_args(args): print("package not found") sys.exit(1) + def main(): args = parse_args() args.func(args) diff --git a/tests/opentelemetry-docker-tests/tests/redis/test_redis_functional.py b/tests/opentelemetry-docker-tests/tests/redis/test_redis_functional.py index d02febca10..bfb3aa1f48 100644 --- a/tests/opentelemetry-docker-tests/tests/redis/test_redis_functional.py +++ b/tests/opentelemetry-docker-tests/tests/redis/test_redis_functional.py @@ -17,15 +17,13 @@ import redis import redis.asyncio - -from redis.exceptions import ResponseError -from redis.commands.search.indexDefinition import IndexDefinition, IndexType -from redis.commands.search.aggregation import AggregateRequest -from redis.commands.search.query import Query from redis.commands.search.field import ( TextField, VectorField, ) +from redis.commands.search.indexDefinition import IndexDefinition, IndexType +from redis.commands.search.query import Query +from redis.exceptions import ResponseError from opentelemetry import trace from opentelemetry.instrumentation.redis import RedisInstrumentor @@ -644,39 +642,49 @@ def prepare_data(self): self.redis_client.ft("idx:test_vss").dropindex(True) except ResponseError: print("No such index") - item = {"name": "test", - "value": "test_value", - "embeddings": [0.1] * 256} + item = { + "name": "test", + "value": "test_value", + "embeddings": [0.1] * 256, + } pipeline = self.redis_client.pipeline() - pipeline.json().set(f"test:001", "$", item) + pipeline.json().set("test:001", "$", item) res = pipeline.execute() assert False not in res def create_index(self): - schema = ( - TextField("$.name", no_stem=True, as_name="name"), - TextField("$.value", no_stem=True, as_name="value"), - VectorField("$.embeddings", - "FLAT", - { - "TYPE": "FLOAT32", - 
"DIM": self.embedding_dim, - "DISTANCE_METRIC": "COSINE", - }, - as_name="vector",), - ) - definition = IndexDefinition(prefix=["test:"], index_type=IndexType.JSON) - res = self.redis_client.ft("idx:test_vss").create_index(fields=schema, definition=definition) + schema = ( + TextField("$.name", no_stem=True, as_name="name"), + TextField("$.value", no_stem=True, as_name="value"), + VectorField( + "$.embeddings", + "FLAT", + { + "TYPE": "FLOAT32", + "DIM": self.embedding_dim, + "DISTANCE_METRIC": "COSINE", + }, + as_name="vector", + ), + ) + definition = IndexDefinition( + prefix=["test:"], index_type=IndexType.JSON + ) + res = self.redis_client.ft("idx:test_vss").create_index( + fields=schema, definition=definition + ) assert "OK" in str(res) def test_redis_create_index(self): spans = self.memory_exporter.get_finished_spans() - span = next(span for span in spans if span.name == "redis.create_index") + span = next( + span for span in spans if span.name == "redis.create_index" + ) assert "redis.create_index.fields" in span.attributes def test_redis_query(self): query = "@name:test" - res = self.redis_client.ft("idx:test_vss").search(Query(query)) + self.redis_client.ft("idx:test_vss").search(Query(query)) spans = self.memory_exporter.get_finished_spans() span = next(span for span in spans if span.name == "redis.search") diff --git a/tox.ini b/tox.ini index cec851872e..62c205513e 100644 --- a/tox.ini +++ b/tox.ini @@ -6,6 +6,11 @@ envlist = ; Environments are organized by individual package, allowing ; for specifying supported Python versions per package. + ; instrumentation-openai + py3{8,9,10,11,12}-test-instrumentation-openai-v2 + pypy3-test-instrumentation-openai-v2 + lint-instrumentation-openai-v2 + ; opentelemetry-resource-detector-container py3{8,9,10,11,12}-test-resource-detector-container pypy3-test-resource-detector-container @@ -379,6 +384,7 @@ envlist = generate generate-workflows shellcheck + ruff [testenv] deps = @@ -404,6 +410,12 @@ commands_pre = opentelemetry-instrumentation: pip install opentelemetry-test-utils@{env:CORE_REPO}\#egg=opentelemetry-test-utils&subdirectory=tests/opentelemetry-test-utils opentelemetry-instrumentation: pip install -r {toxinidir}/opentelemetry-instrumentation/test-requirements.txt + openai: pip install opentelemetry-api@{env:CORE_REPO}\#egg=opentelemetry-api&subdirectory=opentelemetry-api + openai: pip install opentelemetry-semantic-conventions@{env:CORE_REPO}\#egg=opentelemetry-semantic-conventions&subdirectory=opentelemetry-semantic-conventions + openai: pip install opentelemetry-sdk@{env:CORE_REPO}\#egg=opentelemetry-sdk&subdirectory=opentelemetry-sdk + openai: pip install opentelemetry-test-utils@{env:CORE_REPO}\#egg=opentelemetry-test-utils&subdirectory=tests/opentelemetry-test-utils + openai: pip install -r {toxinidir}/instrumentation/opentelemetry-instrumentation-openai-v2/test-requirements.txt + distro: pip install opentelemetry-api@{env:CORE_REPO}\#egg=opentelemetry-api&subdirectory=opentelemetry-api distro: pip install opentelemetry-semantic-conventions@{env:CORE_REPO}\#egg=opentelemetry-semantic-conventions&subdirectory=opentelemetry-semantic-conventions distro: pip install opentelemetry-sdk@{env:CORE_REPO}\#egg=opentelemetry-sdk&subdirectory=opentelemetry-sdk @@ -456,6 +468,11 @@ commands_pre = kafka-pythonng: pip install opentelemetry-sdk@{env:CORE_REPO}\#egg=opentelemetry-sdk&subdirectory=opentelemetry-sdk kafka-pythonng: pip install -r {toxinidir}/instrumentation/opentelemetry-instrumentation-kafka-python/test-requirements-ng.txt + 
openai: pip install opentelemetry-api@{env:CORE_REPO}\#egg=opentelemetry-api&subdirectory=opentelemetry-api + openai: pip install opentelemetry-sdk@{env:CORE_REPO}\#egg=opentelemetry-sdk&subdirectory=opentelemetry-sdk + openai: pip install opentelemetry-semantic-conventions@{env:CORE_REPO}\#egg=opentelemetry-semantic-conventions&subdirectory=opentelemetry-semantic-conventions + openai: pip install -r {toxinidir}/instrumentation/opentelemetry-instrumentation-openai-v2/test-requirements.txt + confluent-kafka: pip install opentelemetry-api@{env:CORE_REPO}\#egg=opentelemetry-api&subdirectory=opentelemetry-api confluent-kafka: pip install opentelemetry-semantic-conventions@{env:CORE_REPO}\#egg=opentelemetry-semantic-conventions&subdirectory=opentelemetry-semantic-conventions confluent-kafka: pip install opentelemetry-sdk@{env:CORE_REPO}\#egg=opentelemetry-sdk&subdirectory=opentelemetry-sdk @@ -804,362 +821,185 @@ commands_pre = commands = test-distro: pytest {toxinidir}/opentelemetry-distro/tests {posargs} - lint-distro: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/opentelemetry-distro - lint-distro: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/opentelemetry-distro - lint-distro: flake8 --config {toxinidir}/.flake8 {toxinidir}/opentelemetry-distro lint-distro: pylint {toxinidir}/opentelemetry-distro test-opentelemetry-instrumentation: pytest {toxinidir}/opentelemetry-instrumentation/tests {posargs} - lint-opentelemetry-instrumentation: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/opentelemetry-instrumentation - lint-opentelemetry-instrumentation: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/opentelemetry-instrumentation - lint-opentelemetry-instrumentation: flake8 --config {toxinidir}/.flake8 {toxinidir}/opentelemetry-instrumentation lint-opentelemetry-instrumentation: pylint {toxinidir}/opentelemetry-instrumentation test-instrumentation-aiohttp-client: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-aiohttp-client/tests {posargs} - lint-instrumentation-aiohttp-client: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-aiohttp-client - lint-instrumentation-aiohttp-client: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-aiohttp-client - lint-instrumentation-aiohttp-client: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-aiohttp-client lint-instrumentation-aiohttp-client: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-aiohttp-client" test-instrumentation-aiohttp-server: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-aiohttp-server/tests {posargs} - lint-instrumentation-aiohttp-server: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-aiohttp-server - lint-instrumentation-aiohttp-server: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-aiohttp-server - lint-instrumentation-aiohttp-server: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-aiohttp-server lint-instrumentation-aiohttp-server: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-aiohttp-server" test-instrumentation-aiopg: pytest 
{toxinidir}/instrumentation/opentelemetry-instrumentation-aiopg/tests {posargs} - lint-instrumentation-aiopg: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-aiopg - lint-instrumentation-aiopg: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-aiopg - lint-instrumentation-aiopg: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-aiopg lint-instrumentation-aiopg: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-aiopg" test-instrumentation-asgi: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-asgi/tests {posargs} - lint-instrumentation-asgi: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-asgi - lint-instrumentation-asgi: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-asgi - lint-instrumentation-asgi: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-asgi lint-instrumentation-asgi: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-asgi" test-instrumentation-asyncpg: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-asyncpg/tests {posargs} - lint-instrumentation-asyncpg: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-asyncpg - lint-instrumentation-asyncpg: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-asyncpg - lint-instrumentation-asyncpg: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-asyncpg lint-instrumentation-asyncpg: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-asyncpg" test-instrumentation-aws-lambda: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-aws-lambda/tests {posargs} - lint-instrumentation-aws-lambda: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-aws-lambda - lint-instrumentation-aws-lambda: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-aws-lambda - lint-instrumentation-aws-lambda: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-aws-lambda lint-instrumentation-aws-lambda: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-aws-lambda" test-instrumentation-boto: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-boto/tests {posargs} - lint-instrumentation-boto: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-boto - lint-instrumentation-boto: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-boto - lint-instrumentation-boto: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-boto lint-instrumentation-boto: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-boto" test-instrumentation-botocore: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-botocore/tests {posargs} - 
lint-instrumentation-botocore: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-botocore - lint-instrumentation-botocore: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-botocore - lint-instrumentation-botocore: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-botocore lint-instrumentation-botocore: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-botocore" test-instrumentation-boto3sqs: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-boto3sqs/tests {posargs} - lint-instrumentation-boto3sqs: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-boto3sqs - lint-instrumentation-boto3sqs: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-boto3sqs - lint-instrumentation-boto3sqs: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-boto3sqs lint-instrumentation-boto3sqs: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-boto3sqs" test-instrumentation-cassandra: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-cassandra/tests {posargs} - lint-instrumentation-cassandra: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-cassandra - lint-instrumentation-cassandra: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-cassandra - lint-instrumentation-cassandra: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-cassandra lint-instrumentation-cassandra: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-cassandra" test-instrumentation-celery: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-celery/tests {posargs} - lint-instrumentation-celery: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-celery - lint-instrumentation-celery: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-celery - lint-instrumentation-celery: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-celery lint-instrumentation-celery: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-celery" test-instrumentation-dbapi: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-dbapi/tests {posargs} - lint-instrumentation-dbapi: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-dbapi - lint-instrumentation-dbapi: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-dbapi - lint-instrumentation-dbapi: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-dbapi lint-instrumentation-dbapi: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-dbapi" test-instrumentation-django: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-django/tests {posargs} - lint-instrumentation-django: black --diff --check --config 
{toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-django - lint-instrumentation-django: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-django - lint-instrumentation-django: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-django lint-instrumentation-django: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-django" test-instrumentation-elasticsearch: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-elasticsearch/tests {posargs} - lint-instrumentation-elasticsearch: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-elasticsearch - lint-instrumentation-elasticsearch: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-elasticsearch - lint-instrumentation-elasticsearch: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-elasticsearch lint-instrumentation-elasticsearch: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-elasticsearch" test-instrumentation-falcon: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-falcon/tests {posargs} - lint-instrumentation-falcon: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-falcon - lint-instrumentation-falcon: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-falcon - lint-instrumentation-falcon: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-falcon lint-instrumentation-falcon: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-falcon" test-instrumentation-fastapi: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-fastapi/tests {posargs} - lint-instrumentation-fastapi: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-fastapi - lint-instrumentation-fastapi: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-fastapi - lint-instrumentation-fastapi: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-fastapi lint-instrumentation-fastapi: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-fastapi" test-instrumentation-flask: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-flask/tests {posargs} - lint-instrumentation-flask: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-flask - lint-instrumentation-flask: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-flask - lint-instrumentation-flask: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-flask lint-instrumentation-flask: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-flask" test-instrumentation-urllib: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-urllib/tests {posargs} - lint-instrumentation-urllib: black --diff --check --config {toxinidir}/pyproject.toml 
{toxinidir}/instrumentation/opentelemetry-instrumentation-urllib - lint-instrumentation-urllib: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-urllib - lint-instrumentation-urllib: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-urllib lint-instrumentation-urllib: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-urllib" test-instrumentation-urllib3: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-urllib3/tests {posargs} - lint-instrumentation-urllib3: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-urllib3 - lint-instrumentation-urllib3: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-urllib3 - lint-instrumentation-urllib3: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-urllib3 lint-instrumentation-urllib3: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-urllib3" test-instrumentation-grpc: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-grpc/tests {posargs} - lint-instrumentation-grpc: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-grpc - lint-instrumentation-grpc: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-grpc - lint-instrumentation-grpc: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-grpc lint-instrumentation-grpc: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-grpc" test-instrumentation-jinja2: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-jinja2/tests {posargs} - lint-instrumentation-jinja2: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-jinja2 - lint-instrumentation-jinja2: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-jinja2 - lint-instrumentation-jinja2: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-jinja2 lint-instrumentation-jinja2: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-jinja2" test-instrumentation-aiokafka: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-aiokafka/tests {posargs} - lint-instrumentation-aiokafka: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-aiokafka - lint-instrumentation-aiokafka: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-aiokafka - lint-instrumentation-aiokafka: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-aiokafka lint-instrumentation-aiokafka: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-aiokafka" test-instrumentation-kafka-python: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-kafka-python/tests {posargs} - lint-instrumentation-kafka-python: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-kafka-python - 
lint-instrumentation-kafka-python: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-kafka-python - lint-instrumentation-kafka-python: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-kafka-python lint-instrumentation-kafka-python: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-kafka-python" ; Test only for kafka-pythonng instrumentation as the only difference between kafka-python and kafka-pythonng is the version of kafka-python test-instrumentation-kafka-pythonng: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-kafka-python/tests {posargs} test-instrumentation-confluent-kafka: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-confluent-kafka/tests {posargs} - lint-instrumentation-confluent-kafka: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-confluent-kafka - lint-instrumentation-confluent-kafka: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-confluent-kafka - lint-instrumentation-confluent-kafka: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-confluent-kafka lint-instrumentation-confluent-kafka: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-confluent-kafka" test-instrumentation-logging: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-logging/tests {posargs} - lint-instrumentation-logging: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-logging - lint-instrumentation-logging: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-logging - lint-instrumentation-logging: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-logging lint-instrumentation-logging: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-logging" test-instrumentation-mysql: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-mysql/tests {posargs} - lint-instrumentation-mysql: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-mysql - lint-instrumentation-mysql: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-mysql - lint-instrumentation-mysql: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-mysql lint-instrumentation-mysql: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-mysql" test-instrumentation-mysqlclient: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-mysqlclient/tests {posargs} - lint-instrumentation-mysqlclient: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-mysqlclient - lint-instrumentation-mysqlclient: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-mysqlclient - lint-instrumentation-mysqlclient: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-mysqlclient lint-instrumentation-mysqlclient: sh -c "cd instrumentation && pylint 
--rcfile ../.pylintrc opentelemetry-instrumentation-mysqlclient" test-instrumentation-sio-pika: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-pika/tests {posargs} - lint-instrumentation-sio-pika: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-pika - lint-instrumentation-sio-pika: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-pika - lint-instrumentation-sio-pika: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-pika lint-instrumentation-sio-pika: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-pika" test-instrumentation-aio-pika: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-aio-pika/tests {posargs} - lint-instrumentation-aio-pika: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-aio-pika - lint-instrumentation-aio-pika: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-aio-pika - lint-instrumentation-aio-pika: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-aio-pika lint-instrumentation-aio-pika: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-aio-pika" test-instrumentation-psycopg: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-psycopg/tests {posargs} - lint-instrumentation-psycopg: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-psycopg - lint-instrumentation-psycopg: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-psycopg - lint-instrumentation-psycopg: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-psycopg lint-instrumentation-psycopg: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-psycopg" test-instrumentation-psycopg2: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-psycopg2/tests {posargs} - lint-instrumentation-psycopg2: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-psycopg2 - lint-instrumentation-psycopg2: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-psycopg2 - lint-instrumentation-psycopg2: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-psycopg2 lint-instrumentation-psycopg2: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-psycopg2" test-instrumentation-pymemcache: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-pymemcache/tests {posargs} - lint-instrumentation-pymemcache: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-pymemcache - lint-instrumentation-pymemcache: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-pymemcache - lint-instrumentation-pymemcache: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-pymemcache lint-instrumentation-pymemcache: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc 
opentelemetry-instrumentation-pymemcache" test-instrumentation-pymongo: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-pymongo/tests {posargs} - lint-instrumentation-pymongo: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-pymongo - lint-instrumentation-pymongo: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-pymongo - lint-instrumentation-pymongo: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-pymongo lint-instrumentation-pymongo: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-pymongo" test-instrumentation-pymysql: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-pymysql/tests {posargs} - lint-instrumentation-pymysql: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-pymysql - lint-instrumentation-pymysql: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-pymysql - lint-instrumentation-pymysql: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-pymysql lint-instrumentation-pymysql: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-pymysql" test-instrumentation-pyramid: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-pyramid/tests {posargs} - lint-instrumentation-pyramid: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-pyramid - lint-instrumentation-pyramid: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-pyramid - lint-instrumentation-pyramid: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-pyramid lint-instrumentation-pyramid: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-pyramid" test-instrumentation-redis: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-redis/tests {posargs} - lint-instrumentation-redis: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-redis - lint-instrumentation-redis: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-redis - lint-instrumentation-redis: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-redis lint-instrumentation-redis: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-redis" test-instrumentation-remoulade: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-remoulade/tests {posargs} - lint-instrumentation-remoulade: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-remoulade - lint-instrumentation-remoulade: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-remoulade - lint-instrumentation-remoulade: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-remoulade lint-instrumentation-remoulade: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-remoulade" 
test-instrumentation-requests: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-requests/tests {posargs} - lint-instrumentation-requests: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-requests - lint-instrumentation-requests: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-requests - lint-instrumentation-requests: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-requests lint-instrumentation-requests: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-requests" test-instrumentation-sqlalchemy: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-sqlalchemy/tests {posargs} - lint-instrumentation-sqlalchemy: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-sqlalchemy - lint-instrumentation-sqlalchemy: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-sqlalchemy - lint-instrumentation-sqlalchemy: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-sqlalchemy lint-instrumentation-sqlalchemy: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-sqlalchemy" test-instrumentation-sqlite3: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-sqlite3/tests {posargs} - lint-instrumentation-sqlite3: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-sqlite3 - lint-instrumentation-sqlite3: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-sqlite3 - lint-instrumentation-sqlite3: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-sqlite3 lint-instrumentation-sqlite3: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-sqlite3" test-instrumentation-starlette: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-starlette/tests {posargs} - lint-instrumentation-starlette: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-starlette - lint-instrumentation-starlette: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-starlette - lint-instrumentation-starlette: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-starlette lint-instrumentation-starlette: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-starlette" test-instrumentation-system-metrics: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-system-metrics/tests {posargs} - lint-instrumentation-system-metrics: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-system-metrics - lint-instrumentation-system-metrics: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-system-metrics - lint-instrumentation-system-metrics: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-system-metrics lint-instrumentation-system-metrics: sh -c "cd instrumentation && pylint 
--rcfile ../.pylintrc opentelemetry-instrumentation-system-metrics" test-instrumentation-threading: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-threading/tests {posargs} - lint-instrumentation-threading: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-threading - lint-instrumentation-threading: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-threading - lint-instrumentation-threading: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-threading lint-instrumentation-threading: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-threading" test-instrumentation-tornado: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-tornado/tests {posargs} - lint-instrumentation-tornado: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-tornado - lint-instrumentation-tornado: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-tornado - lint-instrumentation-tornado: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-tornado lint-instrumentation-tornado: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-tornado" test-instrumentation-tortoiseorm: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-tortoiseorm/tests {posargs} - lint-instrumentation-tortoiseorm: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-tortoiseorm - lint-instrumentation-tortoiseorm: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-tortoiseorm - lint-instrumentation-tortoiseorm: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-tortoiseorm lint-instrumentation-tortoiseorm: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-tortoiseorm" test-instrumentation-wsgi: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-wsgi/tests {posargs} - lint-instrumentation-wsgi: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-wsgi - lint-instrumentation-wsgi: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-wsgi - lint-instrumentation-wsgi: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-wsgi lint-instrumentation-wsgi: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-wsgi" test-instrumentation-httpx: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-httpx/tests {posargs} - lint-instrumentation-httpx: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-httpx - lint-instrumentation-httpx: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-httpx - lint-instrumentation-httpx: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-httpx lint-instrumentation-httpx: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc 
opentelemetry-instrumentation-httpx" test-instrumentation-asyncio: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-asyncio/tests {posargs} - lint-instrumentation-asyncio: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/instrumentation/opentelemetry-instrumentation-asyncio - lint-instrumentation-asyncio: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/instrumentation/opentelemetry-instrumentation-asyncio - lint-instrumentation-asyncio: flake8 --config {toxinidir}/.flake8 {toxinidir}/instrumentation/opentelemetry-instrumentation-asyncio lint-instrumentation-asyncio: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-asyncio" test-util-http: pytest {toxinidir}/util/opentelemetry-util-http/tests {posargs} - lint-util-http: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/util/opentelemetry-util-http - lint-util-http: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/util/opentelemetry-util-http - lint-util-http: flake8 --config {toxinidir}/.flake8 {toxinidir}/util/opentelemetry-util-http lint-util-http: sh -c "cd util && pylint --rcfile ../.pylintrc opentelemetry-util-http" test-sdk-extension-aws: pytest {toxinidir}/sdk-extension/opentelemetry-sdk-extension-aws/tests {posargs} - lint-sdk-extension-aws: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/sdk-extension/opentelemetry-sdk-extension-aws - lint-sdk-extension-aws: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/sdk-extension/opentelemetry-sdk-extension-aws - lint-sdk-extension-aws: flake8 --config {toxinidir}/.flake8 {toxinidir}/sdk-extension/opentelemetry-sdk-extension-aws lint-sdk-extension-aws: sh -c "cd sdk-extension && pylint --rcfile ../.pylintrc opentelemetry-sdk-extension-aws" benchmark-sdk-extension-aws: pytest {toxinidir}/sdk-extension/opentelemetry-sdk-extension-aws/benchmarks {posargs} --benchmark-json=sdk-extension-aws-benchmark.json test-resource-detector-container: pytest {toxinidir}/resource/opentelemetry-resource-detector-container/tests {posargs} - lint-resource-detector-container: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/resource/opentelemetry-resource-detector-container - lint-resource-detector-container: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/resource/opentelemetry-resource-detector-container - lint-resource-detector-container: flake8 --config {toxinidir}/.flake8 {toxinidir}/resource/opentelemetry-resource-detector-container lint-resource-detector-container: sh -c "cd resource && pylint --rcfile ../.pylintrc opentelemetry-resource-detector-container" test-resource-detector-azure: pytest {toxinidir}/resource/opentelemetry-resource-detector-azure/tests {posargs} - lint-resource-detector-azure: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/resource/opentelemetry-resource-detector-azure - lint-resource-detector-azure: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/resource/opentelemetry-resource-detector-azure - lint-resource-detector-azure: flake8 --config {toxinidir}/.flake8 {toxinidir}/resource/opentelemetry-resource-detector-azure lint-resource-detector-azure: sh -c "cd resource && pylint --rcfile ../.pylintrc opentelemetry-resource-detector-azure" test-processor-baggage: pytest {toxinidir}/processor/opentelemetry-processor-baggage/tests {posargs} - lint-processor-baggage: black --diff --check --config 
{toxinidir}/pyproject.toml {toxinidir}/processor/opentelemetry-processor-baggage - lint-processor-baggage: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/processor/opentelemetry-processor-baggage - lint-processor-baggage: flake8 --config {toxinidir}/.flake8 {toxinidir}/processor/opentelemetry-processor-baggage lint-processor-baggage: sh -c "cd processor && pylint --rcfile ../.pylintrc opentelemetry-processor-baggage" test-propagator-aws-xray: pytest {toxinidir}/propagator/opentelemetry-propagator-aws-xray/tests {posargs} - lint-propagator-aws-xray: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/propagator/opentelemetry-propagator-aws-xray - lint-propagator-aws-xray: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/propagator/opentelemetry-propagator-aws-xray - lint-propagator-aws-xray: flake8 --config {toxinidir}/.flake8 {toxinidir}/propagator/opentelemetry-propagator-aws-xray lint-propagator-aws-xray: sh -c "cd propagator && pylint --rcfile ../.pylintrc opentelemetry-propagator-aws-xray" benchmark-propagator-aws-xray: pytest {toxinidir}/propagator/opentelemetry-propagator-aws-xray/benchmarks {posargs} --benchmark-json=propagator-aws-xray-benchmark.json test-propagator-ot-trace: pytest {toxinidir}/propagator/opentelemetry-propagator-ot-trace/tests {posargs} - lint-propagator-ot-trace: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/propagator/opentelemetry-propagator-ot-trace - lint-propagator-ot-trace: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/propagator/opentelemetry-propagator-ot-trace - lint-propagator-ot-trace: flake8 --config {toxinidir}/.flake8 {toxinidir}/propagator/opentelemetry-propagator-ot-trace lint-propagator-ot-trace: sh -c "cd propagator && pylint --rcfile ../.pylintrc opentelemetry-propagator-ot-trace" test-exporter-richconsole: pytest {toxinidir}/exporter/opentelemetry-exporter-richconsole/tests {posargs} - lint-exporter-richconsole: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/exporter/opentelemetry-exporter-richconsole - lint-exporter-richconsole: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/exporter/opentelemetry-exporter-richconsole - lint-exporter-richconsole: flake8 --config {toxinidir}/.flake8 {toxinidir}/exporter/opentelemetry-exporter-richconsole lint-exporter-richconsole: sh -c "cd exporter && pylint --rcfile ../.pylintrc opentelemetry-exporter-richconsole" test-exporter-prometheus-remote-write: pytest {toxinidir}/exporter/opentelemetry-exporter-prometheus-remote-write/tests {posargs} - lint-exporter-prometheus-remote-write: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/exporter/opentelemetry-exporter-prometheus-remote-write - lint-exporter-prometheus-remote-write: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/exporter/opentelemetry-exporter-prometheus-remote-write - lint-exporter-prometheus-remote-write: flake8 --config {toxinidir}/.flake8 {toxinidir}/exporter/opentelemetry-exporter-prometheus-remote-write lint-exporter-prometheus-remote-write: sh -c "cd exporter && pylint --rcfile ../.pylintrc opentelemetry-exporter-prometheus-remote-write" coverage: {toxinidir}/scripts/coverage.sh @@ -1182,17 +1022,6 @@ changedir = docs commands = sphinx-build -E -a -W -b html -T . 
_build/html

-[testenv:lint]
-basepython: python3
-recreate = True
-deps =
-  -r dev-requirements.txt
-
-commands =
-  black --config {toxinidir}/pyproject.toml {toxinidir} --diff --check
-  isort --settings-path {toxinidir}/.isort.cfg {toxinidir} --diff --check-only
-  flake8 --config {toxinidir}/.flake8 {toxinidir}
-
 [testenv:spellcheck]
 basepython: python3
 recreate = True
@@ -1348,3 +1177,11 @@ commands_pre =
 commands =
     sh -c "find {toxinidir} -name \*.sh | xargs shellcheck --severity=warning"
+
+[testenv:ruff]
+basepython: python3
+deps =
+  -c {toxinidir}/dev-requirements.txt
+  pre-commit
+commands =
+  pre-commit run --color=always --all-files {posargs}
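
For context on what the new py3{8,9,10,11,12}-test-instrumentation-openai-v2 environments exercise, below is a minimal, illustrative sketch (not part of this diff) of enabling the opentelemetry-instrumentation-openai-v2 package once it is installed. The OpenAIInstrumentor import path is assumed from the naming convention used by the other instrumentations in this repository; the tracer-provider setup uses standard OpenTelemetry SDK APIs.

# Sketch: wire up a console exporter and patch the openai client.
# Assumption: opentelemetry.instrumentation.openai_v2 exposes OpenAIInstrumentor,
# following the usual <package>.instrument() entry-point convention.
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor
from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor

# Configure a tracer provider that prints finished spans to stdout.
provider = TracerProvider()
provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))
trace.set_tracer_provider(provider)

# Patch the openai client; subsequent chat/completion calls emit gen_ai spans,
# which is what the new test environments added to tox.ini above verify.
OpenAIInstrumentor().instrument()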