diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/CHANGELOG.md b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/CHANGELOG.md index 2c10498511..4644ee3dc5 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/CHANGELOG.md +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/CHANGELOG.md @@ -7,6 +7,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## Unreleased +- Coerce openai response_format to semconv format + ([#3073](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3073)) - Add example to `opentelemetry-instrumentation-openai-v2` ([#3006](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3006)) - Support for `AsyncOpenAI/AsyncCompletions` ([#2984](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/2984)) diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/utils.py b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/utils.py index cf920c17ee..f8a837259e 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/utils.py +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/utils.py @@ -13,7 +13,7 @@ # limitations under the License. from os import environ -from typing import Optional, Union +from typing import Mapping, Optional, Union from urllib.parse import urlparse from httpx import URL @@ -202,12 +202,23 @@ def get_llm_request_attributes( GenAIAttributes.GEN_AI_REQUEST_FREQUENCY_PENALTY: kwargs.get( "frequency_penalty" ), - GenAIAttributes.GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT: kwargs.get( - "response_format" - ), GenAIAttributes.GEN_AI_OPENAI_REQUEST_SEED: kwargs.get("seed"), } + if (response_format := kwargs.get("response_format")) is not None: + # response_format may be string or object with a string in the `type` key + if isinstance(response_format, Mapping): + if ( + response_format_type := response_format.get("type") + ) is not None: + attributes[ + GenAIAttributes.GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT + ] = response_format_type + else: + attributes[ + GenAIAttributes.GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT + ] = response_format + set_server_address_and_port(client_instance, attributes) service_tier = kwargs.get("service_tier") attributes[GenAIAttributes.GEN_AI_OPENAI_RESPONSE_SERVICE_TIER] = ( diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_async_chat_completion_extra_params.yaml b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_async_chat_completion_extra_params.yaml index 3d13c9344e..37c18b091a 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_async_chat_completion_extra_params.yaml +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_async_chat_completion_extra_params.yaml @@ -10,6 +10,9 @@ interactions: ], "model": "gpt-4o-mini", "max_tokens": 50, + "response_format": { + "type": "text" + }, "seed": 42, "stream": false, "temperature": 0.5, @@ -25,7 +28,7 @@ interactions: connection: - keep-alive content-length: - - '183' + - '220' content-type: - application/json host: @@ -45,16 +48,16 @@ interactions: x-stainless-runtime: - CPython x-stainless-runtime-version: - - 3.12.5 + - 3.12.7 method: POST uri: 
https://api.openai.com/v1/chat/completions response: body: string: |- { - "id": "chatcmpl-ASv9WMTAMZY4O1EImv3csZa6Ch7KI", + "id": "chatcmpl-AbMH3rR6OBMN9hG5w0TRrezuiHLMr", "object": "chat.completion", - "created": 1731456242, + "created": 1733467121, "model": "gpt-4o-mini-2024-07-18", "choices": [ { @@ -84,19 +87,19 @@ interactions: } }, "service_tier": "default", - "system_fingerprint": "fp_0ba0d124f1" + "system_fingerprint": "fp_bba3c8e70b" } headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 8e1a8088f867e167-MRS + - 8eda4640ead3e535-KUL Connection: - keep-alive Content-Type: - application/json Date: - - Wed, 13 Nov 2024 00:04:02 GMT + - Fri, 06 Dec 2024 06:38:42 GMT Server: - cloudflare Set-Cookie: test_set_cookie @@ -112,25 +115,25 @@ interactions: - '825' openai-organization: test_openai_org_id openai-processing-ms: - - '488' + - '835' openai-version: - '2020-10-01' strict-transport-security: - max-age=31536000; includeSubDomains; preload x-ratelimit-limit-requests: - - '30000' + - '10000' x-ratelimit-limit-tokens: - - '150000000' + - '200000' x-ratelimit-remaining-requests: - - '29999' + - '9999' x-ratelimit-remaining-tokens: - - '149999943' + - '199943' x-ratelimit-reset-requests: - - 2ms + - 8.64s x-ratelimit-reset-tokens: - - 0s + - 16ms x-request-id: - - req_6df08d6267415e8f5db3628a6757edad + - req_fea877c0a861ff92a6a5217247681f24 status: code: 200 message: OK diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_chat_completion_extra_params.yaml b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_chat_completion_extra_params.yaml index 7cc89ad9b8..87f9673512 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_chat_completion_extra_params.yaml +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_chat_completion_extra_params.yaml @@ -10,6 +10,9 @@ interactions: ], "model": "gpt-4o-mini", "max_tokens": 50, + "response_format": { + "type": "text" + }, "seed": 42, "stream": false, "temperature": 0.5, @@ -25,13 +28,13 @@ interactions: connection: - keep-alive content-length: - - '183' + - '220' content-type: - application/json host: - api.openai.com user-agent: - - OpenAI/Python 1.54.3 + - OpenAI/Python 1.26.0 x-stainless-arch: - arm64 x-stainless-async: @@ -41,22 +44,20 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.54.3 - x-stainless-retry-count: - - '0' + - 1.26.0 x-stainless-runtime: - CPython x-stainless-runtime-version: - - 3.12.6 + - 3.12.7 method: POST uri: https://api.openai.com/v1/chat/completions response: body: string: |- { - "id": "chatcmpl-ASYMT7913Sp58qhZqQgY7g7Ia2J4M", + "id": "chatcmpl-AbMH70fQA9lMPIClvBPyBSjqJBm9F", "object": "chat.completion", - "created": 1731368633, + "created": 1733467125, "model": "gpt-4o-mini-2024-07-18", "choices": [ { @@ -86,19 +87,17 @@ interactions: } }, "service_tier": "default", - "system_fingerprint": "fp_0ba0d124f1" + "system_fingerprint": "fp_0705bf87c0" } headers: - CF-Cache-Status: - - DYNAMIC CF-RAY: - - 8e1225a3f8e9ce65-SIN + - 8eda465e8fe9e58c-KUL Connection: - keep-alive Content-Type: - application/json Date: - - Mon, 11 Nov 2024 23:43:53 GMT + - Fri, 06 Dec 2024 06:38:46 GMT Server: - cloudflare Set-Cookie: test_set_cookie @@ -110,11 +109,13 @@ interactions: - X-Request-ID alt-svc: - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC content-length: - '825' openai-organization: test_openai_org_id openai-processing-ms: - - '431' + - '558' 
openai-version: - '2020-10-01' strict-transport-security: @@ -128,11 +129,11 @@ interactions: x-ratelimit-remaining-tokens: - '199943' x-ratelimit-reset-requests: - - 14.746s + - 12.967s x-ratelimit-reset-tokens: - 16ms x-request-id: - - req_81e29a8992ea8001c0240bd990acf0ab + - req_22ff608d47a299f0780f52360631eabb status: code: 200 message: OK diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_async_chat_completions.py b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_async_chat_completions.py index e19bc7c311..65c596796d 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_async_chat_completions.py +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_async_chat_completions.py @@ -158,6 +158,7 @@ async def test_async_chat_completion_extra_params( max_tokens=50, stream=False, extra_body={"service_tier": "default"}, + response_format={"type": "text"}, ) spans = span_exporter.get_finished_spans() @@ -173,6 +174,12 @@ async def test_async_chat_completion_extra_params( spans[0].attributes[GenAIAttributes.GEN_AI_OPENAI_REQUEST_SERVICE_TIER] == "default" ) + assert ( + spans[0].attributes[ + GenAIAttributes.GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT + ] + == "text" + ) @pytest.mark.vcr() diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_chat_completions.py b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_chat_completions.py index 4f732290c0..4677b7cb95 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_chat_completions.py +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_chat_completions.py @@ -151,6 +151,7 @@ def test_chat_completion_extra_params( max_tokens=50, stream=False, extra_body={"service_tier": "default"}, + response_format={"type": "text"}, ) spans = span_exporter.get_finished_spans() @@ -166,6 +167,12 @@ def test_chat_completion_extra_params( spans[0].attributes[GenAIAttributes.GEN_AI_OPENAI_REQUEST_SERVICE_TIER] == "default" ) + assert ( + spans[0].attributes[ + GenAIAttributes.GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT + ] + == "text" + ) @pytest.mark.vcr() diff --git a/instrumentation/opentelemetry-instrumentation-tornado/test-requirements.txt b/instrumentation/opentelemetry-instrumentation-tornado/test-requirements.txt index c1b4f06db1..2df7df38ec 100644 --- a/instrumentation/opentelemetry-instrumentation-tornado/test-requirements.txt +++ b/instrumentation/opentelemetry-instrumentation-tornado/test-requirements.txt @@ -17,7 +17,7 @@ py-cpuinfo==9.0.0 pytest==7.4.4 requests==2.32.3 tomli==2.0.1 -tornado==6.4.1 +tornado==6.4.2 typing_extensions==4.12.2 urllib3==2.2.2 Werkzeug==3.0.6
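
For reference, a minimal standalone sketch of the coercion that the utils.py hunk above introduces: it mirrors the new branch that maps the OpenAI `response_format` argument (either a plain string such as "text" or a mapping like {"type": "json_object"}) onto a single string attribute value, instead of recording the raw object. The attribute key constant and helper name below are illustrative only; the actual instrumentation writes to GenAIAttributes.GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT inside get_llm_request_attributes, as shown in the diff.

    from typing import Any, Mapping, MutableMapping, Optional

    # Illustrative key; the instrumentation uses the semconv constant
    # GenAIAttributes.GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT.
    _RESPONSE_FORMAT_ATTR = "gen_ai.openai.request.response_format"


    def set_response_format_attribute(
        attributes: MutableMapping[str, Any], kwargs: Mapping[str, Any]
    ) -> None:
        """Mirror the coercion in the diff: record only the string form of
        response_format, never the raw mapping."""
        response_format: Optional[Any] = kwargs.get("response_format")
        if response_format is None:
            return
        if isinstance(response_format, Mapping):
            # Object form, e.g. {"type": "text"} or {"type": "json_object"}
            response_format_type = response_format.get("type")
            if response_format_type is not None:
                attributes[_RESPONSE_FORMAT_ATTR] = response_format_type
        else:
            # Plain string form, e.g. "text"
            attributes[_RESPONSE_FORMAT_ATTR] = response_format


    if __name__ == "__main__":
        attrs: dict = {}
        set_response_format_attribute(
            attrs, {"response_format": {"type": "text"}}
        )
        print(attrs)  # {'gen_ai.openai.request.response_format': 'text'}

This is also what the new test assertions check: after calling chat.completions.create(..., response_format={"type": "text"}), the span attribute value is the string "text" rather than the mapping.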