Add Initial Support for Instrumenting OpenAI Python Library - Chat Completion Create #2759

Merged · 53 commits · Oct 22, 2024
Changes from 1 commit
Commits
c7b3c97
[WIP] Initial commit for OpenAI instrumentation
karthikscale3 Jul 31, 2024
52a5f07
Merge branch 'main' into openai-opentelemetry
karthikscale3 Jul 31, 2024
6383978
Merge branch 'main' of github.com:Scale3-Labs/opentelemetry-python-co…
alizenhom Aug 7, 2024
e1bca1a
Loosen openai version for instrumentation + linting
alizenhom Aug 12, 2024
94c10f4
fix wrong patch.py import
alizenhom Aug 12, 2024
bb97ec9
add missing dependecies tiktoken & pydantic
alizenhom Aug 12, 2024
e15d443
remove async support from `StreamWrapper` until further notice
alizenhom Aug 12, 2024
1efdfcd
addressing comments:
alizenhom Aug 13, 2024
892d388
Merge branch 'open-telemetry:main' into openai-opentelemetry
alizenhom Aug 13, 2024
e7398a2
Merge branch 'open-telemetry:main' into openai-opentelemetry
alizenhom Aug 14, 2024
e601f6d
Merge branch 'open-telemetry:main' into openai-opentelemetry
alizenhom Aug 15, 2024
df3fc62
Merge branch 'open-telemetry:main' into openai-opentelemetry
alizenhom Sep 5, 2024
d04edad
Refactoring Openai instrumentation
alizenhom Sep 5, 2024
b8dde6c
Merge branch 'openai-opentelemetry' of github.com:Scale3-Labs/opentel…
alizenhom Sep 5, 2024
ec3c320
remove `SpanAttributes` and refactor streamwrapper
alizenhom Sep 5, 2024
706c6f2
change instrumentation name & fix some nits
alizenhom Sep 6, 2024
71aaeb6
change openai package name
alizenhom Sep 6, 2024
8495a24
cleanup setting prompt events & finish reasons
alizenhom Sep 6, 2024
885b7fd
catch connection drops and reraise error in streaming
alizenhom Sep 6, 2024
6ac04cb
run `tox -e generate`
alizenhom Sep 6, 2024
42370a7
run linter
alizenhom Sep 6, 2024
c5ef8c3
run `tox -e generate`
alizenhom Sep 6, 2024
d52460e
add changelog
alizenhom Sep 6, 2024
452d41a
test requirments + tox ini
alizenhom Sep 9, 2024
48fb3fb
Merge branch 'main' of github.com:open-telemetry/opentelemetry-python…
alizenhom Sep 25, 2024
e9a76c4
remove LLMSpanAttributes validation layer
alizenhom Sep 25, 2024
3d5a2b3
add tests
alizenhom Sep 25, 2024
b583aa0
enhance build settings
alizenhom Sep 27, 2024
ae9bc2a
Merge branch 'main' of github.com:open-telemetry/opentelemetry-python…
alizenhom Sep 27, 2024
f2a5cfa
address test comments
alizenhom Sep 27, 2024
e701678
Merge branch 'open-telemetry:main' into openai-opentelemetry
alizenhom Oct 1, 2024
a457df2
Merge branch 'main' into openai-opentelemetry
karthikscale3 Oct 7, 2024
3bdfd8f
run `tox -e generate` & `tox -e generate-workflows`
alizenhom Oct 8, 2024
41cbfd0
Update instrumentation/opentelemetry-instrumentation-openai/src/opent…
karthikscale3 Oct 9, 2024
f3b7c0e
Merge branch 'main' into openai-opentelemetry
karthikscale3 Oct 9, 2024
8813754
Merge branch 'main' into openai-opentelemetry
karthikscale3 Oct 16, 2024
578653d
change folder name to v2
alizenhom Oct 17, 2024
578a942
adjust all naming to -v2
alizenhom Oct 17, 2024
51f2438
run `tox -e generate`
alizenhom Oct 17, 2024
8b58f27
adjust tests
alizenhom Oct 17, 2024
d467eb1
set attributes only when span is recording
alizenhom Oct 17, 2024
ad7f198
`model` fallback to `gpt-3.5-turbo`
alizenhom Oct 17, 2024
5e4c2b2
Merge branch 'open-telemetry:main' into openai-opentelemetry
alizenhom Oct 19, 2024
bbee109
adjust `-v2` for linting
alizenhom Oct 19, 2024
9ac90f9
make sure span is recording before setting attributes
alizenhom Oct 19, 2024
2549f25
pass span_attributes when creating span inside `start_span`
alizenhom Oct 19, 2024
1dacf8d
adjust unwrap + add pydantic to test reqs
alizenhom Oct 21, 2024
4048410
bump openai support to `1.26.0`
alizenhom Oct 21, 2024
8fc4336
run `tox -e generate` & `tox -e generate-workflows`
alizenhom Oct 21, 2024
9e273f6
add uninstrument in tests + remove any none values from span attributes
alizenhom Oct 22, 2024
8e667de
cleanup
alizenhom Oct 22, 2024
592c18e
adjust `unwrap`
alizenhom Oct 22, 2024
cd8b098
Merge branch 'open-telemetry:main' into openai-opentelemetry
alizenhom Oct 22, 2024
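
For context before the file diff: the instrumentor follows the standard BaseInstrumentor pattern, so enabling it looks like any other OpenTelemetry instrumentation. A minimal usage sketch, assuming the published package name opentelemetry-instrumentation-openai-v2 and an OpenAIInstrumentor entry point (inferred from the "-v2" renaming commits above); exact import paths may differ.

# Minimal sketch, not canonical docs: package/module names are inferred from
# the commit history ("change folder name to v2", "adjust all naming to -v2").
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor
from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor

provider = TracerProvider()
provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))
trace.set_tracer_provider(provider)

# Patches openai.resources.chat.completions.Completions.create so every
# chat completion call emits a CLIENT span with gen_ai.* attributes.
OpenAIInstrumentor().instrument()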
@@ -26,8 +26,8 @@ classifiers = [
]
dependencies = [
"opentelemetry-api ~= 1.12",
"opentelemetry-instrumentation == 0.47b0",
"tiktoken>=0.1.1",
"opentelemetry-instrumentation == 0.48b0.dev",
"opentelemetry-semantic-conventions == 0.48b0.dev",
"pydantic>=1.8"

]
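
This hunk moves opentelemetry-instrumentation and opentelemetry-semantic-conventions to the 0.48b0.dev line, adds pydantic, and appears to drop tiktoken. To confirm what is actually installed at runtime, the stdlib importlib.metadata works; this is the same mechanism the instrumentor previously used to read the openai version. A small sketch:

# Quick runtime check of installed distributions (sketch).
import importlib.metadata

for dist in ("openai", "opentelemetry-instrumentation", "pydantic"):
    print(dist, importlib.metadata.version(dist))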
@@ -46,7 +46,7 @@
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
from opentelemetry.instrumentation.openai.package import _instruments
from opentelemetry.trace import get_tracer
from wrapt import wrap_function_wrapper
from wrapt import wrap_function_wrapper as _W
from .patch import chat_completions_create


@@ -59,13 +59,10 @@ def _instrument(self, **kwargs):
"""Enable OpenAI instrumentation."""
tracer_provider = kwargs.get("tracer_provider")
tracer = get_tracer(__name__, "", tracer_provider)
version = importlib.metadata.version("openai")
wrap_function_wrapper(
"openai.resources.chat.completions",
"Completions.create",
chat_completions_create(
"openai.chat.completions.create", version, tracer
),
_W(
module="openai.resources.chat.completions",
name="Completions.create",
wrapper=chat_completions_create(tracer),
)

def _uninstrument(self, **kwargs):
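
The wrapper factory pattern above is standard wrapt: chat_completions_create(tracer) returns a callable with the (wrapped, instance, args, kwargs) signature that wrapt invokes in place of Completions.create. A stripped-down sketch of the same shape; make_traced is a hypothetical stand-in for the real factory.

# Illustration of the wrapt pattern used above; names other than
# wrap_function_wrapper are hypothetical.
from opentelemetry import trace
from wrapt import wrap_function_wrapper

tracer = trace.get_tracer(__name__)

def make_traced(tracer):
    def traced(wrapped, instance, args, kwargs):
        # wrapt hands us the original bound method plus its call arguments.
        with tracer.start_as_current_span("openai.chat.completions.create"):
            return wrapped(*args, **kwargs)
    return traced

wrap_function_wrapper(
    module="openai.resources.chat.completions",
    name="Completions.create",
    wrapper=make_traced(tracer),
)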
@@ -12,54 +12,38 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import json
from typing import Optional, Union

from opentelemetry import trace
from opentelemetry.trace import SpanKind, Span
from opentelemetry.trace.status import Status, StatusCode
from opentelemetry.trace.propagation import set_span_in_context
from openai import NOT_GIVEN
from .span_attributes import LLMSpanAttributes, SpanAttributes

from .utils import silently_fail, extract_content
from .span_attributes import LLMSpanAttributes, SpanAttributes
from opentelemetry.semconv._incubating.attributes import (
gen_ai_attributes as GenAIAttributes,
)
from .utils import (
silently_fail,
extract_content,
get_llm_request_attributes,
is_streaming,
set_span_attribute,
set_event_completion,
extract_tools_prompt,
)
from opentelemetry.trace import Tracer


def chat_completions_create(original_method, version, tracer: Tracer):
def chat_completions_create(tracer: Tracer):
"""Wrap the `create` method of the `ChatCompletion` class to trace it."""

def traced_method(wrapped, instance, args, kwargs):

llm_prompts = []

for item in kwargs.get("messages", []):
tools = get_tool_calls(item)
if tools is not None:
tool_calls = []
for tool_call in tools:
tool_call_dict = {
"id": tool_call.id if hasattr(tool_call, "id") else "",
"type": (
tool_call.type
if hasattr(tool_call, "type")
else ""
),
}
if hasattr(tool_call, "function"):
tool_call_dict["function"] = {
"name": (
tool_call.function.name
if hasattr(tool_call.function, "name")
else ""
),
"arguments": (
tool_call.function.arguments
if hasattr(tool_call.function, "arguments")
else ""
),
}
tool_calls.append(tool_call_dict)
llm_prompts.append(tool_calls)
else:
llm_prompts.append(item)
tools_prompt = extract_tools_prompt(item)
llm_prompts.append(tools_prompt if tools_prompt else item)

span_attributes = {
**get_llm_request_attributes(kwargs, prompts=llm_prompts),
@@ -74,7 +74,7 @@ def traced_method(wrapped, instance, args, kwargs):
kind=SpanKind.CLIENT,
context=set_span_in_context(trace.get_current_span()),
)
_set_input_attributes(span, kwargs, attributes)
_set_input_attributes(span, attributes)

try:
result = wrapped(*args, **kwargs)
@@ -86,52 +70,31 @@ def traced_method(wrapped, instance, args, kwargs):
tool_calls=kwargs.get("tools") is not None,
)
else:
_set_response_attributes(span, kwargs, result)
_set_response_attributes(span, result)
span.end()
return result

except Exception as error:
span.set_status(Status(StatusCode.ERROR, str(error)))
span.set_attribute("error.type", error.__class__.__name__)
span.end()
raise

return traced_method


def get_tool_calls(item):
if isinstance(item, dict):
return item.get("tool_calls")
else:
return getattr(item, "tool_calls", None)


@silently_fail
def _set_input_attributes(span, kwargs, attributes: LLMSpanAttributes):
tools = []

if (
kwargs.get("functions") is not None
and kwargs.get("functions") != NOT_GIVEN
):
for function in kwargs.get("functions"):
tools.append(
json.dumps({"type": "function", "function": function})
)

if kwargs.get("tools") is not None and kwargs.get("tools") != NOT_GIVEN:
tools.append(json.dumps(kwargs.get("tools")))

if tools:
set_span_attribute(span, SpanAttributes.LLM_TOOLS, json.dumps(tools))

def _set_input_attributes(span, attributes: LLMSpanAttributes):
for field, value in attributes.model_dump(by_alias=True).items():
set_span_attribute(span, field, value)


@silently_fail
def _set_response_attributes(span, kwargs, result):
set_span_attribute(span, SpanAttributes.LLM_RESPONSE_MODEL, result.model)
if hasattr(result, "choices") and result.choices is not None:
def _set_response_attributes(span, result):
set_span_attribute(
span, GenAIAttributes.GEN_AI_RESPONSE_MODEL, result.model
)
if getattr(result, "choices", None):
responses = [
{
"role": (
@@ -154,120 +117,30 @@ def _set_response_attributes(span, kwargs, result):
]
set_event_completion(span, responses)

if (
hasattr(result, "system_fingerprint")
and result.system_fingerprint is not None
and result.system_fingerprint != NOT_GIVEN
):
if getattr(result, "system_fingerprint", None):
set_span_attribute(
span,
SpanAttributes.LLM_SYSTEM_FINGERPRINT,
result.system_fingerprint,
)
# Get the usage
if hasattr(result, "usage") and result.usage is not None:
usage = result.usage
if usage is not None:
set_span_attribute(
span,
SpanAttributes.LLM_USAGE_PROMPT_TOKENS,
result.usage.prompt_tokens,
)
set_span_attribute(
span,
SpanAttributes.LLM_USAGE_COMPLETION_TOKENS,
result.usage.completion_tokens,
)
set_span_attribute(
span,
SpanAttributes.LLM_USAGE_TOTAL_TOKENS,
result.usage.total_tokens,
)


def set_event_prompt(span: Span, prompt):
span.add_event(
name=SpanAttributes.LLM_CONTENT_PROMPT,
attributes={
SpanAttributes.LLM_PROMPTS: prompt,
},
)


def set_span_attributes(span: Span, attributes: dict):
for field, value in attributes.model_dump(by_alias=True).items():
set_span_attribute(span, field, value)


def set_event_completion(span: Span, result_content):
span.add_event(
name=SpanAttributes.LLM_CONTENT_COMPLETION,
attributes={
SpanAttributes.LLM_COMPLETIONS: json.dumps(result_content),
},
)


def set_span_attribute(span: Span, name, value):
if value is not None:
if value != "" or value != NOT_GIVEN:
if name == SpanAttributes.LLM_PROMPTS:
set_event_prompt(span, value)
else:
span.set_attribute(name, value)
return


def is_streaming(kwargs):
return non_numerical_value_is_set(kwargs.get("stream"))


def non_numerical_value_is_set(value: Optional[Union[bool, str]]):
return bool(value) and value != NOT_GIVEN


def get_llm_request_attributes(
kwargs, prompts=None, model=None, operation_name="chat"
):

user = kwargs.get("user")
if prompts is None:
prompts = (
[{"role": user or "user", "content": kwargs.get("prompt")}]
if "prompt" in kwargs
else None
# Get the usage
if getattr(result, "usage", None):
set_span_attribute(
span,
GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS,
result.usage.prompt_tokens,
)
set_span_attribute(
span,
GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS,
result.usage.completion_tokens,
)
set_span_attribute(
span,
"gen_ai.usage.total_tokens",
result.usage.total_tokens,
)
top_k = (
kwargs.get("n")
or kwargs.get("k")
or kwargs.get("top_k")
or kwargs.get("top_n")
)

top_p = kwargs.get("p") or kwargs.get("top_p")
tools = kwargs.get("tools")
return {
SpanAttributes.LLM_OPERATION_NAME: operation_name,
SpanAttributes.LLM_REQUEST_MODEL: model or kwargs.get("model"),
SpanAttributes.LLM_IS_STREAMING: kwargs.get("stream"),
SpanAttributes.LLM_REQUEST_TEMPERATURE: kwargs.get("temperature"),
SpanAttributes.LLM_TOP_K: top_k,
SpanAttributes.LLM_PROMPTS: json.dumps(prompts) if prompts else None,
SpanAttributes.LLM_USER: user,
SpanAttributes.LLM_REQUEST_TOP_P: top_p,
SpanAttributes.LLM_REQUEST_MAX_TOKENS: kwargs.get("max_tokens"),
SpanAttributes.LLM_SYSTEM_FINGERPRINT: kwargs.get(
"system_fingerprint"
),
SpanAttributes.LLM_PRESENCE_PENALTY: kwargs.get("presence_penalty"),
SpanAttributes.LLM_FREQUENCY_PENALTY: kwargs.get("frequency_penalty"),
SpanAttributes.LLM_REQUEST_SEED: kwargs.get("seed"),
SpanAttributes.LLM_TOOLS: json.dumps(tools) if tools else None,
SpanAttributes.LLM_TOOL_CHOICE: kwargs.get("tool_choice"),
SpanAttributes.LLM_REQUEST_LOGPROPS: kwargs.get("logprobs"),
SpanAttributes.LLM_REQUEST_LOGITBIAS: kwargs.get("logit_bias"),
SpanAttributes.LLM_REQUEST_TOP_LOGPROPS: kwargs.get("top_logprobs"),
}


class StreamWrapper:
@@ -277,7 +150,7 @@ def __init__(
self,
stream,
span,
prompt_tokens=None,
prompt_tokens=0,
function_call=False,
tool_calls=False,
):
@@ -299,17 +172,17 @@ def cleanup(self):
if self._span_started:
set_span_attribute(
self.span,
SpanAttributes.LLM_USAGE_PROMPT_TOKENS,
GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS,
self.prompt_tokens,
)
set_span_attribute(
self.span,
SpanAttributes.LLM_USAGE_COMPLETION_TOKENS,
GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS,
self.completion_tokens,
)
set_span_attribute(
self.span,
SpanAttributes.LLM_USAGE_TOTAL_TOKENS,
"gen_ai.usage.total_tokens",
self.prompt_tokens + self.completion_tokens,
)
set_event_completion(
@@ -346,14 +219,14 @@ def __next__(self):
raise

def process_chunk(self, chunk):
if hasattr(chunk, "model") and chunk.model is not None:
if getattr(chunk, "model", None):
set_span_attribute(
self.span,
SpanAttributes.LLM_RESPONSE_MODEL,
GenAIAttributes.GEN_AI_RESPONSE_MODEL,
chunk.model,
)

if hasattr(chunk, "choices") and chunk.choices is not None:
if getattr(chunk, "choices", None):
content = []
if not self.function_call and not self.tool_calls:
for choice in chunk.choices:
@@ -383,12 +256,12 @@ def process_chunk(self, chunk):
if content:
self.result_content.append(content[0])

if hasattr(chunk, "text"):
if getattr(chunk, "text", None):
content = [chunk.text]

if content:
self.result_content.append(content[0])

if getattr(chunk, "usage"):
if getattr(chunk, "usage", None):
self.completion_tokens = chunk.usage.completion_tokens
self.prompt_tokens = chunk.usage.prompt_tokens
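
From the caller's side the stream wrapper is transparent: iterating drives StreamWrapper.__next__, each chunk updates model/choices/usage via process_chunk, and exhausting the stream triggers cleanup(), which records token usage and ends the span. A hedged consumption sketch; client setup and the model name are illustrative, assuming openai>=1.26.0 as pinned in the PR.

# Illustrative consumer; the instrumentation wraps this call transparently.
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment
stream = client.chat.completions.create(
    model="gpt-4o-mini",  # illustrative model name
    messages=[{"role": "user", "content": "Say hello"}],
    stream=True,
)

# Draining the stream lets StreamWrapper accumulate completion content and
# token counts; when iteration ends, cleanup() sets the gen_ai usage
# attributes (input/output/total tokens) and ends the span.
for chunk in stream:
    pass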