Merge pull request #82 from microsoft/python
added image support and corrected exception tracing
sethjuarez authored Sep 5, 2024
2 parents d917791 + 38c3d71 commit bcc57ab
Showing 6 changed files with 40 additions and 9 deletions.
6 changes: 3 additions & 3 deletions runtime/prompty/README.md
@@ -158,7 +158,7 @@ def get_response(customerId, prompt):

```

-In this case, whenever this code is executed, a `.ptrace` file will be created in the `path/to/output` directory. This file will contain the trace of the execution of the `get_response` function, the execution of the `get_customer` function, and the prompty internals that generated the response.
+In this case, whenever this code is executed, a `.tracy` file will be created in the `path/to/output` directory. This file will contain the trace of the execution of the `get_response` function, the execution of the `get_customer` function, and the prompty internals that generated the response.
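
For context, this is how the JSON tracer that produces those files is typically registered; a minimal sketch, assuming the `PromptyTracer` and `Tracer.add` hooks this README describes:

```python
import prompty
import prompty.azure
from prompty.tracer import trace, Tracer, PromptyTracer

# register the JSON tracer; .tracy files are written to the output directory
json_tracer = PromptyTracer(output_dir="path/to/output")
Tracer.add("PromptyTracer", json_tracer.tracer)

@trace
def get_response(customerId, prompt):
    # any @trace-decorated function shows up in the .tracy file
    return prompty.execute(prompt, inputs={"customerId": customerId})
```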

## OpenTelemetry Tracing
You can add OpenTelemetry tracing to your application using the same hook mechanism. In your application, you might create something like `trace_span` to trace the execution of your prompts:
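
The hook's body is collapsed in this diff; a minimal sketch of what such a `trace_span` hook might look like, assuming the standard `opentelemetry-api` package (span and attribute names here are illustrative):

```python
import json
import contextlib
from opentelemetry import trace as oteltrace
from prompty.tracer import Tracer

@contextlib.contextmanager
def trace_span(name: str):
    tracer = oteltrace.get_tracer("prompty")
    with tracer.start_as_current_span(name) as span:
        # prompty passes each hook a trace(key, value) callable
        yield lambda key, value: span.set_attribute(key, json.dumps(value))

# register the hook with the prompty runtime
Tracer.add("OpenTelemetry", trace_span)
```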
@@ -187,10 +187,10 @@ This will produce spans during the execution of the prompt that can be sent to a
The Prompty runtime also comes with a CLI tool that allows you to run prompts from the command line. The CLI tool is installed with the Python package.

```bash
-prompty -s path/to/prompty/file
+prompty -s path/to/prompty/file -e .env
```

-This will execute the prompt and print the response to the console. It also has default tracing enabled.
+This will execute the prompt and print the response to the console. If there are any environment variables the CLI should take into account, you can pass those in via the `-e` flag. It also has default tracing enabled.
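
For reference, the flag presumably points at a standard dotenv file; a rough Python equivalent of the command above (assuming the `python-dotenv` package, which the CLI may or may not use internally):

```python
from dotenv import load_dotenv

load_dotenv(".env")  # make the variables visible before executing

import prompty
import prompty.azure

print(prompty.execute("path/to/prompty/file"))
```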

## Contributing
We welcome contributions to the Prompty project! This community-led project is open to all contributors. The project can be found on [GitHub](https://github.com/Microsoft/prompty).
18 changes: 15 additions & 3 deletions runtime/prompty/prompty/azure/executor.py
@@ -91,7 +91,7 @@ def invoke(self, data: any) -> any:
        elif self.api == "completion":
            trace("signature", "AzureOpenAI.completions.create")
            args = {
-               "prompt": data.item,
+               "prompt": data,
                "model": self.deployment,
                **self.parameters,
            }
@@ -111,10 +111,22 @@ def invoke(self, data: any) -> any:
trace("result", response)

elif self.api == "image":
raise NotImplementedError("Azure OpenAI Image API is not implemented yet")
trace("signature", "AzureOpenAI.images.generate")
args = {
"prompt": data,
"model": self.deployment,
**self.parameters,
}
trace("inputs", args)
response = client.images.generate.create(**args)
trace("result", response)

# stream response
if isinstance(response, Iterator):
return PromptyStream("AzureOpenAIExecutor", response)
if self.api == "chat":
# TODO: handle the case where there might be no usage in the stream
return PromptyStream("AzureOpenAIExecutor", response)
else:
return PromptyStream("AzureOpenAIExecutor", response)
else:
return response
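
With the `NotImplementedError` gone, an image prompty can be executed like any other; a hedged sketch (the `.prompty` file and its inputs are hypothetical, assuming its model `api` is set to `image`):

```python
import prompty
import prompty.azure

# hypothetical asset: a .prompty file whose model api is "image"
result = prompty.execute("prompts/generate_image.prompty", inputs={"subject": "a lighthouse at dusk"})
print(result)  # a URL or base64 payload, depending on the response
```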
12 changes: 12 additions & 0 deletions runtime/prompty/prompty/azure/processor.py
@@ -1,5 +1,6 @@
from typing import Iterator
from openai.types.completion import Completion
+from openai.types.images_response import ImagesResponse
from openai.types.chat.chat_completion import ChatCompletion
from ..core import Invoker, InvokerFactory, Prompty, PromptyStream, ToolCall
from openai.types.create_embedding_response import CreateEmbeddingResponse
@@ -50,6 +51,17 @@ def invoke(self, data: any) -> any:
                return data.data[0].embedding
            else:
                return [item.embedding for item in data.data]
+       elif isinstance(data, ImagesResponse):
+           item: ImagesResponse = data
+
+           if len(data.data) == 0:
+               raise ValueError("Invalid data")
+           elif len(data.data) == 1:
+               return item.data[0].url if item.data[0].url else item.data[0].b64_json
+           else:
+               return [image.url if image.url else image.b64_json for image in data.data]

        elif isinstance(data, Iterator):

            def generator():
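
Because the processor now returns either a hosted URL or a raw `b64_json` payload, callers should be prepared for both; a small illustrative helper (the function name and output path are assumptions):

```python
import base64
from pathlib import Path

def save_generated_image(result: str, path: str = "generated.png") -> None:
    # the image processor yields either a URL or a base64 string
    if result.startswith("http"):
        print(f"image hosted at {result}")
    else:
        Path(path).write_bytes(base64.b64decode(result))
```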
4 changes: 3 additions & 1 deletion runtime/prompty/prompty/core.py
@@ -561,7 +561,9 @@ async def __anext__(self):
            # StopIteration is raised
            # contents are exhausted
            if len(self.items) > 0:
-               with Tracer.start(f"{self.name}.AsyncPromptyStream") as trace:
+               with Tracer.start("AsyncPromptyStream") as trace:
+                   trace("signature", f"{self.name}.AsyncPromptyStream")
+                   trace("inputs", "None")
                    trace("result", [to_dict(s) for s in self.items])

            raise StopIteration
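
The change also shows the hook contract more clearly: `Tracer.start(name)` yields a `trace(key, value)` callable that fans out to every registered tracer, so signature, inputs, and result land as separate keys. A minimal sketch of using that contract directly (the step name is illustrative):

```python
from prompty.tracer import Tracer

# record a custom unit of work through the same hook the runtime uses
with Tracer.start("my_custom_step") as trace:
    trace("signature", "my_module.my_custom_step")
    trace("inputs", {"question": "hello"})
    trace("result", "world")
```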
4 changes: 3 additions & 1 deletion runtime/prompty/prompty/tracer.py
@@ -1,6 +1,7 @@
import os
import json
import inspect
+import traceback
import importlib
import contextlib
from pathlib import Path
@@ -176,7 +177,8 @@ async def wrapper(*args, **kwargs):
"result",
{
"exception": {
"type": type(e).__name__,
"type": type(e),
"traceback": traceback.format_tb(),
"message": str(e),
"args": to_dict(e.args),
}
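
The corrected payload can be exercised standalone; a small sketch of the exception record shape, outside the prompty runtime:

```python
import traceback

try:
    1 / 0
except ZeroDivisionError as e:
    record = {
        "exception": {
            "type": type(e),
            "traceback": traceback.format_tb(e.__traceback__),
            "message": str(e),
            "args": e.args,
        }
    }
    print(record["exception"]["traceback"][0])
```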
5 changes: 4 additions & 1 deletion runtime/prompty/tests/test_tracing.py
@@ -151,5 +151,8 @@ def test_streaming():
    result = prompty.execute(
        "prompts/streaming.prompty",
    )
+   r = []
    for item in result:
-       print(item)
+       r.append(item)
+
+   return ' '.join(r)
