Skip to content

Commit

Permalink
Merge pull request #1459 from mito-ds/readd-logging-mito-ai
Browse files Browse the repository at this point in the history
Re-added expanded logging to Mito AI
  • Loading branch information
aarondr77 authored Jan 2, 2025
2 parents 6194e7a + 5416460 commit 72e7f0c
Show file tree
Hide file tree
Showing 4 changed files with 33 additions and 17 deletions.
12 changes: 6 additions & 6 deletions mito-ai/mito_ai/handlers.py
Original file line number Diff line number Diff line change
Expand Up @@ -102,9 +102,9 @@ async def on_message(self, message: str) -> None:

try:
if request.stream and self._llm.can_stream:
await self._handle_stream_request(request)
await self._handle_stream_request(request, prompt_type=request.type)
else:
await self._handle_request(request)
await self._handle_request(request, prompt_type=request.type)
except Exception as e:
await self.handle_exception(e, request)

Expand Down Expand Up @@ -158,22 +158,22 @@ async def handle_exception(self, e: Exception, request: CompletionRequest):
)
self.reply(reply)

async def _handle_request(self, request: CompletionRequest) -> None:
async def _handle_request(self, request: CompletionRequest, prompt_type: str) -> None:
"""Handle completion request.
Args:
request: The completion request description.
"""
start = time.time()
reply = await self._llm.request_completions(request)
reply = await self._llm.request_completions(request, prompt_type)
self.reply(reply)
latency_ms = round((time.time() - start) * 1000)
self.log.info(f"Completion handler resolved in {latency_ms} ms.")

async def _handle_stream_request(self, request: CompletionRequest) -> None:
async def _handle_stream_request(self, request: CompletionRequest, prompt_type: str) -> None:
"""Handle stream completion request."""
start = time.time()
async for reply in self._llm.stream_completions(request):
async for reply in self._llm.stream_completions(request, prompt_type):
self.reply(reply)
latency_ms = round((time.time() - start) * 1000)
self.log.info(f"Completion streaming completed in {latency_ms} ms.")
Expand Down
2 changes: 1 addition & 1 deletion mito-ai/mito_ai/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ class AICapabilities:
class CompletionRequest:
"""Message sent by the client to request an AI chat response."""

type: Literal["chat", "inline_completion"]
type: Literal["chat", "inline_completion", "codeExplain", "smartDebug", "ai_capabilities"]
"""Message type."""
message_id: str
"""Message UID generated by the client."""
Expand Down
33 changes: 23 additions & 10 deletions mito-ai/mito_ai/providers.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@
MITO_SERVER_NUM_USAGES,
USER_KEY,
log,
log_ai_completion_success,
)

__all__ = ["OpenAIProvider"]
Expand Down Expand Up @@ -215,11 +216,12 @@ def _openAI_client(self) -> Optional[openai.AsyncOpenAI]:

return self._client

async def request_completions(self, request: CompletionRequest) -> CompletionReply:
async def request_completions(self, request: CompletionRequest, prompt_type: str) -> CompletionReply:
"""Get a completion from the OpenAI API.
Args:
request: The completion request description.
prompt_type: The type of prompt that was sent to the AI (e.g. "chat", "smart_debug", "explain")
Returns:
The completion
"""
Expand All @@ -236,7 +238,12 @@ async def request_completions(self, request: CompletionRequest) -> CompletionRep
temperature=self.temperature,
)
# Log the successful completion
log(MITO_AI_COMPLETION_SUCCESS, params={KEY_TYPE_PARAM: USER_KEY})
log_ai_completion_success(
key_type=USER_KEY,
prompt_type=prompt_type,
last_message_content=str(request.messages[-1].get('content', '')),
response={"completion": completion.choices[0].message.content},
)

if len(completion.choices) == 0:
return CompletionReply(
Expand Down Expand Up @@ -287,12 +294,12 @@ async def request_completions(self, request: CompletionRequest) -> CompletionRep
set_user_field(UJ_AI_MITO_API_NUM_USAGES, _num_usages)

# Log the successful completion
log(
MITO_AI_COMPLETION_SUCCESS,
params={
KEY_TYPE_PARAM: MITO_SERVER_KEY,
MITO_SERVER_NUM_USAGES: _num_usages,
},
log_ai_completion_success(
key_type=MITO_SERVER_KEY,
prompt_type=prompt_type,
last_message_content=str(request.messages[-1].get('content', '')),
response={"completion": ai_response},
num_usages=_num_usages,
)

return CompletionReply(
Expand All @@ -311,12 +318,13 @@ async def request_completions(self, request: CompletionRequest) -> CompletionRep
raise

async def stream_completions(
self, request: CompletionRequest
self, request: CompletionRequest, prompt_type: str
) -> AsyncGenerator[Union[CompletionReply, CompletionStreamChunk], None]:
"""Stream completions from the OpenAI API.
Args:
request: The completion request description.
prompt_type: The type of prompt that was sent to the AI (e.g. "chat", "smart_debug", "explain")
Returns:
An async generator yielding first an acknowledge completion reply without
completion and then completion chunks from the third-party provider.
Expand Down Expand Up @@ -346,7 +354,12 @@ async def stream_completions(
temperature=self.temperature,
)
# Log the successful completion
log(MITO_AI_COMPLETION_SUCCESS, params={KEY_TYPE_PARAM: USER_KEY})
log_ai_completion_success(
key_type=USER_KEY,
prompt_type=prompt_type,
last_message_content=str(request.messages[-1].get('content', '')),
response={"completion": "not available for streamed completions"},
)
except BaseException as e:
self.last_error = CompletionError.from_exception(e)
log(MITO_AI_COMPLETION_ERROR, params={KEY_TYPE_PARAM: USER_KEY}, error=e)
Expand Down
3 changes: 3 additions & 0 deletions mito-ai/mito_ai/utils/telemetry_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -228,6 +228,9 @@ def log_ai_completion_success(
final_params[chunk_key] = chunk_value

log("mito_ai_chat_success", params=final_params)
elif prompt_type == "inline_completion":
final_params = base_params
log("mito_ai_inline_completion_success", params=final_params)
else:
final_params = base_params
final_params["note"] = (
Expand Down

0 comments on commit 72e7f0c

Please sign in to comment.