Commit dfbae03
docs(ollama_engine): improve docstrings
AAClause authored and clementb49 committed Feb 15, 2025
1 parent df3ac82 commit dfbae03
Showing 1 changed file with 48 additions and 8 deletions.
basilisk/provider_engine/ollama_engine.py (56 changes: 48 additions & 8 deletions)
@@ -1,3 +1,5 @@
+"""Ollama provider engine implementation."""
+
 import json
 import logging
 from functools import cached_property
@@ -19,6 +21,8 @@
 
 
 class OllamaEngine(BaseEngine):
+    """Engine implementation for Ollama API integration."""
+
     capabilities: set[ProviderCapability] = {
         ProviderCapability.TEXT,
         ProviderCapability.IMAGE,
@@ -27,8 +31,10 @@ class OllamaEngine(BaseEngine):
     @cached_property
     @measure_time
     def models(self) -> list[ProviderAIModel]:
-        """
-        Get models
+        """Get Ollama models.
+
+        Returns:
+            A list of provider AI models.
         """
         models = []
         models_list = self.client.list().models
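
The rest of this hunk, which builds the ProviderAIModel entries from the listing, is truncated here. For context, `client.list()` is the ollama-python call this property wraps; a minimal standalone sketch, assuming ollama-python 0.4+ typed responses and a server on the default port:

from ollama import Client

# Hypothetical standalone equivalent of the listing step above.
client = Client(host="http://localhost:11434")  # default Ollama endpoint
for m in client.list().models:
    # Each entry carries the model tag plus metadata such as its size on disk.
    print(m.model, m.size)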
@@ -57,8 +63,10 @@ def models(self) -> list[ProviderAIModel]:
 
     @cached_property
     def client(self) -> Client:
-        """
-        Get client
+        """Get Ollama client.
+
+        Returns:
+            The Ollama client instance.
         """
         base_url = self.account.custom_base_url or str(
             self.account.provider.base_url
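
The tail of the property is truncated, but the `ollama.Client` constructor takes the server address as `host`, so it presumably ends with something like `return Client(host=base_url)`. A sketch of that construction, with the default endpoint as a stand-in for the account-derived URL:

from ollama import Client

# base_url is a stand-in; the engine derives it from the account settings.
base_url = "http://localhost:11434"
client = Client(host=base_url)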
@@ -73,8 +81,16 @@ def completion(
         system_message: Message | None,
         **kwargs,
     ) -> ChatResponse | Iterator[ChatResponse]:
-        """
-        Completion
+        """Get completion from Ollama.
+
+        Args:
+            new_block: The new message block.
+            conversation: The conversation instance.
+            system_message: The system message, if any.
+            **kwargs: Additional keyword arguments.
+
+        Returns:
+            The chat response or an iterator of chat responses.
         """
         super().completion(new_block, conversation, system_message, **kwargs)
         params = {
@@ -86,12 +102,18 @@ def completion(
             # "top_p": new_block.top_p,
             "stream": new_block.stream,
         }
-        # if new_block.max_tokens:
-        #     params["max_tokens"] = new_block.max_tokens
         params.update(kwargs)
         return self.client.chat(**params)
 
     def prepare_message_request(self, message: Message):
+        """Prepare message request for Ollama.
+
+        Args:
+            message: The message to prepare.
+
+        Returns:
+            The prepared message request.
+        """
         super().prepare_message_request(message)
         images = []
         if message.attachments:
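
Putting the two methods above together: `completion` forwards the params dict to `client.chat`, and `prepare_message_request` attaches any images alongside the text. A hedged sketch of the resulting call, assuming ollama-python's documented chat API; the model tag and file name are hypothetical:

from ollama import Client

client = Client(host="http://localhost:11434")
# Message dicts may carry an "images" list (file paths, raw bytes, or
# base64 strings) for vision-capable models.
response = client.chat(
    model="llava",  # hypothetical model tag
    messages=[
        {
            "role": "user",
            "content": "Describe this image.",
            "images": ["photo.png"],
        }
    ],
    stream=False,
)
print(response["message"]["content"])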
@@ -120,6 +142,14 @@ def prepare_message_request(self, message: Message):
     prepare_message_response = prepare_message_request
 
     def completion_response_with_stream(self, stream):
+        """Process a streaming completion response.
+
+        Args:
+            stream: The stream of chat completion responses.
+
+        Returns:
+            An iterator of the completion response content.
+        """
         for chunk in stream:
             content = chunk.get("message", {}).get("content")
             if content:
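
The loop body is truncated, but judging by the new docstring it yields each non-empty content fragment. Consuming a stream directly looks like this, mirroring the same `chunk.get(...)` pattern; the model tag is again hypothetical:

from ollama import Client

client = Client(host="http://localhost:11434")
messages = [{"role": "user", "content": "Hello"}]
# With stream=True, client.chat returns an iterator of partial responses.
for chunk in client.chat(model="llama3.2", messages=messages, stream=True):
    text = chunk.get("message", {}).get("content")
    if text:
        print(text, end="", flush=True)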
@@ -128,6 +158,16 @@ def completion_response_with_stream(self, stream):
     def completion_response_without_stream(
         self, response, new_block: MessageBlock, **kwargs
     ) -> MessageBlock:
+        """Process a non-streaming completion response.
+
+        Args:
+            response: The chat completion response.
+            new_block: The message block to update with the response.
+            **kwargs: Additional keyword arguments.
+
+        Returns:
+            The updated message block with the response.
+        """
         new_block.response = Message(
             role=MessageRoleEnum.ASSISTANT,
             content=response["message"]["content"],
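
In the non-streaming case the full reply arrives in one response object. Subscript access like `response["message"]["content"]` matches what the engine uses above; assuming ollama-python 0.4+ typed responses, attribute access works too. A quick sketch of that equivalence:

from ollama import Client

client = Client(host="http://localhost:11434")
messages = [{"role": "user", "content": "Hello"}]
resp = client.chat(model="llama3.2", messages=messages, stream=False)
# Subscript and attribute access return the same field.
assert resp["message"]["content"] == resp.message.content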
