0.2.1 Better AI API and special error handling, performance and other optimizations
matebenyovszky committed Oct 31, 2024
1 parent 133db0b commit 179b937
Showing 10 changed files with 332 additions and 205 deletions.
20 changes: 1 addition & 19 deletions .github/workflows/python-publish.yml
@@ -7,25 +7,7 @@ on:
  workflow_dispatch: # Allow manual trigger

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.x'
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install pytest
          pip install -e .
      - name: Run tests
        run: |
          pytest
  build-and-publish:
    needs: test
    runs-on: ubuntu-latest
    environment:
      name: release
@@ -56,4 +38,4 @@ jobs:
      # Publish to PyPI only on releases
      - name: Publish to PyPI
        if: startsWith(github.ref, 'refs/tags/v')
        uses: pypa/gh-action-pypi-publish@release/v1
        uses: pypa/gh-action-pypi-publish@release/v1
3 changes: 3 additions & 0 deletions .gitignore
@@ -5,6 +5,9 @@ ideas.md
.ideas
.initialprompt

tests/healing_agent_exceptions
tests/healing_agent_backups

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
53 changes: 53 additions & 0 deletions CHANGELOG.md
@@ -0,0 +1,53 @@
# Changelog

All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [0.2.1] - 2024-10-31
### Added
- Better AI API error handling
- Performance optimizations in healing_agent decorator
- This changelog file

### Changed
- Minor optimizations and bug fixes
- Updated JSONDecodeError handling
- Updated diagram

## [0.2.0] - 2024-10-31
### Added
- New hints feature for better error resolution
- Enhanced handling of different exception types
- Optimized configuration system

### Changed
- Improved overall code structure
- Enhanced error handling capabilities

## [0.1.2] - 2024-10-30
### Added
- Special JSONDecodeError handling

### Changed
- Streamlined import system
- Updated decorator parameters

## [0.1.1] - 2024-10-29
### Changed
- Improved packaging configuration
- Various small updates and optimizations

## [0.1.0] - 2024-10-29
### Added
- Initial release
- Basic error handling functionality
- Core healing agent features
- Basic documentation

[0.2.1]: https://github.com/matebenyovszky/healing-agent/compare/v0.2.0...v0.2.1
[0.2.0]: https://github.com/matebenyovszky/healing-agent/compare/v0.1.2...v0.2.0
[0.1.2]: https://github.com/matebenyovszky/healing-agent/compare/v0.1.1...v0.1.2
[0.1.1]: https://github.com/matebenyovszky/healing-agent/compare/v0.1.0...v0.1.1
[0.1.0]: https://github.com/matebenyovszky/healing-agent/releases/tag/v0.1.0
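
For orientation, the "healing_agent decorator" referenced in the 0.2.1 and 0.2.0 notes is applied to ordinary functions. A minimal usage sketch follows; it assumes the package exposes the decorator at top level, and the decorated function is an illustrative stand-in, not taken from this commit:

# Hedged sketch: assumes `healing_agent` is importable and usable directly
# as a decorator; the decorated function below is purely illustrative.
import healing_agent

@healing_agent
def fragile_parse(raw: str) -> float:
    # Raises ValueError on malformed input; the agent is meant to catch the
    # exception, send the context to the configured AI provider, and attempt a fix.
    return float(raw.strip().lstrip("$"))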
225 changes: 147 additions & 78 deletions healing_agent/ai_broker.py
@@ -1,45 +1,51 @@
from typing import Dict
import time
import httpx
import requests
import openai
from functools import wraps

def get_ai_response(prompt: str, config: Dict, system_role: str = "code_fixer") -> str:
    """
    Get response from configured AI provider.
    Args:
        prompt (str): The prompt to send to the AI
        config (Dict): Configuration dictionary
        system_role (str): Role for system prompt - "code_fixer", "analyzer", or "report"
    Returns:
        str: The AI generated response
    """
    system_prompts = {
        "code_fixer": "You are a Python code fixing assistant. Provide only the corrected code without explanations.",
        "analyzer": "You are a Python error analysis assistant. Provide clear and concise explanation of the error and suggestions to fix it.",
        "report": "You are a Python error reporting assistant. Provide a detailed report of the error, its cause, and the applied fix."
    }

    system_prompt = system_prompts.get(system_role, system_prompts["code_fixer"])

    try:
        provider = config.get('AI_PROVIDER', 'azure').lower()

        if provider == 'azure':
            return _get_azure_response(prompt, config['AZURE'], system_prompt)
        elif provider == 'openai':
            return _get_openai_response(prompt, config['OPENAI'], system_prompt)
        elif provider == 'anthropic':
            return _get_anthropic_response(prompt, config['ANTHROPIC'])
        elif provider == 'ollama':
            return _get_ollama_response(prompt, config['OLLAMA'])
        elif provider == 'litellm':
            return _get_litellm_response(prompt, config['LITELLM'], system_prompt)
        else:
            raise ValueError(f"Unsupported AI provider: {provider}")

    except Exception as e:
        print(f"♣ Error getting AI response: {str(e)}")
        raise
def handle_connection_errors(provider_name: str):
    """Simple decorator to handle connection errors with basic logging"""
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except (
                httpx.ConnectError,
                requests.exceptions.ConnectionError,
                requests.exceptions.Timeout,
                ConnectionError,
                TimeoutError
            ) as e:
                print(f"♣ Connection error in {provider_name}: {str(e)}")
                # Wait briefly before retrying
                time.sleep(2)
                try:
                    return func(*args, **kwargs)
                except Exception as retry_error:
                    print(f"♣ Retry failed for {provider_name}: {str(retry_error)}")
                    raise
            except openai.APIConnectionError as e:
                if 'OpenAI' in provider_name or 'Azure' in provider_name:
                    print(f"♣ Connection error in {provider_name}: {str(e)}")
                    # Wait briefly before retrying
                    time.sleep(2)
                    try:
                        return func(*args, **kwargs)
                    except Exception as retry_error:
                        print(f"♣ Retry failed for {provider_name}: {str(retry_error)}")
                        raise
                else:
                    raise
            except Exception as e:
                print(f"♣ Unexpected error in {provider_name}: {str(e)}")
                raise
        return wrapper
    return decorator

@handle_connection_errors("Azure")
def _get_azure_response(prompt: str, config: Dict, system_prompt: str) -> str:
"""Handle Azure OpenAI API requests"""
import openai
Expand All @@ -49,15 +55,21 @@ def _get_azure_response(prompt: str, config: Dict, system_prompt: str) -> str:
azure_endpoint=config['endpoint']
)

response = client.chat.completions.create(
model=config['deployment_name'],
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": prompt}
]
)
return response.choices[0].message.content.strip()
try:
response = client.chat.completions.create(
model=config['deployment_name'],
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": prompt}
],
timeout=config.get('timeout', 30)
)
return response.choices[0].message.content.strip()
except openai.APIError as e:
print(f"♣ Azure API error: {str(e)}")
raise

@handle_connection_errors("OpenAI")
def _get_openai_response(prompt: str, config: Dict, system_prompt: str) -> str:
"""Handle OpenAI direct API requests"""
import openai
Expand All @@ -66,45 +78,61 @@ def _get_openai_response(prompt: str, config: Dict, system_prompt: str) -> str:
organization=config.get('organization_id')
)

response = client.chat.completions.create(
model=config['model'],
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": prompt}
]
)
return response.choices[0].message.content.strip()
try:
response = client.chat.completions.create(
model=config['model'],
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": prompt}
],
timeout=config.get('timeout', 30)
)
return response.choices[0].message.content.strip()
except openai.APIError as e:
print(f"♣ OpenAI API error: {str(e)}")
raise

@handle_connection_errors("Anthropic")
def _get_anthropic_response(prompt: str, config: Dict) -> str:
"""Handle Anthropic API requests"""
import anthropic
client = anthropic.Anthropic(api_key=config['api_key'])

response = client.messages.create(
model=config['model'],
max_tokens=1000,
messages=[{
"role": "user",
"content": prompt
}]
)
return response.content[0].text
try:
response = client.messages.create(
model=config['model'],
max_tokens=1000,
messages=[{
"role": "user",
"content": prompt
}],
timeout=config.get('timeout', 30)
)
return response.content[0].text
except Exception as e:
print(f"♣ Anthropic API error: {str(e)}")
raise

@handle_connection_errors("Ollama")
def _get_ollama_response(prompt: str, config: Dict) -> str:
"""Handle Ollama API requests"""
import requests

response = requests.post(
f"{config['host']}/api/generate",
json={
"model": config['model'],
"prompt": prompt,
"stream": False
},
timeout=config.get('timeout', 120)
)
return response.json()['response']
try:
response = requests.post(
f"{config['host']}/api/generate",
json={
"model": config['model'],
"prompt": prompt,
"stream": False
},
timeout=config.get('timeout', 120)
)
response.raise_for_status()
return response.json()['response']
except requests.exceptions.RequestException as e:
print(f"♣ Ollama API error: {str(e)}")
raise

@handle_connection_errors("LiteLLM")
def _get_litellm_response(prompt: str, config: Dict, system_prompt: str) -> str:
"""Handle LiteLLM API requests"""
import litellm
Expand All @@ -118,7 +146,8 @@ def _get_litellm_response(prompt: str, config: Dict, system_prompt: str) -> str:
{"role": "system", "content": system_prompt},
{"role": "user", "content": prompt}
],
api_key=config['api_key']
api_key=config['api_key'],
timeout=config.get('timeout', 30)
)
if not response or not response.choices:
raise ValueError("Invalid response from LiteLLM API - no choices returned")
Expand All @@ -129,5 +158,45 @@ def _get_litellm_response(prompt: str, config: Dict, system_prompt: str) -> str:
return response.choices[0].message.content.strip()

except Exception as e:
print(f"Error calling LiteLLM API: {str(e)}")
print(f"♣ LiteLLM API error: {str(e)}")
raise

def get_ai_response(prompt: str, config: Dict, system_role: str = "code_fixer") -> str:
    """
    Get response from configured AI provider.
    Args:
        prompt (str): The prompt to send to the AI
        config (Dict): Configuration dictionary
        system_role (str): Role for system prompt - "code_fixer", "analyzer", or "report"
    Returns:
        str: The AI generated response
    """
    system_prompts = {
        "code_fixer": "You are a Python code fixing assistant. Provide only the corrected code without explanations.",
        "analyzer": "You are a Python error analysis assistant. Provide clear and concise explanation of the error and suggestions to fix it.",
        "report": "You are a Python error reporting assistant. Provide a detailed report of the error, its cause, and the applied fix."
    }

    system_prompt = system_prompts.get(system_role, system_prompts["code_fixer"])

    try:
        provider = config.get('AI_PROVIDER', 'azure').lower()

        if provider == 'azure':
            return _get_azure_response(prompt, config['AZURE'], system_prompt)
        elif provider == 'openai':
            return _get_openai_response(prompt, config['OPENAI'], system_prompt)
        elif provider == 'anthropic':
            return _get_anthropic_response(prompt, config['ANTHROPIC'])
        elif provider == 'ollama':
            return _get_ollama_response(prompt, config['OLLAMA'])
        elif provider == 'litellm':
            return _get_litellm_response(prompt, config['LITELLM'], system_prompt)
        else:
            raise ValueError(f"Unsupported AI provider: {provider}")

    except Exception as e:
        print(f"♣ Error getting AI response: {str(e)}")
        raise
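
To make the provider dispatch above concrete, here is a minimal calling sketch. The key names (AI_PROVIDER, OLLAMA, host, model, timeout) mirror the ones the functions above actually read; the specific values are illustrative assumptions:

# Hedged example: key names come from the code above, values are made up.
config = {
    "AI_PROVIDER": "ollama",
    "OLLAMA": {
        "host": "http://localhost:11434",
        "model": "llama3",
        "timeout": 120,
    },
}

fixed_code = get_ai_response(
    prompt="def add(a, b):\n    return a - b  # bug: should be a + b",
    config=config,
    system_role="code_fixer",
)
print(fixed_code)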