Merge pull request xtekky#2698 from kqlio67/main
Optimizations and bug fixes for the PollinationsAI and Blackbox providers: improved error handling, model validation, and HTTP request processing
hlohaus authored Feb 7, 2025
2 parents ff191be + 59c65a0 commit 17a0fd7
Showing 4 changed files with 71 additions and 80 deletions.
4 changes: 2 additions & 2 deletions docs/providers-and-models.md
@@ -38,7 +38,7 @@ This document provides an overview of various AI providers and models, including
 | Website | API Credentials | Provider | Text Models | Image Models | Vision (Image Upload) | Stream | Status |
 |----------|-------------|--------------|---------------|--------|--------|------|------|
 |[aichatfree.info](https://aichatfree.info)|No auth required|`g4f.Provider.AIChatFree`|`gemini-1.5-pro` _**(1+)**_||||![](https://img.shields.io/badge/Active-brightgreen)|
-|[blackbox.ai](https://www.blackbox.ai)|No auth required|`g4f.Provider.Blackbox`|`blackboxai, gpt-4, gpt-4o, o3-mini, claude-3.5-sonnet, gemini-1.5-flash, gemini-1.5-pro, blackboxai-pro, llama-3.1-8b, llama-3.1-70b, llama-3-1-405b, llama-3.3-70b, mixtral-small-28b, deepseek-chat, dbrx-instruct, qwq-32b, hermes-2-dpo, deepseek-r1` _**(+34)**_|`flux`|`blackboxai, gpt-4o, o3-mini, gemini-1.5-pro, gemini-1.5-flash, llama-3.1-8b, llama-3.1-70b, llama-3.1-405b`||![](https://img.shields.io/badge/Active-brightgreen)|
+|[blackbox.ai](https://www.blackbox.ai)|No auth required|`g4f.Provider.Blackbox`|`blackboxai, gpt-4, gpt-4o, o3-mini, claude-3.5-sonnet, gemini-1.5-flash, gemini-1.5-pro, blackboxai-pro, llama-3.1-8b, llama-3.1-70b, llama-3-1-405b, llama-3.3-70b, mixtral-small-28b, deepseek-chat, dbrx-instruct, qwq-32b, hermes-2-dpo, deepseek-r1, gemini-2.0-flash` _**(+34)**_|`flux`|`blackboxai, gpt-4o, o3-mini, gemini-1.5-pro, gemini-1.5-flash, llama-3.1-8b, llama-3.1-70b, llama-3.1-405b, gemini-2.0-flash`||![](https://img.shields.io/badge/Active-brightgreen)|
 |[api.blackbox.ai](https://api.blackbox.ai)|No auth required|`g4f.Provider.BlackboxAPI`|`deepseek-v3, deepseek-r1, deepseek-chat, mixtral-small-28b, dbrx-instruct, qwq-32b, hermes-2-dpo`||||![](https://img.shields.io/badge/Active-brightgreen)|
 |[cablyai.com](https://cablyai.com)|Optional API key|`g4f.Provider.CablyAI`|`gpt-4o-mini, llama-3.1-8b, deepseek-v3, deepseek-r1, hermes-3, o3-mini-low, o3-mini, sonar-reasoning` _**(2+)**_||||![](https://img.shields.io/badge/Active-brightgreen)|
 |[chatglm.cn](https://chatglm.cn)|No auth required|`g4f.Provider.ChatGLM`|`glm-4`||||![](https://img.shields.io/badge/Active-brightgreen)|
@@ -157,7 +157,7 @@ This document provides an overview of various AI providers and models, including
 |gemini-exp|Google DeepMind|1+ Providers|[blog.google](https://blog.google/feed/gemini-exp-1206/)|
 |gemini-1.5-flash|Google DeepMind|5+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/flash/)|
 |gemini-1.5-pro|Google DeepMind|5+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/pro/)|
-|gemini-2.0-flash|Google DeepMind|3+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/flash/)|
+|gemini-2.0-flash|Google DeepMind|4+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/flash/)|
 |gemini-2.0-flash-thinking|Google DeepMind|1+ Providers|[ai.google.dev](https://ai.google.dev/gemini-api/docs/thinking-mode)|
 |claude-3-haiku|Anthropic|2+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-haiku)|
 |claude-3-sonnet|Anthropic|1+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-family)|
8 changes: 5 additions & 3 deletions g4f/Provider/Blackbox.py
@@ -39,9 +39,9 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
     default_vision_model = default_model
     default_image_model = 'ImageGeneration'
     image_models = [default_image_model]
-    vision_models = [default_vision_model, 'gpt-4o', 'o3-mini', 'gemini-pro', 'gemini-1.5-flash', 'llama-3.1-8b', 'llama-3.1-70b', 'llama-3.1-405b']
+    vision_models = [default_vision_model, 'gpt-4o', 'o3-mini', 'gemini-pro', 'gemini-1.5-flash', 'llama-3.1-8b', 'llama-3.1-70b', 'llama-3.1-405b', 'Gemini-Flash-2.0']

-    userSelectedModel = ['gpt-4o', 'o3-mini', 'gemini-pro', 'claude-sonnet-3.5', 'DeepSeek-V3', 'DeepSeek-R1', 'blackboxai-pro', 'Meta-Llama-3.3-70B-Instruct-Turbo', 'Mistral-Small-24B-Instruct-2501', 'DeepSeek-LLM-Chat-(67B)', 'DBRX-Instruct', 'Qwen-QwQ-32B-Preview', 'Nous-Hermes-2-Mixtral-8x7B-DPO']
+    userSelectedModel = ['gpt-4o', 'o3-mini', 'gemini-pro', 'claude-sonnet-3.5', 'DeepSeek-V3', 'DeepSeek-R1', 'blackboxai-pro', 'Meta-Llama-3.3-70B-Instruct-Turbo', 'Mistral-Small-24B-Instruct-2501', 'DeepSeek-LLM-Chat-(67B)', 'DBRX-Instruct', 'Qwen-QwQ-32B-Preview', 'Nous-Hermes-2-Mixtral-8x7B-DPO', 'Gemini-Flash-2.0']

     agentMode = {
         'DeepSeek-V3': {'mode': True, 'id': "deepseek-chat", 'name': "DeepSeek-V3"},
@@ -52,6 +52,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         'DBRX-Instruct': {'mode': True, 'id': "databricks/dbrx-instruct", 'name': "DBRX-Instruct"},
         'Qwen-QwQ-32B-Preview': {'mode': True, 'id': "Qwen/QwQ-32B-Preview", 'name': "Qwen-QwQ-32B-Preview"},
         'Nous-Hermes-2-Mixtral-8x7B-DPO': {'mode': True, 'id': "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", 'name': "Nous-Hermes-2-Mixtral-8x7B-DPO"},
+        'Gemini-Flash-2.0': {'mode': True, 'id': "Gemini/Gemini-Flash-2.0", 'name': "Gemini-Flash-2.0"},
     }

     trendingAgentMode = {
@@ -109,6 +110,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         "dbrx-instruct": "DBRX-Instruct",
         "qwq-32b": "Qwen-QwQ-32B-Preview",
         "hermes-2-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO",
+        "gemini-2.0-flash": "Gemini-Flash-2.0",
         "flux": "ImageGeneration",
     }
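
A note on how this mapping is used: `model_aliases` translates the short g4f model names (as listed in providers-and-models.md) into Blackbox's internal identifiers. A minimal sketch of that kind of lookup; `resolve_model` is an illustrative helper, not code from this commit:

```python
# Illustrative sketch of alias resolution; resolve_model is hypothetical.
model_aliases = {
    "gemini-2.0-flash": "Gemini-Flash-2.0",  # the alias added in this commit
    "flux": "ImageGeneration",
}

def resolve_model(model: str, default: str = "blackboxai") -> str:
    """Map a public g4f model name to the provider's internal name."""
    if not model:
        return default  # fall back to the provider's default model
    return model_aliases.get(model, model)  # unknown names pass through unchanged

assert resolve_model("gemini-2.0-flash") == "Gemini-Flash-2.0"
```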

@@ -284,7 +286,7 @@ async def create_async_generator(
             "vscodeClient": False,
             "codeInterpreterMode": False,
             "customProfile": {"name": "", "occupation": "", "traits": [], "additionalInfo": "", "enableNewChats": False},
-            "session": {"user":{"name":"John Doe","email":"john.doe@gmail.com","image":"https://lh3.googleusercontent.com/a/ACg8ocK9X7mNpQ2vR4jH3tY8wL5nB1xM6fDS9JW2kLpTn4Vy3hR2xN4m=s96-c"},"expires":datetime.now(timezone.utc).isoformat(timespec='milliseconds').replace('+00:00', 'Z'), "status": "PREMIUM"},
+            "session": {"user":{"name":"John Doe","email":"john.doe@gmail.com","image":"https://lh3.googleusercontent.com/a/ACg8ocK9X7mNpQ2vR4jH3tY8wL5nB1xM6fDS9JW2kLpTn4Vy3hR2xN4m=s96-c","subscriptionStatus":"PREMIUM"},"expires":datetime.now(timezone.utc).isoformat(timespec='milliseconds').replace('+00:00', 'Z')},
             "webSearchMode": False
         }
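
For reference, the `expires` value in the session payload above comes from the `datetime` expression in the diff; a standalone sketch of what it evaluates to:

```python
from datetime import datetime, timezone

# Build an ISO 8601 UTC timestamp with millisecond precision and a trailing
# "Z", e.g. "2025-02-07T12:34:56.789Z" -- the format used for "expires" above.
expires = (
    datetime.now(timezone.utc)
    .isoformat(timespec="milliseconds")  # ends in "+00:00"
    .replace("+00:00", "Z")              # normalize the UTC offset to "Z"
)
print(expires)
```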

137 changes: 63 additions & 74 deletions g4f/Provider/PollinationsAI.py
@@ -73,24 +73,23 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
     @classmethod
     def get_models(cls, **kwargs):
         if not cls.text_models or not cls.image_models:
-            image_url = "https://image.pollinations.ai/models"
-            image_response = requests.get(image_url)
-            raise_for_status(image_response)
-            new_image_models = image_response.json()
-
-            cls.image_models = list(dict.fromkeys([*cls.extra_image_models, *new_image_models]))
-            cls.extra_image_models = cls.image_models.copy()
-
-            text_url = "https://text.pollinations.ai/models"
-            text_response = requests.get(text_url)
-            raise_for_status(text_response)
-            original_text_models = [model.get("name") for model in text_response.json()]
-
-            combined_text = cls.extra_text_models + [
-                model for model in original_text_models
-                if model not in cls.extra_text_models
-            ]
-            cls.text_models = list(dict.fromkeys(combined_text))
+            try:
+                image_response = requests.get("https://image.pollinations.ai/models")
+                image_response.raise_for_status()
+                new_image_models = image_response.json()
+                cls.image_models = list(dict.fromkeys([*cls.extra_image_models, *new_image_models]))
+
+                text_response = requests.get("https://text.pollinations.ai/models")
+                text_response.raise_for_status()
+                original_text_models = [model.get("name") for model in text_response.json()]
+
+                combined_text = cls.extra_text_models + [
+                    model for model in original_text_models
+                    if model not in cls.extra_text_models
+                ]
+                cls.text_models = list(dict.fromkeys(combined_text))
+            except Exception as e:
+                raise RuntimeError(f"Failed to fetch models: {e}") from e

         return cls.text_models + cls.image_models
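
The rewritten `get_models` consolidates both registry fetches under one `try`/`except`, switches to `Response.raise_for_status()`, and re-raises any failure as a `RuntimeError` with the original exception chained. A standalone sketch of the same pattern; the function name and `timeout` are illustrative additions, while the URLs are taken from the diff:

```python
import requests

def fetch_model_lists() -> tuple[list, list]:
    """Fetch Pollinations' image and text model registries. dict.fromkeys
    deduplicates while preserving order; any failure is re-raised as a
    RuntimeError with the root cause chained."""
    try:
        image_resp = requests.get("https://image.pollinations.ai/models", timeout=10)
        image_resp.raise_for_status()
        image_models = list(dict.fromkeys(image_resp.json()))

        text_resp = requests.get("https://text.pollinations.ai/models", timeout=10)
        text_resp.raise_for_status()
        text_models = list(dict.fromkeys(m.get("name") for m in text_resp.json()))
    except Exception as e:
        raise RuntimeError(f"Failed to fetch models: {e}") from e
    return text_models, image_models
```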

@@ -122,13 +121,14 @@ async def create_async_generator(
         try:
             model = cls.get_model(model)
         except ModelNotFoundError:
-            if model not in cls.extra_image_models:
+            if model not in cls.image_models:
                 raise

         if not cache and seed is None:
             seed = random.randint(0, 10000)

-        if model in cls.image_models or model in cls.extra_image_models:
-            async for chunk in cls._generate_image(
+        if model in cls.image_models:
+            async for chunk in cls._generate_image(
                 model=model,
                 prompt=format_image_prompt(messages, prompt),
                 proxy=proxy,
@@ -172,25 +172,25 @@ async def _generate_image(
         safe: bool
     ) -> AsyncResult:
         params = {
-            "seed": seed,
-            "width": width,
-            "height": height,
+            "seed": str(seed) if seed is not None else None,
+            "width": str(width),
+            "height": str(height),
             "model": model,
-            "nologo": nologo,
-            "private": private,
-            "enhance": enhance,
-            "safe": safe
+            "nologo": str(nologo).lower(),
+            "private": str(private).lower(),
+            "enhance": str(enhance).lower(),
+            "safe": str(safe).lower()
         }
-        params = {k: json.dumps(v) if isinstance(v, bool) else str(v) for k, v in params.items() if v is not None}
-        params = "&".join( "%s=%s" % (key, quote_plus(params[key]))
-            for key in params.keys())
-        url = f"{cls.image_api_endpoint}prompt/{quote_plus(prompt)}?{params}"
+        params = {k: v for k, v in params.items() if v is not None}
+        query = "&".join(f"{k}={quote_plus(v)}" for k, v in params.items())
+        url = f"{cls.image_api_endpoint}prompt/{quote_plus(prompt)}?{query}"
         yield ImagePreview(url, prompt)

         async with ClientSession(headers=DEFAULT_HEADERS, connector=get_connector(proxy=proxy)) as session:
-            async with session.head(url) as response:
-                if response.status != 500:
-                    await raise_for_status(response)
-                    yield ImageResponse(str(response.url), prompt)
+            async with session.get(url, allow_redirects=True) as response:
+                await raise_for_status(response)
+                image_url = str(response.url)
+                yield ImageResponse(image_url, prompt)

     @classmethod
     async def _generate_text(
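
The new query-string construction stringifies every option up front (`str(...)` plus `.lower()` for booleans), drops `None` values, and joins the rest with `quote_plus`, removing the earlier `json.dumps` round-trip. A sketch of the resulting URL scheme; the base URL is an assumed example value for `cls.image_api_endpoint`, and `build_image_url` is a hypothetical helper:

```python
from urllib.parse import quote_plus

def build_image_url(base: str, prompt: str, **options) -> str:
    """Sketch of the URL built above: pre-stringified options become the
    query string, and None values are simply dropped."""
    params = {k: v for k, v in options.items() if v is not None}
    query = "&".join(f"{k}={quote_plus(v)}" for k, v in params.items())
    return f"{base}prompt/{quote_plus(prompt)}?{query}"

# seed=None is omitted from the query, mirroring the filtering step above.
print(build_image_url(
    "https://image.pollinations.ai/",  # assumed endpoint value, not from the diff
    "a lighthouse at dusk",
    width="1024", height="768", nologo="true", seed=None,
))
```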
@@ -207,60 +207,49 @@ async def _generate_text(
         seed: Optional[int],
         cache: bool
     ) -> AsyncResult:
-        jsonMode = False
-        if response_format is not None and "type" in response_format:
-            if response_format["type"] == "json_object":
-                jsonMode = True
+        json_mode = False
+        if response_format and response_format.get("type") == "json_object":
+            json_mode = True

-        if images is not None and messages:
+        if images and messages:
             last_message = messages[-1].copy()
-            last_message["content"] = [
-                *[{
-                    "type": "image_url",
-                    "image_url": {"url": to_data_uri(image)}
-                } for image, _ in images],
-                {
-                    "type": "text",
-                    "text": messages[-1]["content"]
-                }
-            ]
+            image_content = [
+                {
+                    "type": "image_url",
+                    "image_url": {"url": to_data_uri(image)}
+                }
+                for image, _ in images
+            ]
+            last_message["content"] = image_content + [{"type": "text", "text": last_message["content"]}]
             messages[-1] = last_message

         async with ClientSession(headers=DEFAULT_HEADERS, connector=get_connector(proxy=proxy)) as session:
-            data = {
+            data = filter_none(**{
                 "messages": messages,
                 "model": model,
                 "temperature": temperature,
                 "presence_penalty": presence_penalty,
                 "top_p": top_p,
                 "frequency_penalty": frequency_penalty,
-                "jsonMode": jsonMode,
+                "jsonMode": json_mode,
                 "stream": False,
                 "seed": seed,
                 "cache": cache
-            }
-            async with session.post(cls.text_api_endpoint, json=filter_none(**data)) as response:
+            })
+
+            async with session.post(cls.text_api_endpoint, json=data) as response:
                 await raise_for_status(response)
-                async for line in response.content:
-                    decoded_chunk = line.decode(errors="replace")
-                    if "data: [DONE]" in decoded_chunk:
-                        break
-                    try:
-                        json_str = decoded_chunk.replace("data:", "").strip()
-                        data = json.loads(json_str)
-                        choice = data["choices"][0]
-                        message = choice.get("message") or choice.get("delta", {})
-
-                        if "usage" in data:
-                            yield Usage(**data["usage"])
-                        content = message.get("content", "")
-                        if content:
-                            yield content.replace("\\(", "(").replace("\\)", ")")
-                        if "finish_reason" in choice and choice["finish_reason"]:
-                            yield FinishReason(choice["finish_reason"])
-                            break
-                    except json.JSONDecodeError:
-                        yield decoded_chunk.strip()
-                    except Exception as e:
-                        yield FinishReason("error")
-                        break
+                result = await response.json()
+                choice = result["choices"][0]
+                message = choice.get("message", {})
+                content = message.get("content", "")
+
+                if content:
+                    yield content.replace("\\(", "(").replace("\\)", ")")
+
+                if "usage" in result:
+                    yield Usage(**result["usage"])
+
+                finish_reason = choice.get("finish_reason")
+                if finish_reason:
+                    yield FinishReason(finish_reason)
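
Because the request body now pins `"stream": False`, the handler parses one complete OpenAI-style completion object instead of decoding SSE lines. A self-contained sketch of that parse order (content, then usage, then finish reason), using a hard-coded example payload rather than a live response:

```python
# Example OpenAI-style, non-streaming completion payload (hard-coded for
# illustration; in the provider it comes from `await response.json()`).
result = {
    "choices": [{
        "message": {"content": "The answer is \\(42\\)."},
        "finish_reason": "stop",
    }],
    "usage": {"prompt_tokens": 7, "completion_tokens": 5, "total_tokens": 12},
}

choice = result["choices"][0]
content = choice.get("message", {}).get("content", "")
if content:
    # Strip the escaped LaTeX delimiters, as the provider code does.
    print(content.replace("\\(", "(").replace("\\)", ")"))
if "usage" in result:
    print(result["usage"])          # token accounting, yielded as Usage(...)
if choice.get("finish_reason"):
    print(choice["finish_reason"])  # yielded as FinishReason(...)
```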
2 changes: 1 addition & 1 deletion g4f/models.py
@@ -344,7 +344,7 @@ class VisionModel(Model):
 gemini_2_0_flash = Model(
     name = 'gemini-2.0-flash',
     base_provider = 'Google DeepMind',
-    best_provider = IterListProvider([PollinationsAI, GeminiPro, Liaobots])
+    best_provider = IterListProvider([Blackbox, PollinationsAI, GeminiPro, Liaobots])
 )

 gemini_2_0_flash_thinking = Model(
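Placing `Blackbox` at the head of the list makes it the first provider attempted for `gemini-2.0-flash`, with the others as fallbacks. A conceptual sketch of that first-success iteration; this is illustrative only, not g4f's actual `IterListProvider` implementation:

```python
from typing import Callable, Iterable

def complete_with_fallback(providers: Iterable[Callable[[str], str]], prompt: str) -> str:
    """Try providers in order and return the first successful completion."""
    errors = []
    for provider in providers:      # order matters: Blackbox is tried first here
        try:
            return provider(prompt)
        except Exception as e:      # a failing provider defers to the next one
            errors.append(e)
    raise RuntimeError(f"All providers failed: {errors}")
```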
