Mirror of https://github.com/xtekky/gpt4free.git (synced 2024-12-22 18:41:41 +03:00)
update providers and documentation with image handling improvements (#2451)
* refactor(g4f/Provider/Airforce.py): Enhance Airforce provider with dynamic model fetching
* refactor(g4f/Provider/Blackbox.py): Enhance Blackbox AI provider configuration and streamline code
* feat(g4f/Provider/RobocodersAPI.py): Add new RobocodersAPI async chat provider
* refactor(g4f/client/__init__.py): Improve provider handling in async_generate method
* refactor(g4f/models.py): Update provider configurations for multiple models
* refactor(g4f/Provider/Blackbox.py): Streamline model configuration and improve response handling
* feat(g4f/Provider/DDG.py): Enhance model support and improve conversation handling
* refactor(g4f/Provider/Copilot.py): Enhance Copilot provider with model support
* refactor(g4f/Provider/AmigoChat.py): Update models and improve code structure
* chore(g4f/Provider/not_working/AIUncensored.py): Move AIUncensored to the not_working directory
* chore(g4f/Provider/not_working/Allyfy.py): Remove Allyfy provider
* Update (g4f/Provider/not_working/AIUncensored.py g4f/Provider/not_working/__init__.py)
* refactor(g4f/Provider/ChatGptEs.py): Implement format_prompt for message handling
* refactor(g4f/Provider/Blackbox.py): Update message formatting and improve code structure
* refactor(g4f/Provider/LLMPlayground.py): Enhance text generation and error handling
* refactor(g4f/Provider/needs_auth/PollinationsAI.py): Move PollinationsAI to the needs_auth directory
* refactor(g4f/Provider/Liaobots.py): Update Liaobots provider models and aliases
* feat(g4f/Provider/DeepInfraChat.py): Add new DeepInfra models and aliases
* Update (g4f/Provider/__init__.py)
* Update (g4f/models.py)
* g4f/models.py
* Update g4f/models.py
* Update g4f/Provider/LLMPlayground.py
* Update (g4f/models.py g4f/Provider/Airforce.py g4f/Provider/__init__.py g4f/Provider/LLMPlayground.py)
* Update g4f/Provider/__init__.py
* refactor(g4f/Provider/Airforce.py): Enhance text generation with retry and timeout
* Update g4f/Provider/AmigoChat.py g4f/Provider/__init__.py
* refactor(g4f/Provider/Blackbox.py): Update model prefixes and image handling (fixes #2445)
  - Update model prefixes for gpt-4o, gemini-pro, and claude-sonnet-3.5
  - Add 'gpt-3.5-turbo' alias for the 'blackboxai' model
  - Modify image handling in the create_async_generator method
  - Add 'imageGenerationMode' and 'webSearchModePrompt' flags to the API request
  - Remove redundant 'imageBase64' field from the image data structure
* New provider (g4f/Provider/Blackbox2.py): support for llama-3.1-70b text generation
* docs(docs/async_client.md): Update AsyncClient API guide with minor improvements
  - Improve formatting and readability of code examples
  - Add line breaks for better visual separation of sections
  - Fix minor typos and inconsistencies in text
  - Enhance clarity of explanations in various sections
  - Remove unnecessary whitespace
* feat(docs/client.md): Add response_format parameter
  - Add 'response_format' parameter to image generation examples
  - Specify 'url' format for standard image generation
  - Include 'b64_json' format for base64-encoded image responses
  - Update documentation to reflect new parameter usage
  - Improve code examples for clarity and consistency
* docs(README.md): Update usage examples and add image generation
  - Update text generation example to use the new Client API
  - Add image generation example with the Client API
  - Update configuration section with new cookie setting instructions
  - Add response_format parameter to the image generation example
  - Remove outdated information and reorganize sections
  - Update contributors list
* refactor(g4f/client/__init__.py): Optimize image processing and response handling
  - Modify _process_image_response to handle the 'url' format without local saving
  - Update ImagesResponse construction to include a 'created' timestamp
  - Simplify image processing logic for different response formats
  - Improve error handling and logging for image generation
  - Enhance type hints and docstrings for better code clarity
* feat(g4f/models.py): Update model providers and add new models
  - Add Blackbox2 to Provider imports
  - Update gpt-3.5-turbo best provider to Blackbox
  - Add Blackbox2 to llama-3.1-70b best providers
  - Rename dalle_3 to dall_e_3 and update its best providers
  - Add new models: solar_mini, openhermes_2_5, lfm_40b, zephyr_7b, neural_7b, mythomax_13b
  - Update ModelUtils.convert with new models and changes
  - Remove duplicate 'dalle-3' entry in ModelUtils.convert
* refactor(Airforce): Improve API handling and add authentication
  - Implement API key authentication with the check_api_key method
  - Refactor image generation to use the new imagine2 endpoint
  - Improve text generation with better error handling and streaming
  - Update model aliases and add new image models
  - Enhance content filtering for various model outputs
  - Replace StreamSession with aiohttp's ClientSession for async operations
  - Simplify model fetching logic and remove redundant code
  - Add is_image_model method for better model type checking
  - Update class attributes for better organization and clarity
* feat(g4f/Provider/HuggingChat.py): Update HuggingChat model list and aliases (requested by @TheFirstNoob)
  - Add 'Qwen/Qwen2.5-72B-Instruct' as the first model in the list
  - Update model aliases to include 'qwen-2.5-72b'
  - Reorder existing models in the list for consistency
  - Remove duplicate entry for 'Qwen/Qwen2.5-72B-Instruct' in the models list
* refactor(g4f/Provider/ReplicateHome.py): Remove unused text models (requested by @TheFirstNoob)
  - Remove the 'meta/meta-llama-3-70b-instruct' and 'mistralai/mixtral-8x7b-instruct-v0.1' text models from the list
  - Update the list to include only the remaining text and image models
  - This simplifies the model configuration, focusing on the core text and image models provided by Replicate
* refactor(g4f/Provider/HuggingChat.py): Move HuggingChat to the needs_auth directory (requested by @TheFirstNoob)
* Update (g4f/Provider/needs_auth/HuggingChat.py)
* Update g4f/models.py
* Update g4f/Provider/Airforce.py
* Update g4f/models.py g4f/Provider/needs_auth/HuggingChat.py
* Added the 'Airforce' provider to the 'o1-mini' model (g4f/models.py)
* Update (g4f/Provider/Airforce.py g4f/Provider/AmigoChat.py)
* Update g4f/models.py g4f/Provider/DeepInfraChat.py g4f/Provider/Airforce.py
* Update g4f/Provider/DeepInfraChat.py
* Update (g4f/Provider/DeepInfraChat.py)
* Update g4f/Provider/Blackbox.py
* Update (docs/client.md docs/async_client.md g4f/client/__init__.py)
* Update (docs/async_client.md docs/client.md)
* Update (g4f/client/__init__.py)

---------

Co-authored-by: kqlio67 <kqlio67@users.noreply.github.com>
Co-authored-by: kqlio67 <>
Co-authored-by: H Lohaus <hlohaus@users.noreply.github.com>
This commit is contained in:
parent
6c48dd608b
commit
b198d900aa
@@ -196,6 +196,7 @@ client = Client()
response = client.images.generate(
    model="flux",
    prompt="a white siamese cat",
    response_format="url"
    # Add any other necessary parameters
)
@@ -164,6 +164,11 @@ asyncio.run(main())
```

### Image Generation

**The `response_format` parameter is optional and can have the following values:**
- **If not specified (default):** The image will be saved locally, and a local path will be returned (e.g., "/images/1733331238_cf9d6aa9-f606-4fea-ba4b-f06576cba309.jpg").
- **"url":** Returns a URL to the generated image.
- **"b64_json":** Returns the image as a base64-encoded JSON string.

**Generate images using a specified prompt:**
```python
import asyncio
@@ -174,7 +179,9 @@ async def main():

    response = await client.images.generate(
        prompt="a white siamese cat",
        model="flux"
        model="flux",
        response_format="url"
        # Add any other necessary parameters
    )

    image_url = response.data[0].url
@@ -195,6 +202,7 @@ async def main():
        prompt="a white siamese cat",
        model="flux",
        response_format="b64_json"
        # Add any other necessary parameters
    )

    base64_text = response.data[0].b64_json
@@ -224,7 +232,8 @@ async def main():

    task2 = client.images.generate(
        model="flux",
        prompt="a white siamese cat"
        prompt="a white siamese cat",
        response_format="url"
    )

    try:
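The async example above stops at reading `response.data[0].b64_json`; a minimal follow-up sketch, assuming the standard AsyncClient interface shown in these docs (the output filename is illustrative):

```python
import asyncio
import base64
from g4f.client import AsyncClient

async def main():
    client = AsyncClient()
    response = await client.images.generate(
        model="flux",
        prompt="a white siamese cat",
        response_format="b64_json"
    )
    # Decode the base64 payload and write it to disk (filename is illustrative).
    image_bytes = base64.b64decode(response.data[0].b64_json)
    with open("siamese_cat.png", "wb") as f:
        f.write(image_bytes)

asyncio.run(main())
```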
@@ -130,6 +130,11 @@ for chunk in stream:
```

### Image Generation

**The `response_format` parameter is optional and can have the following values:**
- **If not specified (default):** The image will be saved locally, and a local path will be returned (e.g., "/images/1733331238_cf9d6aa9-f606-4fea-ba4b-f06576cba309.jpg").
- **"url":** Returns a URL to the generated image.
- **"b64_json":** Returns the image as a base64-encoded JSON string.

**Generate images using a specified prompt:**
```python
from g4f.client import Client
@@ -138,7 +143,8 @@ client = Client()

response = client.images.generate(
    model="flux",
    prompt="a white siamese cat"
    prompt="a white siamese cat",
    response_format="url"
    # Add any other necessary parameters
)

@@ -157,6 +163,7 @@ response = client.images.generate(
    model="flux",
    prompt="a white siamese cat",
    response_format="b64_json"
    # Add any other necessary parameters
)

base64_text = response.data[0].b64_json
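Neither docs snippet shows the default case from the list above (no `response_format`, image saved locally); a minimal sketch, assuming the local path is returned in `data[0].url` as the reworked `_process_image_response` code further below suggests:

```python
from g4f.client import Client

client = Client()
response = client.images.generate(
    model="flux",
    prompt="a white siamese cat"
    # response_format omitted: the image is saved locally
)

# With the default format, data[0].url holds a local path such as
# "/images/<timestamp>_<uuid>.jpg" rather than a remote URL.
local_path = response.data[0].url
print(f"Image saved to: {local_path}")
```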
@@ -1,18 +1,17 @@
from __future__ import annotations

import random
import json
import random
import re

import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
from urllib.parse import quote
from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..image import ImageResponse
from ..requests import StreamSession, raise_for_status
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin

requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

def split_message(message: str, max_length: int = 1000) -> list[str]:
    """Splits the message into parts up to (max_length)."""
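The hunk above ends before the body of `split_message`; a minimal sketch of such a helper, assuming plain fixed-size slicing (the committed implementation may split more carefully, e.g. on whitespace):

```python
def split_message(message: str, max_length: int = 1000) -> list[str]:
    """Splits the message into parts up to (max_length)."""
    # Sketch only: simple fixed-size slicing of the input string.
    return [message[i:i + max_length] for i in range(0, len(message), max_length)]
```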
@@ -30,11 +29,36 @@ def split_message(message: str, max_length: int = 1000) -> list[str]:
class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://llmplayground.net"
    api_endpoint_completions = "https://api.airforce/chat/completions"
    api_endpoint_imagine = "https://api.airforce/imagine2"
    api_endpoint_imagine2 = "https://api.airforce/imagine2"

    working = True
    needs_auth = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = "gpt-4o-mini"
    default_image_model = "flux"

    additional_models_imagine = ["flux-1.1-pro", "dall-e-3"]

    model_aliases = {
        # Alias mappings for models
        "openchat-3.5": "openchat-3.5-0106",
        "deepseek-coder": "deepseek-coder-6.7b-instruct",
        "hermes-2-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO",
        "hermes-2-pro": "hermes-2-pro-mistral-7b",
        "openhermes-2.5": "openhermes-2.5-mistral-7b",
        "lfm-40b": "lfm-40b-moe",
        "discolm-german-7b": "discolm-german-7b-v1",
        "llama-2-7b": "llama-2-7b-chat-int8",
        "llama-3.1-70b": "llama-3.1-70b-turbo",
        "neural-7b": "neural-chat-7b-v3-1",
        "zephyr-7b": "zephyr-7b-beta",
        "sdxl": "stable-diffusion-xl-base",
        "flux-pro": "flux-1.1-pro",
    }

    @classmethod
    def fetch_completions_models(cls):
        response = requests.get('https://api.airforce/models', verify=False)
@@ -44,183 +68,164 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):

    @classmethod
    def fetch_imagine_models(cls):
        response = requests.get('https://api.airforce/imagine/models', verify=False)
        response = requests.get(
            'https://api.airforce/imagine/models',
            'https://api.airforce/v1/imagine2/models',
            verify=False
        )
        response.raise_for_status()
        return response.json()

    default_model = "gpt-4o-mini"
    default_image_model = "flux"
    additional_models_imagine = ["stable-diffusion-xl-base", "stable-diffusion-xl-lightning", "flux-1.1-pro"]
    image_models = fetch_imagine_models.__get__(None, object)() + additional_models_imagine

    @classmethod
    def get_models(cls):
        if not cls.models:
            cls.image_models = [*cls.fetch_imagine_models(), *cls.additional_models_imagine]
            cls.models = [
                *cls.fetch_completions_models(),
                *cls.image_models
            ]
        return cls.models

    def is_image_model(cls, model: str) -> bool:
        return model in cls.image_models
    model_aliases = {
        ### completions ###
        # openchat
        "openchat-3.5": "openchat-3.5-0106",

        # deepseek-ai
        "deepseek-coder": "deepseek-coder-6.7b-instruct",

        # NousResearch
        "hermes-2-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO",
        "hermes-2-pro": "hermes-2-pro-mistral-7b",

        # teknium
        "openhermes-2.5": "openhermes-2.5-mistral-7b",

        # liquid
        "lfm-40b": "lfm-40b-moe",

        # DiscoResearch
        "german-7b": "discolm-german-7b-v1",

        # meta-llama
        "llama-2-7b": "llama-2-7b-chat-int8",
        "llama-2-7b": "llama-2-7b-chat-fp16",
        "llama-3.1-70b": "llama-3.1-70b-chat",
        "llama-3.1-8b": "llama-3.1-8b-chat",
        "llama-3.1-70b": "llama-3.1-70b-turbo",
        "llama-3.1-8b": "llama-3.1-8b-turbo",

        # inferless
        "neural-7b": "neural-chat-7b-v3-1",

        # HuggingFaceH4
        "zephyr-7b": "zephyr-7b-beta",

        ### imagine ###
        "sdxl": "stable-diffusion-xl-base",
        "sdxl": "stable-diffusion-xl-lightning",
        "flux-pro": "flux-1.1-pro",
    }
    models = list(dict.fromkeys([default_model] +
        fetch_completions_models.__get__(None, object)() +
        image_models))
    @classmethod
    def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        prompt: str = None,
        seed: int = None,
        size: str = "1:1",  # "1:1", "16:9", "9:16", "21:9", "9:21", "1:2", "2:1"
        stream: bool = False,
        **kwargs
    ) -> AsyncResult:
        model = cls.get_model(model)

        if model in cls.image_models:
            if prompt is None:
                prompt = messages[-1]['content']
            return cls._generate_image(model, prompt, proxy, seed, size)
        else:
            return cls._generate_text(model, messages, proxy, stream, **kwargs)
    async def check_api_key(cls, api_key: str) -> bool:
        """
        Always returns True to allow all models.
        """
        if not api_key or api_key == "null":
            return True  # No restrictions if no key.

        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
            "Accept": "*/*",
        }

        try:
            async with ClientSession(headers=headers) as session:
                async with session.get(f"https://api.airforce/check?key={api_key}") as response:
                    if response.status == 200:
                        data = await response.json()
                        return data.get('info') in ['Sponsor key', 'Premium key']
                    return False
        except Exception as e:
            print(f"Error checking API key: {str(e)}")
            return False
    @classmethod
    async def _generate_image(
    async def generate_image(
        cls,
        model: str,
        prompt: str,
        proxy: str = None,
        seed: int = None,
        size: str = "1:1",
        **kwargs
        api_key: str,
        size: str,
        seed: int,
        proxy: str = None
    ) -> AsyncResult:
        headers = {
            "accept": "*/*",
            "accept-language": "en-US,en;q=0.9",
            "cache-control": "no-cache",
            "user-agent": "Mozilla/5.0"
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:133.0) Gecko/20100101 Firefox/133.0",
            "Accept": "image/avif,image/webp,image/png,image/svg+xml,image/*;q=0.8,*/*;q=0.5",
            "Accept-Encoding": "gzip, deflate, br, zstd",
            "Content-Type": "application/json",
            "Authorization": f"Bearer {api_key}",
        }
        if seed is None:
            seed = random.randint(0, 100000)
        params = {"model": model, "prompt": prompt, "size": size, "seed": seed}

        async with StreamSession(headers=headers, proxy=proxy) as session:
            params = {
                "model": model,
                "prompt": prompt,
                "size": size,
                "seed": seed
            }
            async with session.get(f"{cls.api_endpoint_imagine}", params=params) as response:
                await raise_for_status(response)
                content_type = response.headers.get('Content-Type', '').lower()

                if 'application/json' in content_type:
                    raise RuntimeError(await response.json().get("error", {}).get("message"))
                elif content_type.startswith("image/"):
                    image_url = f"{cls.api_endpoint_imagine}?model={model}&prompt={quote(prompt)}&size={size}&seed={seed}"
                    yield ImageResponse(images=image_url, alt=prompt)
        async with ClientSession(headers=headers) as session:
            async with session.get(cls.api_endpoint_imagine2, params=params, proxy=proxy) as response:
                if response.status == 200:
                    image_url = str(response.url)
                    yield ImageResponse(images=image_url, alt=f"Generated image: {prompt}")
                else:
                    error_text = await response.text()
                    raise RuntimeError(f"Image generation failed: {response.status} - {error_text}")
    @classmethod
    async def _generate_text(
    async def generate_text(
        cls,
        model: str,
        messages: Messages,
        max_tokens: int,
        temperature: float,
        top_p: float,
        stream: bool,
        api_key: str,
        proxy: str = None
    ) -> AsyncResult:
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:133.0) Gecko/20100101 Firefox/133.0",
            "Accept": "application/json, text/event-stream",
            "Accept-Encoding": "gzip, deflate, br, zstd",
            "Content-Type": "application/json",
            "Authorization": f"Bearer {api_key}",
        }
        full_message = "\n".join([msg['content'] for msg in messages])
        message_chunks = split_message(full_message, max_length=1000)

        data = {
            "messages": [{"role": "user", "content": chunk} for chunk in message_chunks],
            "model": model,
            "max_tokens": max_tokens,
            "temperature": temperature,
            "top_p": top_p,
            "stream": stream,
        }

        async with ClientSession(headers=headers) as session:
            async with session.post(cls.api_endpoint_completions, json=data, proxy=proxy) as response:
                response.raise_for_status()

                if stream:
                    async for line in response.content:
                        line = line.decode('utf-8').strip()
                        if line.startswith('data: '):
                            try:
                                json_str = line[6:]  # Remove 'data: ' prefix
                                chunk = json.loads(json_str)
                                if 'choices' in chunk and chunk['choices']:
                                    delta = chunk['choices'][0].get('delta', {})
                                    if 'content' in delta:
                                        filtered_content = cls._filter_response(delta['content'])
                                        yield filtered_content
                            except json.JSONDecodeError:
                                continue
                else:
                    # Non-streaming response
                    result = await response.json()
                    if 'choices' in result and result['choices']:
                        message = result['choices'][0].get('message', {})
                        content = message.get('content', '')
                        filtered_content = cls._filter_response(content)
                        yield filtered_content
    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        prompt: str = None,
        proxy: str = None,
        stream: bool = False,
        max_tokens: int = 4096,
        temperature: float = 1,
        top_p: float = 1,
        stream: bool = True,
        api_key: str = None,
        size: str = "1:1",
        seed: int = None,
        **kwargs
    ) -> AsyncResult:
        headers = {
            "accept": "*/*",
            "accept-language": "en-US,en;q=0.9",
            "authorization": "Bearer missing api key",
            "content-type": "application/json",
            "user-agent": "Mozilla/5.0"
        }
        if not await cls.check_api_key(api_key):
            pass

        full_message = "\n".join(
            [f"{msg['role'].capitalize()}: {msg['content']}" for msg in messages]
        )

        message_chunks = split_message(full_message, max_length=1000)

        async with StreamSession(headers=headers, proxy=proxy) as session:
            full_response = ""
            for chunk in message_chunks:
                data = {
                    "messages": [{"role": "user", "content": chunk}],
                    "model": model,
                    "max_tokens": max_tokens,
                    "temperature": temperature,
                    "top_p": top_p,
                    "stream": stream
                }

                async with session.post(cls.api_endpoint_completions, json=data) as response:
                    await raise_for_status(response)
                    content_type = response.headers.get('Content-Type', '').lower()

                    if 'application/json' in content_type:
                        json_data = await response.json()
                        if json_data.get("model") == "error":
                            raise RuntimeError(json_data['choices'][0]['message'].get('content', ''))
                    if stream:
                        async for line in response.iter_lines():
                            if line:
                                line = line.decode('utf-8').strip()
                                if line.startswith("data: ") and line != "data: [DONE]":
                                    json_data = json.loads(line[6:])
                                    content = json_data['choices'][0]['delta'].get('content', '')
                                    if content:
                                        yield cls._filter_content(content)
                    else:
                        content = json_data['choices'][0]['message']['content']
                        full_response += cls._filter_content(content)

            yield full_response
        if cls.is_image_model(model):
            if prompt is None:
                prompt = messages[-1]['content']
            if seed is None:
                seed = random.randint(0, 10000)

            async for result in cls.generate_image(model, prompt, api_key, size, seed, proxy):
                yield result
        else:
            async for result in cls.generate_text(model, messages, max_tokens, temperature, top_p, stream, api_key, proxy):
                yield result

    @classmethod
    def _filter_content(cls, part_response: str) -> str:
@@ -229,16 +234,19 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
            '',
            part_response
        )

        part_response = re.sub(
            r"Rate limit \(\d+\/minute\) exceeded\. Join our discord for more: .+https:\/\/discord\.com\/invite\/\S+",
            '',
            part_response
        )

        part_response = re.sub(
            r"\[ERROR\] '\w{8}-\w{4}-\w{4}-\w{4}-\w{12}'",  # any-uncensored
            '',
            part_response
        )

        return part_response
    @classmethod
    def _filter_response(cls, response: str) -> str:
        filtered_response = re.sub(r"\[ERROR\] '\w{8}-\w{4}-\w{4}-\w{4}-\w{12}'", '', response)  # any-uncensored
        filtered_response = re.sub(r'<\|im_end\|>', '', filtered_response)  # hermes-2-pro-mistral-7b
        filtered_response = re.sub(r'</s>', '', filtered_response)  # neural-chat-7b-v3-1
        filtered_response = cls._filter_content(filtered_response)
        return filtered_response
@@ -129,8 +129,6 @@ class AmigoChat(AsyncGeneratorProvider, ProviderModelMixin):
        ### image ###
        "flux-realism": "flux-realism",
        "flux-dev": "flux/dev",

        "dalle-3": "dall-e-3",
    }

    @classmethod
@@ -141,7 +139,12 @@ class AmigoChat(AsyncGeneratorProvider, ProviderModelMixin):
            return MODELS['image'][model]['persona_id']
        else:
            raise ValueError(f"Unknown model: {model}")

    @staticmethod
    def generate_chat_id() -> str:
        """Generate a chat ID in format: 8-4-4-4-12 hexadecimal digits"""
        return str(uuid.uuid4())

    @classmethod
    async def create_async_generator(
        cls,
@@ -182,22 +185,24 @@ class AmigoChat(AsyncGeneratorProvider, ProviderModelMixin):
            "x-device-language": "en-US",
            "x-device-platform": "web",
            "x-device-uuid": device_uuid,
            "x-device-version": "1.0.42"
            "x-device-version": "1.0.45"
        }

        async with StreamSession(headers=headers, proxy=proxy) as session:
            if model not in cls.image_models:
                data = {
                    "chatId": cls.generate_chat_id(),
                    "frequency_penalty": frequency_penalty,
                    "max_tokens": max_tokens,
                    "messages": messages,
                    "model": model,
                    "personaId": cls.get_personaId(model),
                    "frequency_penalty": frequency_penalty,
                    "max_tokens": max_tokens,
                    "presence_penalty": presence_penalty,
                    "stream": stream,
                    "temperature": temperature,
                    "top_p": top_p
                }
                print(data)
                async with session.post(cls.chat_api_endpoint, json=data, timeout=timeout) as response:
                    await raise_for_status(response)
                    async for line in response.iter_lines():
@@ -25,14 +25,14 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):

    default_model = 'blackboxai'
    default_vision_model = default_model
    default_image_model = 'Image Generation'
    image_models = ['Image Generation', 'repomap']
    default_image_model = 'flux'
    image_models = ['flux', 'repomap']
    vision_models = [default_model, 'gpt-4o', 'gemini-pro', 'gemini-1.5-flash', 'llama-3.1-8b', 'llama-3.1-70b', 'llama-3.1-405b']

    userSelectedModel = ['gpt-4o', 'gemini-pro', 'claude-sonnet-3.5', 'blackboxai-pro']

    agentMode = {
        'Image Generation': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
        'flux': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"}
    }

    trendingAgentMode = {
@@ -79,9 +79,9 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
    }

    additional_prefixes = {
        'gpt-4o': '@gpt-4o',
        'gemini-pro': '@gemini-pro',
        'claude-sonnet-3.5': '@claude-sonnet'
        'gpt-4o': '@GPT-4o',
        'gemini-pro': '@Gemini-PRO',
        'claude-sonnet-3.5': '@Claude-Sonnet-3.5'
    }

    model_prefixes = {
@@ -95,9 +95,9 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
    models = list(dict.fromkeys([default_model, *userSelectedModel, *list(agentMode.keys()), *list(trendingAgentMode.keys())]))

    model_aliases = {
        "gpt-3.5-turbo": "blackboxai",
        "gemini-flash": "gemini-1.5-flash",
        "claude-3.5-sonnet": "claude-sonnet-3.5",
        "flux": "Image Generation",
        "claude-3.5-sonnet": "claude-sonnet-3.5"
    }

    @classmethod
@@ -173,9 +173,14 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):

        if image is not None:
            messages[-1]['data'] = {
                'fileText': '',
                'imageBase64': to_data_uri(image),
                'title': image_name
                "imagesData": [
                    {
                        "filePath": f"MultipleFiles/{image_name}",
                        "contents": to_data_uri(image)
                    }
                ],
                "fileText": "",
                "title": ""
            }

        headers = {
@@ -219,6 +224,8 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
            "userSelectedModel": model if model in cls.userSelectedModel else None,
            "webSearchMode": web_search,
            "validated": validated_value,
            "imageGenerationMode": False,
            "webSearchModePrompt": False
        }

        async with ClientSession(headers=headers) as session:
g4f/Provider/Blackbox2.py (new file, 69 lines)
@@ -0,0 +1,69 @@
from __future__ import annotations

import random
import asyncio
from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin

class Blackbox2(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://www.blackbox.ai"
    api_endpoint = "https://www.blackbox.ai/api/improve-prompt"
    working = True
    supports_system_message = True
    supports_message_history = True

    default_model = 'llama-3.1-70b'
    models = [default_model]

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        max_retries: int = 3,
        delay: int = 1,
        **kwargs
    ) -> AsyncResult:
        headers = {
            'accept': '*/*',
            'accept-language': 'en-US,en;q=0.9',
            'content-type': 'text/plain;charset=UTF-8',
            'dnt': '1',
            'origin': 'https://www.blackbox.ai',
            'priority': 'u=1, i',
            'referer': 'https://www.blackbox.ai',
            'sec-ch-ua': '"Chromium";v="131", "Not_A Brand";v="24"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Linux"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
        }

        data = {
            "messages": messages,
            "max_tokens": None
        }

        async with ClientSession(headers=headers) as session:
            for attempt in range(max_retries):
                try:
                    async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                        response.raise_for_status()
                        response_data = await response.json()
                        if 'prompt' in response_data:
                            yield response_data['prompt']
                            return
                        else:
                            raise KeyError("'prompt' key not found in the response")
                except Exception as e:
                    if attempt == max_retries - 1:
                        yield f"Error after {max_retries} attempts: {str(e)}"
                    else:
                        wait_time = delay * (2 ** attempt) + random.uniform(0, 1)
                        print(f"Attempt {attempt + 1} failed. Retrying in {wait_time:.2f} seconds...")
                        await asyncio.sleep(wait_time)
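A minimal usage sketch for the new Blackbox2 provider, assuming the standard g4f async client interface and that the provider is passed explicitly:

```python
import asyncio
from g4f.client import AsyncClient
from g4f.Provider import Blackbox2

async def main():
    # Route requests through Blackbox2; it only exposes llama-3.1-70b.
    client = AsyncClient(provider=Blackbox2)
    response = await client.chat.completions.create(
        model="llama-3.1-70b",
        messages=[{"role": "user", "content": "Summarize what an async generator provider does."}],
    )
    print(response.choices[0].message.content)

asyncio.run(main())
```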
@@ -1,11 +1,11 @@
from __future__ import annotations

from aiohttp import ClientSession
from aiohttp import ClientSession, ClientResponseError
import json

from ..typing import AsyncResult, Messages, ImageType
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin


class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://deepinfra.com/chat"
    api_endpoint = "https://api.deepinfra.com/v1/openai/chat/completions"
@@ -13,11 +13,12 @@ class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin):
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo'
    models = [
        'meta-llama/Meta-Llama-3.1-8B-Instruct',
        default_model,
        'Qwen/QwQ-32B-Preview',
        'microsoft/WizardLM-2-8x22B',
        'Qwen/Qwen2.5-72B-Instruct',
        'Qwen/Qwen2.5-Coder-32B-Instruct',
@@ -26,60 +27,37 @@ class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin):
    model_aliases = {
        "llama-3.1-8b": "meta-llama/Meta-Llama-3.1-8B-Instruct",
        "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
        "qwq-32b": "Qwen/QwQ-32B-Preview",
        "wizardlm-2-8x22b": "microsoft/WizardLM-2-8x22B",
        "qwen-2-72b": "Qwen/Qwen2.5-72B-Instruct",
        "qwen-2.5-coder-32b": "Qwen2.5-Coder-32B-Instruct",
        "nemotron-70b": "nvidia/Llama-3.1-Nemotron-70B-Instruct",
    }

    @classmethod
    def get_model(cls, model: str) -> str:
        if model in cls.models:
            return model
        elif model in cls.model_aliases:
            return cls.model_aliases[model]
        else:
            return cls.default_model

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        image: ImageType = None,
        image_name: str = None,
        **kwargs
    ) -> AsyncResult:
        model = cls.get_model(model)

        headers = {
            'Accept-Language': 'en-US,en;q=0.9',
            'Cache-Control': 'no-cache',
            'Connection': 'keep-alive',
            'Content-Type': 'application/json',
            'Origin': 'https://deepinfra.com',
            'Pragma': 'no-cache',
            'Referer': 'https://deepinfra.com/',
            'Sec-Fetch-Dest': 'empty',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'same-site',
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36',
            'X-Deepinfra-Source': 'web-embed',
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
            'X-Deepinfra-Source': 'web-page',
            'accept': 'text/event-stream',
            'sec-ch-ua': '"Not;A=Brand";v="24", "Chromium";v="128"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Linux"',
        }

        async with ClientSession(headers=headers) as session:
            data = {
                'model': model,
                'messages': messages,
                'stream': True
            }
        data = {
            'model': model,
            'messages': messages,
            'stream': True
        }

        async with ClientSession(headers=headers) as session:
            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                response.raise_for_status()
                async for line in response.content:
@@ -28,8 +28,6 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):
    ]

    text_models = [
        'meta/meta-llama-3-70b-instruct',
        'mistralai/mixtral-8x7b-instruct-v0.1',
        'google-deepmind/gemma-2b-it',
        'yorickvp/llava-13b',
    ]
@@ -14,6 +14,7 @@ from .local import *
from .Airforce import Airforce
from .AmigoChat import AmigoChat
from .Blackbox import Blackbox
from .Blackbox2 import Blackbox2
from .ChatGpt import ChatGpt
from .ChatGptEs import ChatGptEs
from .Cloudflare import Cloudflare
@@ -24,7 +25,6 @@ from .DeepInfraChat import DeepInfraChat
from .Free2GPT import Free2GPT
from .FreeGpt import FreeGpt
from .GizAI import GizAI
from .HuggingChat import HuggingChat
from .Liaobots import Liaobots
from .MagickPen import MagickPen
from .PerplexityLabs import PerplexityLabs
@@ -8,22 +8,24 @@ try:
    has_curl_cffi = True
except ImportError:
    has_curl_cffi = False
from ..typing import CreateResult, Messages
from ..errors import MissingRequirementsError
from ..requests.raise_for_status import raise_for_status
from .base_provider import ProviderModelMixin, AbstractProvider
from .helper import format_prompt
from ...typing import CreateResult, Messages
from ...errors import MissingRequirementsError
from ...requests.raise_for_status import raise_for_status
from ..base_provider import ProviderModelMixin, AbstractProvider
from ..helper import format_prompt

class HuggingChat(AbstractProvider, ProviderModelMixin):
    url = "https://huggingface.co/chat"
    working = True
    supports_stream = True
    needs_auth = True
    default_model = "meta-llama/Meta-Llama-3.1-70B-Instruct"

    models = [
        'Qwen/Qwen2.5-72B-Instruct',
        'meta-llama/Meta-Llama-3.1-70B-Instruct',
        'CohereForAI/c4ai-command-r-plus-08-2024',
        'Qwen/Qwen2.5-72B-Instruct',
        'Qwen/QwQ-32B-Preview',
        'nvidia/Llama-3.1-Nemotron-70B-Instruct-HF',
        'Qwen/Qwen2.5-Coder-32B-Instruct',
        'meta-llama/Llama-3.2-11B-Vision-Instruct',
@@ -33,9 +35,10 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
    ]

    model_aliases = {
        "qwen-2.5-72b": "Qwen/Qwen2.5-72B-Instruct",
        "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct",
        "command-r-plus": "CohereForAI/c4ai-command-r-plus-08-2024",
        "qwen-2-72b": "Qwen/Qwen2.5-72B-Instruct",
        "qwq-32b": "Qwen/QwQ-32B-Preview",
        "nemotron-70b": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
        "qwen-2.5-coder-32b": "Qwen/Qwen2.5-Coder-32B-Instruct",
        "llama-3.2-11b": "meta-llama/Llama-3.2-11B-Vision-Instruct",
@@ -173,4 +176,4 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
        full_response = full_response.replace('<|im_end|', '').replace('\u0000', '').strip()

        if not stream:
            yield full_response
            yield full_response
@@ -7,7 +7,7 @@ from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ...errors import ModelNotFoundError
from ...requests import StreamSession, raise_for_status

from ..HuggingChat import HuggingChat
from .HuggingChat import HuggingChat

class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://huggingface.co/chat"
@@ -88,4 +88,4 @@ def format_prompt(messages: Messages) -> str:
        for idx, message in enumerate(messages)
        if message["role"] == "assistant"
    ])
    return f"{history}<s>[INST] {question} [/INST]"
    return f"{history}<s>[INST] {question} [/INST]"
@@ -1,7 +1,7 @@
from __future__ import annotations

from .OpenaiAPI import OpenaiAPI
from ..HuggingChat import HuggingChat
from .HuggingChat import HuggingChat
from ...typing import AsyncResult, Messages

class HuggingFace2(OpenaiAPI):
@@ -25,4 +25,4 @@ class HuggingFace2(OpenaiAPI):
    ) -> AsyncResult:
        return super().create_async_generator(
            model, messages, api_base=api_base, max_tokens=max_tokens, **kwargs
        )
        )
@@ -9,6 +9,7 @@ from .Gemini import Gemini
from .GeminiPro import GeminiPro
from .GithubCopilot import GithubCopilot
from .Groq import Groq
from .HuggingChat import HuggingChat
from .HuggingFace import HuggingFace
from .HuggingFace2 import HuggingFace2
from .MetaAI import MetaAI
@@ -24,4 +25,4 @@ from .Raycast import Raycast
from .Replicate import Replicate
from .Theb import Theb
from .ThebApi import ThebApi
from .WhiteRabbitNeo import WhiteRabbitNeo
from .WhiteRabbitNeo import WhiteRabbitNeo
@@ -256,7 +256,7 @@ class Images:
        prompt: str,
        model: str = None,
        provider: Optional[ProviderType] = None,
        response_format: str = "url",
        response_format: Optional[str] = None,
        proxy: Optional[str] = None,
        **kwargs
    ) -> ImagesResponse:
@@ -283,7 +283,7 @@ class Images:
        prompt: str,
        model: Optional[str] = None,
        provider: Optional[ProviderType] = None,
        response_format: Optional[str] = "url",
        response_format: Optional[str] = None,
        proxy: Optional[str] = None,
        **kwargs
    ) -> ImagesResponse:
@@ -405,32 +405,38 @@ class Images:
            raise NoImageResponseError(f"No image response from {provider_name}")
        raise NoImageResponseError(f"Unexpected response type: {type(response)}")

    async def _process_image_response(
        self,
        response: ImageResponse,
        response_format: str,
        response_format: Optional[str] = None,
        proxy: str = None,
        model: Optional[str] = None,
        provider: Optional[str] = None
    ) -> list[Image]:
        if response_format in ("url", "b64_json"):
    ) -> ImagesResponse:
        if response_format == "url":
            # Return original URLs without saving locally
            images = [Image.construct(url=image, revised_prompt=response.alt) for image in response.get_list()]
        elif response_format == "b64_json":
            images = await copy_images(response.get_list(), response.options.get("cookies"), proxy)
            async def process_image_item(image_file: str) -> Image:
                if response_format == "b64_json":
                    with open(os.path.join(images_dir, os.path.basename(image_file)), "rb") as file:
                        image_data = base64.b64encode(file.read()).decode()
                        return Image.construct(url=image_file, b64_json=image_data, revised_prompt=response.alt)
                return Image.construct(url=image_file, revised_prompt=response.alt)
                with open(os.path.join(images_dir, os.path.basename(image_file)), "rb") as file:
                    image_data = base64.b64encode(file.read()).decode()
                    return Image.construct(b64_json=image_data, revised_prompt=response.alt)
            images = await asyncio.gather(*[process_image_item(image) for image in images])
        else:
            images = [Image.construct(url=image, revised_prompt=response.alt) for image in response.get_list()]
            # Save locally for None (default) case
            images = await copy_images(response.get_list(), response.options.get("cookies"), proxy)
            images = [Image.construct(url=f"/images/{os.path.basename(image)}", revised_prompt=response.alt) for image in images]
        last_provider = get_last_provider(True)
        return ImagesResponse.construct(
            images,
            created=int(time.time()),
            data=images,
            model=last_provider.get("model") if model is None else model,
            provider=last_provider.get("name") if provider is None else provider
        )

class AsyncClient(BaseClient):
    def __init__(
        self,
@@ -513,7 +519,7 @@ class AsyncImages(Images):
        prompt: str,
        model: Optional[str] = None,
        provider: Optional[ProviderType] = None,
        response_format: str = "url",
        response_format: Optional[str] = None,
        **kwargs
    ) -> ImagesResponse:
        return await self.async_generate(prompt, model, provider, response_format, **kwargs)
@@ -528,4 +534,4 @@ class AsyncImages(Images):
    ) -> ImagesResponse:
        return await self.async_create_variation(
            image, model, provider, response_format, **kwargs
        )
        )
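A short sketch of what the reworked image pipeline returns, assuming the fields passed to `ImagesResponse.construct` above (`created`, `model`, `provider`) are readable as attributes:

```python
import asyncio
from g4f.client import AsyncClient

async def main():
    client = AsyncClient()
    response = await client.images.generate(
        model="flux",
        prompt="a white siamese cat",
        response_format="url"
    )
    # Fields populated by _process_image_response; attribute access is assumed.
    print(response.created)        # Unix timestamp added in this commit
    print(response.data[0].url)    # remote URL (no local copy) for "url"
    print(response.model, response.provider)

asyncio.run(main())
```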
@@ -7,13 +7,13 @@ from .Provider import (
    AIChatFree,
    AmigoChat,
    Blackbox,
    Blackbox2,
    BingCreateImages,
    ChatGpt,
    ChatGptEs,
    Cloudflare,
    Copilot,
    CopilotAccount,
    DarkAI,
    DDG,
    DeepInfraChat,
    Free2GPT,
@@ -67,6 +67,7 @@ default = Model(
        DDG,
        Pizzagpt,
        ReplicateHome,
        Blackbox2,
        Upstage,
        Blackbox,
        Free2GPT,
@@ -74,7 +75,6 @@ default = Model(
        Airforce,
        ChatGptEs,
        Cloudflare,
        DarkAI,
        Mhystical,
        AmigoChat,
    ])
@@ -89,14 +89,14 @@ default = Model(
gpt_35_turbo = Model(
    name = 'gpt-3.5-turbo',
    base_provider = 'OpenAI',
    best_provider = IterListProvider([Airforce])
    best_provider = Blackbox
)

# gpt-4
gpt_4o = Model(
    name = 'gpt-4o',
    base_provider = 'OpenAI',
    best_provider = IterListProvider([Blackbox, ChatGptEs, DarkAI, ChatGpt, AmigoChat, Airforce, Liaobots, OpenaiChat])
    best_provider = IterListProvider([Blackbox, ChatGptEs, ChatGpt, AmigoChat, Airforce, Liaobots, OpenaiChat])
)

gpt_4o_mini = Model(
@@ -127,7 +127,7 @@ o1_preview = Model(
o1_mini = Model(
    name = 'o1-mini',
    base_provider = 'OpenAI',
    best_provider = Liaobots
    best_provider = IterListProvider([Liaobots, Airforce])
)

### GigaChat ###
@@ -167,20 +167,20 @@ llama_3_1_8b = Model(
llama_3_1_70b = Model(
    name = "llama-3.1-70b",
    base_provider = "Meta Llama",
    best_provider = IterListProvider([DDG, DeepInfraChat, Blackbox, TeachAnything, DarkAI, Airforce, RubiksAI, HuggingChat, HuggingFace, PerplexityLabs])
    best_provider = IterListProvider([DDG, DeepInfraChat, Blackbox, Blackbox2, TeachAnything, Airforce, RubiksAI, HuggingChat, HuggingFace, PerplexityLabs])
)

llama_3_1_405b = Model(
    name = "llama-3.1-405b",
    base_provider = "Meta Llama",
    best_provider = IterListProvider([Blackbox, DarkAI, AmigoChat])
    best_provider = IterListProvider([Blackbox, AmigoChat])
)

# llama 3.2
llama_3_2_1b = Model(
    name = "llama-3.2-1b",
    base_provider = "Meta Llama",
    best_provider = IterListProvider([Cloudflare])
    best_provider = Cloudflare
)

llama_3_2_11b = Model(
@@ -256,7 +256,7 @@ mixtral_8x7b_dpo = Model(
phi_2 = Model(
    name = "phi-2",
    base_provider = "Microsoft",
    best_provider = IterListProvider([Airforce])
    best_provider = Airforce
)

phi_3_5_mini = Model(
@@ -371,7 +371,7 @@ qwen_2_72b = Model(
qwen_2_5_72b = Model(
    name = 'qwen-2.5-72b',
    base_provider = 'Qwen',
    best_provider = AmigoChat
    best_provider = IterListProvider([AmigoChat, HuggingChat, HuggingFace])
)

qwen_2_5_coder_32b = Model(
@@ -380,6 +380,12 @@ qwen_2_5_coder_32b = Model(
    best_provider = IterListProvider([DeepInfraChat, HuggingChat, HuggingFace])
)

qwq_32b = Model(
    name = 'qwq-32b',
    base_provider = 'Qwen',
    best_provider = IterListProvider([DeepInfraChat, HuggingChat, HuggingFace])
)

### Upstage ###
solar_mini = Model(
    name = 'solar-mini',
@@ -632,10 +638,10 @@ flux_4o = Model(
)

### OpenAI ###
dalle_3 = Model(
dall_e_3 = Model(
    name = 'dall-e-3',
    base_provider = 'OpenAI',
    best_provider = IterListProvider([CopilotAccount, OpenaiAccount, MicrosoftDesigner, BingCreateImages])
    best_provider = IterListProvider([Airforce, CopilotAccount, OpenaiAccount, MicrosoftDesigner, BingCreateImages])
)

### Recraft ###
@@ -761,8 +767,10 @@ class ModelUtils:
        # qwen 2.5
        'qwen-2.5-72b': qwen_2_5_72b,
        'qwen-2.5-coder-32b': qwen_2_5_coder_32b,
        'qwq-32b': qwq_32b,

        ### Upstage ###
        'solar-mini': solar_mini,
        'solar-pro': solar_pro,

        ### Inflection ###
@@ -796,6 +804,12 @@ class ModelUtils:
        ### Nvidia ###
        'nemotron-70b': nemotron_70b,

        ### Teknium ###
        'openhermes-2.5': openhermes_2_5,

        ### Liquid ###
        'lfm-40b': lfm_40b,

        ### databricks ###
        'dbrx-instruct': dbrx_instruct,

@@ -805,6 +819,15 @@ class ModelUtils:
        ### anthracite-org ###
        'jamba-mini': jamba_mini,

        ### HuggingFaceH4 ###
        'zephyr-7b': zephyr_7b,

        ### Inferless ###
        'neural-7b': neural_7b,

        ### Gryphe ###
        'mythomax-13b': mythomax_13b,

        ### llmplayground.net ###
        'any-uncensored': any_uncensored,

@@ -831,8 +854,7 @@ class ModelUtils:
        'flux-4o': flux_4o,

        ### OpenAI ###
        'dalle-3': dalle_3,
        'dall-e-3': dalle_3,
        'dall-e-3': dall_e_3,

        ### Recraft ###
        'recraft-v3': recraft_v3,
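The renamed and newly registered entries above can be exercised by their alias strings; a minimal sketch, assuming they resolve through `ModelUtils.convert` as usual:

```python
from g4f.client import Client

client = Client()

# 'qwq-32b' and 'dall-e-3' are among the entries added or renamed in this commit.
chat = client.chat.completions.create(
    model="qwq-32b",
    messages=[{"role": "user", "content": "Hello"}],
)
print(chat.choices[0].message.content)

image = client.images.generate(model="dall-e-3", prompt="a white siamese cat", response_format="url")
print(image.data[0].url)
```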