Mirror of https://github.com/xtekky/gpt4free.git
Synced 2024-12-23 02:52:29 +03:00
Major Provider Updates and Model Support Enhancements (#2467)
* refactor(g4f/Provider/Airforce.py): improve model handling and filtering
  - Add hidden_models set to exclude specific models
  - Add evil alias for uncensored model handling
  - Extend filtering for model-specific response tokens
  - Add response buffering for streamed content (a sketch of the pattern follows the Airforce diff below)
  - Update model fetching with error handling
* refactor(g4f/Provider/Blackbox.py): improve caching and model handling
  - Add caching system for validated values with file-based storage (a condensed sketch follows the Blackbox diff below)
  - Rename 'flux' model to 'ImageGeneration' and update references
  - Add temperature, top_p and max_tokens parameters to generator
  - Simplify HTTP headers and remove redundant options
  - Add model alias mapping for ImageGeneration
  - Add file system utilities for cache management
* feat(g4f/Provider/RobocodersAPI.py): add caching and error handling
  - Add file-based caching system for access tokens and sessions
  - Add robust error handling with specific error messages
  - Add automatic dialog continuation on resource limits
  - Add HTML parsing with BeautifulSoup for token extraction
  - Add debug logging for error tracking
  - Add timeout configuration for API requests
* refactor(g4f/Provider/DarkAI.py): update DarkAI default model and aliases
  - Change default model from llama-3-405b to llama-3-70b
  - Remove llama-3-405b from supported models list
  - Remove llama-3.1-405b from model aliases
* feat(g4f/Provider/Blackbox2.py): add image generation support
  - Add image model 'flux' with dedicated API endpoint
  - Refactor generator to support both text and image outputs
  - Extract headers into reusable static method
  - Add type hints for AsyncGenerator return type
  - Split generation logic into _generate_text and _generate_image methods
  - Add ImageResponse handling for image generation results
  - BREAKING CHANGE: create_async_generator now returns AsyncGenerator instead of AsyncResult
* refactor(g4f/Provider/ChatGptEs.py): update ChatGptEs model configuration
  - Update models list to include gpt-3.5-turbo
  - Remove chatgpt-4o-latest from supported models
  - Remove model_aliases mapping for gpt-4o
* feat(g4f/Provider/DeepInfraChat.py): add Accept-Language header support
  - Add Accept-Language header for internationalization
  - Maintain existing header configuration
  - Improve request compatibility with language preferences
* refactor(g4f/Provider/needs_auth/Gemini.py): add ProviderModelMixin inheritance
  - Add ProviderModelMixin to class inheritance
  - Import ProviderModelMixin from base_provider
  - Move BaseConversation import to base_provider imports
* refactor(g4f/Provider/Liaobots.py): update model details and aliases
  - Add version suffix to o1 model IDs
  - Update model aliases for o1-preview and o1-mini (alias resolution is illustrated after the Liaobots diff below)
  - Standardize version format across model definitions
* refactor(g4f/Provider/PollinationsAI.py): enhance model support and generation
  - Split generation logic into dedicated image/text methods
  - Add additional text models including sur and claude
  - Add width/height parameters for image generation (a URL-construction example follows the PollinationsAI diff below)
  - Add model existence validation
  - Add hasattr checks for model lists initialization
* chore(gitignore): add provider cache directory
  - Add g4f/Provider/.cache to gitignore patterns
* refactor(g4f/Provider/ReplicateHome.py): update model configuration
  - Update default model to gemma-2b-it
  - Add default_image_model configuration
  - Remove llava-13b from supported models
  - Simplify request headers
* feat(g4f/models.py): expand provider and model support
  - Add new providers DarkAI and PollinationsAI
  - Add new models for Mistral, Flux and image generation
  - Update provider lists for existing models
  - Add P1 and Evil models with experimental providers
  - BREAKING CHANGE: Remove llava-13b model support
* refactor(g4f/Provider/Airforce.py): update type hint for split_message return
  - Change return type of 'split_message' from 'list[str]' to 'List[str]' for consistency with the typing import
  - Maintain overall functionality and structure of the 'Airforce' class
  - Ensure compatibility with type hinting standards in Python
* feat(g4f/Provider/RobocodersAPI.py): add support for optional BeautifulSoup dependency
  - Introduce a check for the BeautifulSoup library and handle its absence gracefully (the guard pattern is sketched after the RobocodersAPI diff below)
  - Raise a MissingRequirementsError if BeautifulSoup is not installed, prompting the user to install it
  - Remove direct import of BeautifulSoup to avoid import errors when the library is missing

---------

Co-authored-by: kqlio67 <>
This commit is contained in:
parent 5969983d83
commit a358b28f47
.gitignore (vendored): 3 changes

```diff
@@ -65,4 +65,5 @@ x.txt
 bench.py
 to-reverse.txt
 g4f/Provider/OpenaiChat2.py
 generated_images/
+g4f/Provider/.cache
```
g4f/Provider/Airforce.py

```diff
@@ -1,18 +1,19 @@
 from __future__ import annotations

 import json
 import random
 import re
 import requests
-from requests.packages.urllib3.exceptions import InsecureRequestWarning
 from aiohttp import ClientSession
+from typing import List
+from requests.packages.urllib3.exceptions import InsecureRequestWarning

 from ..typing import AsyncResult, Messages
 from ..image import ImageResponse
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .. import debug

 requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

-def split_message(message: str, max_length: int = 1000) -> list[str]:
+def split_message(message: str, max_length: int = 1000) -> List[str]:
     """Splits the message into parts up to (max_length)."""
     chunks = []
     while len(message) > max_length:
@@ -38,6 +39,8 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):

     default_model = "gpt-4o-mini"
     default_image_model = "flux"

+    hidden_models = {"Flux-1.1-Pro"}
+
     additional_models_imagine = ["flux-1.1-pro", "dall-e-3"]

@@ -54,39 +57,38 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
         "llama-3.1-70b": "llama-3.1-70b-turbo",
         "neural-7b": "neural-chat-7b-v3-1",
         "zephyr-7b": "zephyr-7b-beta",
+        "evil": "any-uncensored",
         "sdxl": "stable-diffusion-xl-base",
         "flux-pro": "flux-1.1-pro",
     }

-    @classmethod
-    def fetch_completions_models(cls):
-        response = requests.get('https://api.airforce/models', verify=False)
-        response.raise_for_status()
-        data = response.json()
-        return [model['id'] for model in data['data']]
-
-    @classmethod
-    def fetch_imagine_models(cls):
-        response = requests.get(
-            'https://api.airforce/v1/imagine2/models',
-            verify=False
-        )
-        response.raise_for_status()
-        return response.json()
-
-    @classmethod
-    def is_image_model(cls, model: str) -> bool:
-        return model in cls.image_models
-
     @classmethod
     def get_models(cls):
-        if not cls.models:
-            cls.image_models = cls.fetch_imagine_models() + cls.additional_models_imagine
-            cls.models = list(dict.fromkeys([cls.default_model] +
-                                            cls.fetch_completions_models() +
-                                            cls.image_models))
-        return cls.models
+        if not cls.image_models:
+            try:
+                url = "https://api.airforce/imagine2/models"
+                response = requests.get(url, verify=False)
+                response.raise_for_status()
+                cls.image_models = response.json()
+                cls.image_models.extend(cls.additional_models_imagine)
+            except Exception as e:
+                debug.log(f"Error fetching image models: {e}")
+
+        if not cls.models:
+            try:
+                url = "https://api.airforce/models"
+                response = requests.get(url, verify=False)
+                response.raise_for_status()
+                data = response.json()
+                cls.models = [model['id'] for model in data['data']]
+                cls.models.extend(cls.image_models)
+                cls.models = [model for model in cls.models if model not in cls.hidden_models]
+            except Exception as e:
+                debug.log(f"Error fetching text models: {e}")
+                cls.models = [cls.default_model]
+
+        return cls.models

     @classmethod
     async def check_api_key(cls, api_key: str) -> bool:
         """
@@ -111,6 +113,37 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
             print(f"Error checking API key: {str(e)}")
             return False

+    @classmethod
+    def _filter_content(cls, part_response: str) -> str:
+        """
+        Filters out unwanted content from the partial response.
+        """
+        part_response = re.sub(
+            r"One message exceeds the \d+chars per message limit\..+https:\/\/discord\.com\/invite\/\S+",
+            '',
+            part_response
+        )
+
+        part_response = re.sub(
+            r"Rate limit \(\d+\/minute\) exceeded\. Join our discord for more: .+https:\/\/discord\.com\/invite\/\S+",
+            '',
+            part_response
+        )
+
+        return part_response
+
+    @classmethod
+    def _filter_response(cls, response: str) -> str:
+        """
+        Filters the full response to remove system errors and other unwanted text.
+        """
+        filtered_response = re.sub(r"\[ERROR\] '\w{8}-\w{4}-\w{4}-\w{4}-\w{12}'", '', response)  # any-uncensored
+        filtered_response = re.sub(r'<\|im_end\|>', '', filtered_response)  # remove <|im_end|> token
+        filtered_response = re.sub(r'</s>', '', filtered_response)  # neural-chat-7b-v3-1
+        filtered_response = re.sub(r'^(Assistant: |AI: |ANSWER: |Output: )', '', filtered_response)  # phi-2
+        filtered_response = cls._filter_content(filtered_response)
+        return filtered_response
+
     @classmethod
     async def generate_image(
         cls,
@@ -124,6 +157,7 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
         headers = {
             "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:133.0) Gecko/20100101 Firefox/133.0",
             "Accept": "image/avif,image/webp,image/png,image/svg+xml,image/*;q=0.8,*/*;q=0.5",
+            "Accept-Language": "en-US,en;q=0.5",
             "Accept-Encoding": "gzip, deflate, br, zstd",
             "Content-Type": "application/json",
             "Authorization": f"Bearer {api_key}",
@@ -151,9 +185,13 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
         api_key: str,
         proxy: str = None
     ) -> AsyncResult:
+        """
+        Generates text, buffers the response, filters it, and returns the final result.
+        """
         headers = {
             "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:133.0) Gecko/20100101 Firefox/133.0",
             "Accept": "application/json, text/event-stream",
+            "Accept-Language": "en-US,en;q=0.5",
             "Accept-Encoding": "gzip, deflate, br, zstd",
             "Content-Type": "application/json",
             "Authorization": f"Bearer {api_key}",
@@ -175,6 +213,7 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
             response.raise_for_status()

             if stream:
+                buffer = []  # Buffer to collect partial responses
                 async for line in response.content:
                     line = line.decode('utf-8').strip()
                     if line.startswith('data: '):
@@ -184,18 +223,20 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
                             if 'choices' in chunk and chunk['choices']:
                                 delta = chunk['choices'][0].get('delta', {})
                                 if 'content' in delta:
-                                    filtered_content = cls._filter_response(delta['content'])
-                                    yield filtered_content
+                                    buffer.append(delta['content'])
                         except json.JSONDecodeError:
                             continue
+                # Combine the buffered response and filter it
+                filtered_response = cls._filter_response(''.join(buffer))
+                yield filtered_response
             else:
                 # Non-streaming response
                 result = await response.json()
                 if 'choices' in result and result['choices']:
                     message = result['choices'][0].get('message', {})
                     content = message.get('content', '')
-                    filtered_content = cls._filter_response(content)
-                    yield filtered_content
+                    filtered_response = cls._filter_response(content)
+                    yield filtered_response

     @classmethod
     async def create_async_generator(
@@ -217,7 +258,7 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
             pass

         model = cls.get_model(model)
-        if cls.is_image_model(model):
+        if model in cls.image_models:
             if prompt is None:
                 prompt = messages[-1]['content']
             if seed is None:
@@ -227,27 +268,3 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
         else:
             async for result in cls.generate_text(model, messages, max_tokens, temperature, top_p, stream, api_key, proxy):
                 yield result
-
-    @classmethod
-    def _filter_content(cls, part_response: str) -> str:
-        part_response = re.sub(
-            r"One message exceeds the \d+chars per message limit\..+https:\/\/discord\.com\/invite\/\S+",
-            '',
-            part_response
-        )
-
-        part_response = re.sub(
-            r"Rate limit \(\d+\/minute\) exceeded\. Join our discord for more: .+https:\/\/discord\.com\/invite\/\S+",
-            '',
-            part_response
-        )
-
-        return part_response
-
-    @classmethod
-    def _filter_response(cls, response: str) -> str:
-        filtered_response = re.sub(r"\[ERROR\] '\w{8}-\w{4}-\w{4}-\w{4}-\w{12}'", '', response)  # any-uncensored
-        filtered_response = re.sub(r'<\|im_end\|>', '', response)  # hermes-2-pro-mistral-7b
-        filtered_response = re.sub(r'</s>', '', response)  # neural-chat-7b-v3-1
-        filtered_response = cls._filter_content(filtered_response)
-        return filtered_response
```
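The streaming change above is worth calling out: response tokens such as `<|im_end|>` can be split across SSE chunks, so filtering each delta in isolation could miss them. The provider now buffers all deltas and filters once at the end. A minimal synchronous sketch of that pattern (the helper names here are illustrative, not part of the provider):

```python
import re

def filter_response(text: str) -> str:
    # Strip chat-template tokens, mirroring what Airforce._filter_response does.
    text = re.sub(r'<\|im_end\|>', '', text)
    text = re.sub(r'</s>', '', text)
    return text

def drain_and_filter(chunks) -> str:
    # Buffer partial deltas first and filter once at the end, so a token
    # split across two chunks is still removed.
    buffer = []
    for chunk in chunks:
        buffer.append(chunk)
    return filter_response(''.join(buffer))

print(drain_and_filter(["Hello<|im_", "end|>"]))  # -> Hello
```

The trade-off is that callers no longer receive tokens incrementally; the filtered result arrives as a single final yield.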
g4f/Provider/Blackbox.py

```diff
@@ -7,6 +7,10 @@ import json
 import re
 import aiohttp

+import os
+import json
+from pathlib import Path
+
 from ..typing import AsyncResult, Messages, ImageType
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from ..image import ImageResponse, to_data_uri
@@ -17,22 +21,22 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
     label = "Blackbox AI"
     url = "https://www.blackbox.ai"
     api_endpoint = "https://www.blackbox.ai/api/chat"

     working = True
     supports_stream = True
     supports_system_message = True
     supports_message_history = True
-    _last_validated_value = None

     default_model = 'blackboxai'
     default_vision_model = default_model
     default_image_model = 'flux'
-    image_models = ['flux', 'repomap']
+    image_models = ['ImageGeneration', 'repomap']
     vision_models = [default_model, 'gpt-4o', 'gemini-pro', 'gemini-1.5-flash', 'llama-3.1-8b', 'llama-3.1-70b', 'llama-3.1-405b']

     userSelectedModel = ['gpt-4o', 'gemini-pro', 'claude-sonnet-3.5', 'blackboxai-pro']

     agentMode = {
-        'flux': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"}
+        'ImageGeneration': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"}
     }

     trendingAgentMode = {
@@ -95,22 +99,63 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
     models = list(dict.fromkeys([default_model, *userSelectedModel, *list(agentMode.keys()), *list(trendingAgentMode.keys())]))

     model_aliases = {
+        "gpt-4": "blackboxai",
+        "gpt-4": "gpt-4o",
+        "gpt-4o-mini": "gpt-4o",
         "gpt-3.5-turbo": "blackboxai",
         "gemini-flash": "gemini-1.5-flash",
-        "claude-3.5-sonnet": "claude-sonnet-3.5"
+        "claude-3.5-sonnet": "claude-sonnet-3.5",
+        "flux": "ImageGeneration",
     }

     @classmethod
-    async def fetch_validated(cls):
-        if cls._last_validated_value:
-            return cls._last_validated_value
+    def _get_cache_dir(cls) -> Path:
+        # Get the path to the current file
+        current_file = Path(__file__)
+        # Create the path to the .cache directory
+        cache_dir = current_file.parent / '.cache'
+        # Create a directory if it does not exist
+        cache_dir.mkdir(exist_ok=True)
+        return cache_dir
+
+    @classmethod
+    def _get_cache_file(cls) -> Path:
+        return cls._get_cache_dir() / 'blackbox.json'
+
+    @classmethod
+    def _load_cached_value(cls) -> str | None:
+        cache_file = cls._get_cache_file()
+        if cache_file.exists():
+            try:
+                with open(cache_file, 'r') as f:
+                    data = json.load(f)
+                    return data.get('validated_value')
+            except Exception as e:
+                print(f"Error reading cache file: {e}")
+        return None
+
+    @classmethod
+    def _save_cached_value(cls, value: str):
+        cache_file = cls._get_cache_file()
+        try:
+            with open(cache_file, 'w') as f:
+                json.dump({'validated_value': value}, f)
+        except Exception as e:
+            print(f"Error writing to cache file: {e}")
+
+    @classmethod
+    async def fetch_validated(cls):
+        # Let's try to load the value from the cache first
+        cached_value = cls._load_cached_value()
+        if cached_value:
+            return cached_value

         async with aiohttp.ClientSession() as session:
             try:
                 async with session.get(cls.url) as response:
                     if response.status != 200:
                         print("Failed to load the page.")
-                        return cls._last_validated_value
+                        return cached_value

                     page_content = await response.text()
                     js_files = re.findall(r'static/chunks/\d{4}-[a-fA-F0-9]+\.js', page_content)
@@ -125,12 +170,13 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
                                 match = key_pattern.search(js_content)
                                 if match:
                                     validated_value = match.group(1)
-                                    cls._last_validated_value = validated_value
+                                    # Save the new value to the cache file
+                                    cls._save_cached_value(validated_value)
                                     return validated_value
             except Exception as e:
                 print(f"Error fetching validated value: {e}")

-        return cls._last_validated_value
+        return cached_value

     @staticmethod
     def generate_id(length=7):
@@ -162,12 +208,16 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         web_search: bool = False,
         image: ImageType = None,
         image_name: str = None,
+        top_p: float = 0.9,
+        temperature: float = 0.5,
+        max_tokens: int = 1024,
         **kwargs
     ) -> AsyncResult:
         message_id = cls.generate_id()
         messages = cls.add_prefix_to_messages(messages, model)
         validated_value = await cls.fetch_validated()
         formatted_message = format_prompt(messages)
+        model = cls.get_model(model)

         messages = [{"id": message_id, "content": formatted_message, "role": "user"}]

@@ -185,20 +235,10 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):

         headers = {
             'accept': '*/*',
-            'accept-language': 'en-US,en;q=0.9',
-            'cache-control': 'no-cache',
             'content-type': 'application/json',
             'origin': cls.url,
-            'pragma': 'no-cache',
-            'priority': 'u=1, i',
             'referer': f'{cls.url}/',
-            'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"Linux"',
-            'sec-fetch-dest': 'empty',
-            'sec-fetch-mode': 'cors',
-            'sec-fetch-site': 'same-origin',
-            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36'
+            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
         }

         data = {
@@ -211,9 +251,9 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
             "trendingAgentMode": cls.trendingAgentMode.get(model, {}) if model in cls.trendingAgentMode else {},
             "isMicMode": False,
             "userSystemPrompt": None,
-            "maxTokens": 1024,
-            "playgroundTopP": 0.9,
-            "playgroundTemperature": 0.5,
+            "maxTokens": max_tokens,
+            "playgroundTopP": top_p,
+            "playgroundTemperature": temperature,
             "isChromeExt": False,
             "githubToken": None,
             "clickedAnswer2": False,
@@ -225,7 +265,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
             "webSearchMode": web_search,
             "validated": validated_value,
             "imageGenerationMode": False,
-            "webSearchModePrompt": False
+            "webSearchModePrompt": web_search
         }

         async with ClientSession(headers=headers) as session:
```
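The new `_get_cache_dir` / `_load_cached_value` / `_save_cached_value` trio replaces the in-memory `_last_validated_value` class attribute with a JSON file under `g4f/Provider/.cache` (the directory added to .gitignore above), so the scraped `validated` key survives process restarts. Condensed to its essence, the read/write pattern looks roughly like this (a simplified standalone sketch, not the exact methods):

```python
import json
from pathlib import Path

CACHE_FILE = Path(__file__).parent / '.cache' / 'blackbox.json'

def load_validated_value() -> str | None:
    # A cold or corrupt cache simply means "not cached".
    try:
        return json.loads(CACHE_FILE.read_text()).get('validated_value')
    except (FileNotFoundError, json.JSONDecodeError):
        return None

def save_validated_value(value: str) -> None:
    CACHE_FILE.parent.mkdir(exist_ok=True)
    CACHE_FILE.write_text(json.dumps({'validated_value': value}))
```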
g4f/Provider/Blackbox2.py

```diff
@@ -3,20 +3,30 @@ from __future__ import annotations
 import random
 import asyncio
 from aiohttp import ClientSession
+from typing import Union, AsyncGenerator
+
 from ..typing import AsyncResult, Messages
+from ..image import ImageResponse
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin

 from .. import debug

 class Blackbox2(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://www.blackbox.ai"
-    api_endpoint = "https://www.blackbox.ai/api/improve-prompt"
+    api_endpoints = {
+        "llama-3.1-70b": "https://www.blackbox.ai/api/improve-prompt",
+        "flux": "https://www.blackbox.ai/api/image-generator"
+    }
+
     working = True
     supports_system_message = True
     supports_message_history = True
     supports_stream = False

     default_model = 'llama-3.1-70b'
-    models = [default_model]
+    chat_models = ['llama-3.1-70b']
+    image_models = ['flux']
+    models = [*chat_models, *image_models]

     @classmethod
     async def create_async_generator(
@@ -27,23 +37,27 @@ class Blackbox2(AsyncGeneratorProvider, ProviderModelMixin):
         max_retries: int = 3,
         delay: int = 1,
         **kwargs
-    ) -> AsyncResult:
-        headers = {
-            'accept': '*/*',
-            'accept-language': 'en-US,en;q=0.9',
-            'content-type': 'text/plain;charset=UTF-8',
-            'dnt': '1',
-            'origin': 'https://www.blackbox.ai',
-            'priority': 'u=1, i',
-            'referer': 'https://www.blackbox.ai',
-            'sec-ch-ua': '"Chromium";v="131", "Not_A Brand";v="24"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"Linux"',
-            'sec-fetch-dest': 'empty',
-            'sec-fetch-mode': 'cors',
-            'sec-fetch-site': 'same-origin',
-            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
-        }
+    ) -> AsyncGenerator:
+        if model in cls.chat_models:
+            async for result in cls._generate_text(model, messages, proxy, max_retries, delay):
+                yield result
+        elif model in cls.image_models:
+            async for result in cls._generate_image(model, messages, proxy):
+                yield result
+        else:
+            raise ValueError(f"Unsupported model: {model}")
+
+    @classmethod
+    async def _generate_text(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        max_retries: int = 3,
+        delay: int = 1
+    ) -> AsyncGenerator:
+        headers = cls._get_headers()
+        api_endpoint = cls.api_endpoints[model]

         data = {
             "messages": messages,
@@ -53,7 +67,7 @@ class Blackbox2(AsyncGeneratorProvider, ProviderModelMixin):
         async with ClientSession(headers=headers) as session:
             for attempt in range(max_retries):
                 try:
-                    async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+                    async with session.post(api_endpoint, json=data, proxy=proxy) as response:
                         response.raise_for_status()
                         response_data = await response.json()
                         if 'prompt' in response_data:
@@ -68,3 +82,39 @@ class Blackbox2(AsyncGeneratorProvider, ProviderModelMixin):
                     wait_time = delay * (2 ** attempt) + random.uniform(0, 1)
                     debug.log(f"Attempt {attempt + 1} failed. Retrying in {wait_time:.2f} seconds...")
                     await asyncio.sleep(wait_time)
+
+    @classmethod
+    async def _generate_image(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None
+    ) -> AsyncGenerator:
+        headers = cls._get_headers()
+        api_endpoint = cls.api_endpoints[model]
+
+        async with ClientSession(headers=headers) as session:
+            prompt = messages[-1]["content"]
+            data = {
+                "query": prompt
+            }
+
+            async with session.post(api_endpoint, headers=headers, json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                response_data = await response.json()
+
+                if 'markdown' in response_data:
+                    image_url = response_data['markdown'].split('(')[1].split(')')[0]
+                    yield ImageResponse(images=image_url, alt=prompt)
+
+    @staticmethod
+    def _get_headers() -> dict:
+        return {
+            'accept': '*/*',
+            'accept-language': 'en-US,en;q=0.9',
+            'content-type': 'text/plain;charset=UTF-8',
+            'origin': 'https://www.blackbox.ai',
+            'priority': 'u=1, i',
+            'referer': 'https://www.blackbox.ai',
+            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
+        }
```
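`_generate_text` keeps the existing retry loop: on failure it waits `delay * (2 ** attempt) + random.uniform(0, 1)` seconds, i.e. exponential backoff with jitter (roughly 1s, 2s, 4s for the default three attempts). Factored into a standalone helper, the logic looks like this (the wrapper itself is illustrative, not part of the provider):

```python
import asyncio
import random

async def with_backoff(request, max_retries: int = 3, delay: int = 1):
    # Retry an async callable, doubling the wait after each failure and
    # adding up to one second of jitter to avoid synchronized retries.
    for attempt in range(max_retries):
        try:
            return await request()
        except Exception:
            if attempt == max_retries - 1:
                raise
            wait_time = delay * (2 ** attempt) + random.uniform(0, 1)
            await asyncio.sleep(wait_time)
```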
g4f/Provider/ChatGptEs.py

```diff
@@ -12,17 +12,14 @@ from .helper import format_prompt
 class ChatGptEs(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://chatgpt.es"
     api_endpoint = "https://chatgpt.es/wp-admin/admin-ajax.php"

     working = True
     supports_stream = True
     supports_system_message = True
     supports_message_history = True

     default_model = 'gpt-4o'
-    models = ['gpt-4o', 'gpt-4o-mini', 'chatgpt-4o-latest']
-
-    model_aliases = {
-        "gpt-4o": "chatgpt-4o-latest",
-    }
+    models = ['gpt-3.5-turbo', 'gpt-4o', 'gpt-4o-mini']

     @classmethod
     def get_model(cls, model: str) -> str:
```
g4f/Provider/DarkAI.py

```diff
@@ -16,17 +16,15 @@ class DarkAI(AsyncGeneratorProvider, ProviderModelMixin):
     supports_system_message = True
     supports_message_history = True

-    default_model = 'llama-3-405b'
+    default_model = 'llama-3-70b'
     models = [
         'gpt-4o', # Uncensored
         'gpt-3.5-turbo', # Uncensored
-        'llama-3-70b', # Uncensored
         default_model,
     ]

     model_aliases = {
         "llama-3.1-70b": "llama-3-70b",
-        "llama-3.1-405b": "llama-3-405b",
     }

     @classmethod
```
g4f/Provider/DeepInfraChat.py

```diff
@@ -43,6 +43,7 @@ class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin):
         **kwargs
     ) -> AsyncResult:
         headers = {
+            'Accept-Language': 'en-US,en;q=0.9',
             'Content-Type': 'application/json',
             'Origin': 'https://deepinfra.com',
             'Referer': 'https://deepinfra.com/',
```
g4f/Provider/Liaobots.py

```diff
@@ -36,8 +36,8 @@ models = {
         "tokenLimit": 126000,
         "context": "128K",
     },
-    "o1-preview": {
-        "id": "o1-preview",
+    "o1-preview-2024-09-12": {
+        "id": "o1-preview-2024-09-12",
         "name": "o1-preview",
         "model": "o1",
         "provider": "OpenAI",
@@ -45,8 +45,8 @@ models = {
         "tokenLimit": 100000,
         "context": "128K",
     },
-    "o1-mini": {
-        "id": "o1-mini",
+    "o1-mini-2024-09-12": {
+        "id": "o1-mini-2024-09-12",
         "name": "o1-mini",
         "model": "o1",
         "provider": "OpenAI",
@@ -152,6 +152,9 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
         "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
         "gpt-4": "gpt-4o-2024-08-06",

+        "o1-preview": "o1-preview-2024-09-12",
+        "o1-mini": "o1-mini-2024-09-12",
+
         "claude-3-opus": "claude-3-opus-20240229",
         "claude-3.5-sonnet": "claude-3-5-sonnet-20240620",
         "claude-3.5-sonnet": "claude-3-5-sonnet-20241022",
```
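The new alias entries keep the short model names working while the upstream IDs gain date suffixes; resolution through `model_aliases` amounts to a dictionary lookup with fallback (an illustrative reduction, not the mixin's actual code):

```python
model_aliases = {
    "o1-preview": "o1-preview-2024-09-12",
    "o1-mini": "o1-mini-2024-09-12",
}

def resolve(model: str) -> str:
    # Fall back to the requested name when no alias is defined.
    return model_aliases.get(model, model)

assert resolve("o1-mini") == "o1-mini-2024-09-12"
assert resolve("gpt-4o") == "gpt-4o"
```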
g4f/Provider/PollinationsAI.py (new file): 107 lines

```diff
@@ -0,0 +1,107 @@
+from __future__ import annotations
+
+from urllib.parse import quote
+import random
+import requests
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from ..image import ImageResponse
+from ..requests.raise_for_status import raise_for_status
+from ..requests.aiohttp import get_connector
+from .needs_auth.OpenaiAPI import OpenaiAPI
+from .helper import format_prompt
+
+class PollinationsAI(OpenaiAPI):
+    label = "Pollinations.AI"
+    url = "https://pollinations.ai"
+
+    working = True
+    needs_auth = False
+    supports_stream = True
+
+    default_model = "openai"
+
+    additional_models_image = ["unity", "midijourney", "rtist"]
+    additional_models_text = ["sur", "sur-mistral", "claude"]
+
+    model_aliases = {
+        "gpt-4o": "openai",
+        "mistral-nemo": "mistral",
+        "llama-3.1-70b": "llama", #
+        "gpt-3.5-turbo": "searchgpt",
+        "gpt-4": "searchgpt",
+        "gpt-3.5-turbo": "claude",
+        "gpt-4": "claude",
+        "qwen-2.5-coder-32b": "qwen-coder",
+        "claude-3.5-sonnet": "sur",
+    }
+
+    @classmethod
+    def get_models(cls):
+        if not hasattr(cls, 'image_models'):
+            cls.image_models = []
+        if not cls.image_models:
+            url = "https://image.pollinations.ai/models"
+            response = requests.get(url)
+            raise_for_status(response)
+            cls.image_models = response.json()
+            cls.image_models.extend(cls.additional_models_image)
+        if not hasattr(cls, 'models'):
+            cls.models = []
+        if not cls.models:
+            url = "https://text.pollinations.ai/models"
+            response = requests.get(url)
+            raise_for_status(response)
+            cls.models = [model.get("name") for model in response.json()]
+            cls.models.extend(cls.image_models)
+            cls.models.extend(cls.additional_models_text)
+        return cls.models
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        prompt: str = None,
+        api_base: str = "https://text.pollinations.ai/openai",
+        api_key: str = None,
+        proxy: str = None,
+        seed: str = None,
+        width: int = 1024,
+        height: int = 1024,
+        **kwargs
+    ) -> AsyncResult:
+        model = cls.get_model(model)
+        if model in cls.image_models:
+            async for response in cls._generate_image(model, messages, prompt, seed, width, height):
+                yield response
+        elif model in cls.models:
+            async for response in cls._generate_text(model, messages, api_base, api_key, proxy, **kwargs):
+                yield response
+        else:
+            raise ValueError(f"Unknown model: {model}")
+
+    @classmethod
+    async def _generate_image(cls, model: str, messages: Messages, prompt: str = None, seed: str = None, width: int = 1024, height: int = 1024):
+        if prompt is None:
+            prompt = messages[-1]["content"]
+        if seed is None:
+            seed = random.randint(0, 100000)
+        image = f"https://image.pollinations.ai/prompt/{quote(prompt)}?width={width}&height={height}&seed={int(seed)}&nofeed=true&nologo=true&model={quote(model)}"
+        yield ImageResponse(image, prompt)
+
+    @classmethod
+    async def _generate_text(cls, model: str, messages: Messages, api_base: str, api_key: str = None, proxy: str = None, **kwargs):
+        if api_key is None:
+            async with ClientSession(connector=get_connector(proxy=proxy)) as session:
+                prompt = format_prompt(messages)
+                async with session.get(f"https://text.pollinations.ai/{quote(prompt)}?model={quote(model)}") as response:
+                    await raise_for_status(response)
+                    async for line in response.content.iter_any():
+                        yield line.decode(errors="ignore")
+        else:
+            async for chunk in super().create_async_generator(
+                model, messages, api_base=api_base, proxy=proxy, **kwargs
+            ):
+                yield chunk
```
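Note that image generation in this provider is pure URL construction: `_generate_image` percent-encodes the prompt and parameters into a GET URL and wraps it in an `ImageResponse` without making a request itself. For example, with illustrative values, the URL it produces looks like:

```python
from urllib.parse import quote

prompt, model, seed, width, height = "a lighthouse at dawn", "flux", 42, 1024, 1024
image = (
    f"https://image.pollinations.ai/prompt/{quote(prompt)}"
    f"?width={width}&height={height}&seed={int(seed)}&nofeed=true&nologo=true&model={quote(model)}"
)
# -> https://image.pollinations.ai/prompt/a%20lighthouse%20at%20dawn?width=1024&height=1024&seed=42&nofeed=true&nologo=true&model=flux
```

The new width/height parameters simply flow into this query string, which is why the change needed no new request logic.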
g4f/Provider/ReplicateHome.py

```diff
@@ -19,7 +19,8 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):
     supports_system_message = True
     supports_message_history = True

-    default_model = 'yorickvp/llava-13b'
+    default_model = 'google-deepmind/gemma-2b-it'
+    default_image_model = 'stability-ai/stable-diffusion-3'

     image_models = [
         'stability-ai/stable-diffusion-3',
@@ -29,7 +30,6 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):

     text_models = [
         'google-deepmind/gemma-2b-it',
-        'yorickvp/llava-13b',
     ]

     models = text_models + image_models
@@ -42,7 +42,6 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):

         # text_models
         "gemma-2b": "google-deepmind/gemma-2b-it",
-        "llava-13b": "yorickvp/llava-13b",
     }

     model_versions = {
@@ -53,7 +52,6 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):

         # text_models
         "google-deepmind/gemma-2b-it": "dff94eaf770e1fc211e425a50b51baa8e4cac6c39ef074681f9e39d778773626",
-        "yorickvp/llava-13b": "80537f9eead1a5bfa72d5ac6ea6414379be41d4d4f6679fd776e9535d1eb58bb",
     }

     @classmethod
@@ -70,18 +68,9 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):
         headers = {
             "accept": "*/*",
             "accept-language": "en-US,en;q=0.9",
-            "cache-control": "no-cache",
             "content-type": "application/json",
             "origin": "https://replicate.com",
-            "pragma": "no-cache",
-            "priority": "u=1, i",
             "referer": "https://replicate.com/",
-            "sec-ch-ua": '"Not;A=Brand";v="24", "Chromium";v="128"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Linux"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-site",
             "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36"
         }

```
g4f/Provider/RobocodersAPI.py

```diff
@@ -2,10 +2,24 @@ from __future__ import annotations

 import json
 import aiohttp
+from pathlib import Path
+
+try:
+    from bs4 import BeautifulSoup
+    HAS_BEAUTIFULSOUP = True
+except ImportError:
+    HAS_BEAUTIFULSOUP = False
+    BeautifulSoup = None
+
+from aiohttp import ClientTimeout
+from ..errors import MissingRequirementsError
 from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from .helper import format_prompt
+
+from .. import debug

 class RobocodersAPI(AsyncGeneratorProvider, ProviderModelMixin):
     label = "API Robocoders AI"
     url = "https://api.robocoders.ai/docs"
@@ -16,6 +30,9 @@ class RobocodersAPI(AsyncGeneratorProvider, ProviderModelMixin):
     agent = [default_model, "RepoAgent", "FrontEndAgent"]
     models = [*agent]

+    CACHE_DIR = Path(__file__).parent / ".cache"
+    CACHE_FILE = CACHE_DIR / "robocoders.json"
+
     @classmethod
     async def create_async_generator(
         cls,
@@ -24,14 +41,14 @@ class RobocodersAPI(AsyncGeneratorProvider, ProviderModelMixin):
         proxy: str = None,
         **kwargs
     ) -> AsyncResult:
-        async with aiohttp.ClientSession() as session:
-            access_token = await cls._get_access_token(session)
-            if not access_token:
-                raise Exception("Failed to get access token")
-
-            session_id = await cls._create_session(session, access_token)
-            if not session_id:
-                raise Exception("Failed to create session")
+        timeout = ClientTimeout(total=600)
+
+        async with aiohttp.ClientSession(timeout=timeout) as session:
+            # Load or create access token and session ID
+            access_token, session_id = await cls._get_or_create_access_and_session(session)
+            if not access_token or not session_id:
+                raise Exception("Failed to initialize API interaction")

             headers = {
                 "Content-Type": "application/json",
@@ -45,38 +62,116 @@ class RobocodersAPI(AsyncGeneratorProvider, ProviderModelMixin):
                 "prompt": prompt,
                 "agent": model
             }

             async with session.post(cls.api_endpoint, headers=headers, json=data, proxy=proxy) as response:
-                if response.status != 200:
-                    raise Exception(f"Error: {response.status}")
+                if response.status == 401:  # Unauthorized, refresh token
+                    cls._clear_cached_data()
+                    raise Exception("Unauthorized: Invalid token, please retry.")
+                elif response.status == 422:
+                    raise Exception("Validation Error: Invalid input.")
+                elif response.status >= 500:
+                    raise Exception(f"Server Error: {response.status}")
+                elif response.status != 200:
+                    raise Exception(f"Unexpected Error: {response.status}")

                 async for line in response.content:
                     if line:
                         try:
-                            response_data = json.loads(line)
-                            message = response_data.get('message', '')
+                            # Decode bytes into a string
+                            line_str = line.decode('utf-8')
+                            response_data = json.loads(line_str)
+
+                            # Get the message from the 'args.content' or 'message' field
+                            message = (response_data.get('args', {}).get('content') or
+                                       response_data.get('message', ''))
+
                             if message:
                                 yield message
+
+                            # Check for reaching the resource limit
+                            if (response_data.get('action') == 'message' and
+                                    response_data.get('args', {}).get('wait_for_response')):
+                                # Automatically continue the dialog
+                                continue_data = {
+                                    "sid": session_id,
+                                    "prompt": "continue",
+                                    "agent": model
+                                }
+                                async with session.post(
+                                    cls.api_endpoint,
+                                    headers=headers,
+                                    json=continue_data,
+                                    proxy=proxy
+                                ) as continue_response:
+                                    if continue_response.status == 200:
+                                        async for continue_line in continue_response.content:
+                                            if continue_line:
+                                                try:
+                                                    continue_line_str = continue_line.decode('utf-8')
+                                                    continue_data = json.loads(continue_line_str)
+                                                    continue_message = (
+                                                        continue_data.get('args', {}).get('content') or
+                                                        continue_data.get('message', '')
+                                                    )
+                                                    if continue_message:
+                                                        yield continue_message
+                                                except json.JSONDecodeError:
+                                                    debug.log(f"Failed to decode continue JSON: {continue_line}")
+                                                except Exception as e:
+                                                    debug.log(f"Error processing continue response: {e}")
+
                         except json.JSONDecodeError:
-                            pass
+                            debug.log(f"Failed to decode JSON: {line}")
+                        except Exception as e:
+                            debug.log(f"Error processing response: {e}")

     @staticmethod
-    async def _get_access_token(session: aiohttp.ClientSession) -> str:
+    async def _get_or_create_access_and_session(session: aiohttp.ClientSession):
+        RobocodersAPI.CACHE_DIR.mkdir(exist_ok=True)  # Ensure cache directory exists
+
+        # Load data from cache
+        if RobocodersAPI.CACHE_FILE.exists():
+            with open(RobocodersAPI.CACHE_FILE, "r") as f:
+                data = json.load(f)
+                access_token = data.get("access_token")
+                session_id = data.get("sid")
+
+                # Validate loaded data
+                if access_token and session_id:
+                    return access_token, session_id
+
+        # If data not valid, create new access token and session ID
+        access_token = await RobocodersAPI._fetch_and_cache_access_token(session)
+        session_id = await RobocodersAPI._create_and_cache_session(session, access_token)
+        return access_token, session_id
+
+    @staticmethod
+    async def _fetch_and_cache_access_token(session: aiohttp.ClientSession) -> str:
+        if not HAS_BEAUTIFULSOUP:
+            raise MissingRequirementsError('Install "beautifulsoup4" package | pip install -U beautifulsoup4')
+            return token
+
         url_auth = 'https://api.robocoders.ai/auth'
         headers_auth = {
             'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
-            'accept-language': 'en-US,en;q=0.9',
-            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
+            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
         }

         async with session.get(url_auth, headers=headers_auth) as response:
             if response.status == 200:
-                text = await response.text()
-                return text.split('id="token">')[1].split('</pre>')[0].strip()
+                html = await response.text()
+                soup = BeautifulSoup(html, 'html.parser')
+                token_element = soup.find('pre', id='token')
+                if token_element:
+                    token = token_element.text.strip()
+
+                    # Cache the token
+                    RobocodersAPI._save_cached_data({"access_token": token})
+                    return token
             return None

     @staticmethod
-    async def _create_session(session: aiohttp.ClientSession, access_token: str) -> str:
+    async def _create_and_cache_session(session: aiohttp.ClientSession, access_token: str) -> str:
         url_create_session = 'https://api.robocoders.ai/create-session'
         headers_create_session = {
             'Authorization': f'Bearer {access_token}'
@@ -85,6 +180,58 @@ class RobocodersAPI(AsyncGeneratorProvider, ProviderModelMixin):
         async with session.get(url_create_session, headers=headers_create_session) as response:
             if response.status == 200:
                 data = await response.json()
-                return data.get('sid')
+                session_id = data.get('sid')
+
+                # Cache session ID
+                RobocodersAPI._update_cached_data({"sid": session_id})
+                return session_id
+            elif response.status == 401:
+                RobocodersAPI._clear_cached_data()
+                raise Exception("Unauthorized: Invalid token during session creation.")
+            elif response.status == 422:
+                raise Exception("Validation Error: Check input parameters.")
             return None
+
+    @staticmethod
+    def _save_cached_data(new_data: dict):
+        """Save new data to cache file"""
+        RobocodersAPI.CACHE_DIR.mkdir(exist_ok=True)
+        RobocodersAPI.CACHE_FILE.touch(exist_ok=True)
+        with open(RobocodersAPI.CACHE_FILE, "w") as f:
+            json.dump(new_data, f)
+
+    @staticmethod
+    def _update_cached_data(updated_data: dict):
+        """Update existing cache data with new values"""
+        data = {}
+        if RobocodersAPI.CACHE_FILE.exists():
+            with open(RobocodersAPI.CACHE_FILE, "r") as f:
+                try:
+                    data = json.load(f)
+                except json.JSONDecodeError:
+                    # If cache file is corrupted, start with empty dict
+                    data = {}
+
+        data.update(updated_data)
+        with open(RobocodersAPI.CACHE_FILE, "w") as f:
+            json.dump(data, f)
+
+    @staticmethod
+    def _clear_cached_data():
+        """Remove cache file"""
+        try:
+            if RobocodersAPI.CACHE_FILE.exists():
+                RobocodersAPI.CACHE_FILE.unlink()
+        except Exception as e:
+            debug.log(f"Error clearing cache: {e}")
+
+    @staticmethod
+    def _get_cached_data() -> dict:
+        """Get all cached data"""
+        if RobocodersAPI.CACHE_FILE.exists():
+            try:
+                with open(RobocodersAPI.CACHE_FILE, "r") as f:
+                    return json.load(f)
+            except json.JSONDecodeError:
+                return {}
+        return {}
```
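The optional-dependency handling follows a common pattern: attempt the import at module load, record availability in a flag, and raise an actionable error only when the feature that needs the library actually runs. A standalone sketch (using a plain RuntimeError where the provider raises g4f's MissingRequirementsError):

```python
try:
    from bs4 import BeautifulSoup
    HAS_BEAUTIFULSOUP = True
except ImportError:
    HAS_BEAUTIFULSOUP = False
    BeautifulSoup = None

def extract_token(html: str) -> str | None:
    # Parse the auth page and pull the token out of <pre id="token">,
    # replacing the earlier brittle string splitting.
    if not HAS_BEAUTIFULSOUP:
        raise RuntimeError('Install "beautifulsoup4" package | pip install -U beautifulsoup4')
    element = BeautifulSoup(html, 'html.parser').find('pre', id='token')
    return element.text.strip() if element else None
```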
g4f/Provider/__init__.py

```diff
@@ -30,6 +30,7 @@ from .MagickPen import MagickPen
 from .PerplexityLabs import PerplexityLabs
 from .Pi import Pi
 from .Pizzagpt import Pizzagpt
+from .PollinationsAI import PollinationsAI
 from .Prodia import Prodia
 from .Reka import Reka
 from .ReplicateHome import ReplicateHome
```
g4f/Provider/needs_auth/Gemini.py

```diff
@@ -16,7 +16,7 @@ except ImportError:

 from ... import debug
 from ...typing import Messages, Cookies, ImageType, AsyncResult, AsyncIterator
-from ..base_provider import AsyncGeneratorProvider, BaseConversation, SynthesizeData
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin, BaseConversation, SynthesizeData
 from ..helper import format_prompt, get_cookies
 from ...requests.raise_for_status import raise_for_status
 from ...requests.aiohttp import get_connector
@@ -50,7 +50,7 @@ UPLOAD_IMAGE_HEADERS = {
     "x-tenant-id": "bard-storage",
 }

-class Gemini(AsyncGeneratorProvider):
+class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://gemini.google.com"
     needs_auth = True
     working = True
@@ -329,4 +329,4 @@ async def iter_base64_decode(response_iter: AsyncIterator[bytes]) -> AsyncIterator[bytes]:
         chunk = buffer + chunk
         rest = len(chunk) % 4
         buffer = chunk[-rest:]
         yield base64.b64decode(chunk[:-rest])
```
g4f/Provider/needs_auth/PollinationsAI.py (deleted)
@@ -1,70 +0,0 @@
-from __future__ import annotations
-
-from urllib.parse import quote
-import random
-import requests
-from aiohttp import ClientSession
-
-from ...typing import AsyncResult, Messages
-from ...image import ImageResponse
-from ...requests.raise_for_status import raise_for_status
-from ...requests.aiohttp import get_connector
-from .OpenaiAPI import OpenaiAPI
-from ..helper import format_prompt
-
-class PollinationsAI(OpenaiAPI):
-    label = "Pollinations.AI"
-    url = "https://pollinations.ai"
-    working = True
-    needs_auth = False
-    supports_stream = True
-    default_model = "openai"
-
-    @classmethod
-    def get_models(cls):
-        if not cls.image_models:
-            url = "https://image.pollinations.ai/models"
-            response = requests.get(url)
-            raise_for_status(response)
-            cls.image_models = response.json()
-        if not cls.models:
-            url = "https://text.pollinations.ai/models"
-            response = requests.get(url)
-            raise_for_status(response)
-            cls.models = [model.get("name") for model in response.json()]
-            cls.models.extend(cls.image_models)
-        return cls.models
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        prompt: str = None,
-        api_base: str = "https://text.pollinations.ai/openai",
-        api_key: str = None,
-        proxy: str = None,
-        seed: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        model = cls.get_model(model)
-        if model in cls.image_models:
-            if prompt is None:
-                prompt = messages[-1]["content"]
-            if seed is None:
-                seed = random.randint(0, 100000)
-            image = f"https://image.pollinations.ai/prompt/{quote(prompt)}?width=1024&height=1024&seed={int(seed)}&nofeed=true&nologo=true&model={quote(model)}"
-            yield ImageResponse(image, prompt)
-            return
-        if api_key is None:
-            async with ClientSession(connector=get_connector(proxy=proxy)) as session:
-                prompt = format_prompt(messages)
-                async with session.get(f"https://text.pollinations.ai/{quote(prompt)}?model={quote(model)}") as response:
-                    await raise_for_status(response)
-                    async for line in response.content.iter_any():
-                        yield line.decode(errors="ignore")
-        else:
-            async for chunk in super().create_async_generator(
-                model, messages, api_base=api_base, proxy=proxy, **kwargs
-            ):
-                yield chunk
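For reference, the image branch of the file deleted above reduces to a single GET against the public prompt endpoint. A rough standalone equivalent, with the URL and query parameters taken from the deleted code (current behaviour of the endpoint assumed, prompt text hypothetical):

import random
from urllib.parse import quote

import requests

prompt = "a lighthouse at dusk"   # hypothetical example prompt
model = "flux"
seed = random.randint(0, 100000)  # same seed range as the deleted code
url = (
    f"https://image.pollinations.ai/prompt/{quote(prompt)}"
    f"?width=1024&height=1024&seed={seed}&nofeed=true&nologo=true&model={quote(model)}"
)
response = requests.get(url, timeout=120)
response.raise_for_status()
with open("image.jpg", "wb") as f:  # the endpoint responds with the image bytes
    f.write(response.content)

The deleted code wrapped this URL in an ImageResponse rather than downloading the bytes itself.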
g4f/Provider/needs_auth/__init__.py
@@ -20,7 +20,6 @@ from .OpenaiAPI import OpenaiAPI
 from .OpenaiChat import OpenaiChat
 from .PerplexityApi import PerplexityApi
 from .Poe import Poe
-from .PollinationsAI import PollinationsAI
 from .Raycast import Raycast
 from .Replicate import Replicate
 from .Theb import Theb
g4f/models.py
@@ -14,6 +14,7 @@ from .Provider import (
     Cloudflare,
     Copilot,
     CopilotAccount,
+    DarkAI,
     DDG,
     DeepInfraChat,
     Free2GPT,
@@ -33,6 +34,7 @@ from .Provider import (
     PerplexityLabs,
     Pi,
     Pizzagpt,
+    PollinationsAI,
     Reka,
     ReplicateHome,
     RubiksAI,
@@ -93,20 +95,20 @@ default = Model(
 gpt_35_turbo = Model(
     name = 'gpt-3.5-turbo',
     base_provider = 'OpenAI',
-    best_provider = Blackbox
+    best_provider = IterListProvider([Blackbox, ChatGptEs, PollinationsAI, DarkAI])
 )
 
 # gpt-4
 gpt_4o = Model(
     name = 'gpt-4o',
     base_provider = 'OpenAI',
-    best_provider = IterListProvider([Blackbox, ChatGptEs, ChatGpt, AmigoChat, Airforce, Liaobots, OpenaiChat])
+    best_provider = IterListProvider([Blackbox, ChatGptEs, PollinationsAI, DarkAI, ChatGpt, AmigoChat, Airforce, Liaobots, OpenaiChat])
 )
 
 gpt_4o_mini = Model(
     name = 'gpt-4o-mini',
     base_provider = 'OpenAI',
-    best_provider = IterListProvider([DDG, ChatGptEs, Pizzagpt, ChatGpt, AmigoChat, Airforce, RubiksAI, MagickPen, Liaobots, OpenaiChat])
+    best_provider = IterListProvider([DDG, Blackbox, ChatGptEs, Pizzagpt, ChatGpt, AmigoChat, Airforce, RubiksAI, MagickPen, Liaobots, OpenaiChat])
 )
 
 gpt_4_turbo = Model(
@@ -118,7 +120,7 @@ gpt_4_turbo = Model(
 gpt_4 = Model(
     name = 'gpt-4',
     base_provider = 'OpenAI',
-    best_provider = IterListProvider([DDG, Copilot, OpenaiChat, Liaobots, Airforce])
+    best_provider = IterListProvider([DDG, Blackbox, PollinationsAI, Copilot, OpenaiChat, Liaobots, Airforce])
 )
 
 # o1
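The practical effect of the widened IterListProvider lists above: when a request names one of these models without pinning a provider, g4f walks the list in order and falls back to the next entry on failure. A usage sketch, assuming the top-level ChatCompletion API of this release:

import g4f

# With no provider given, 'gpt-4' resolves to the Model defined above and its
# best_provider list (DDG, Blackbox, PollinationsAI, ...) is tried in order.
response = g4f.ChatCompletion.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Say hello"}],
)
print(response)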
@@ -171,7 +173,7 @@ llama_3_1_8b = Model(
 llama_3_1_70b = Model(
     name = "llama-3.1-70b",
     base_provider = "Meta Llama",
-    best_provider = IterListProvider([DDG, DeepInfraChat, Blackbox, Blackbox2, TeachAnything, Airforce, RubiksAI, HuggingChat, HuggingFace, PerplexityLabs])
+    best_provider = IterListProvider([DDG, DeepInfraChat, Blackbox, Blackbox2, TeachAnything, PollinationsAI, DarkAI, Airforce, RubiksAI, HuggingChat, HuggingFace, PerplexityLabs])
 )
 
 llama_3_1_405b = Model(
@@ -228,7 +230,13 @@ mistral_tiny = Model(
 mistral_nemo = Model(
     name = "mistral-nemo",
     base_provider = "Mistral",
-    best_provider = IterListProvider([HuggingChat, AmigoChat, HuggingFace])
+    best_provider = IterListProvider([PollinationsAI, HuggingChat, AmigoChat, HuggingFace])
+)
+
+mistral_large = Model(
+    name = "mistral-large",
+    base_provider = "Mistral",
+    best_provider = PollinationsAI
 )
 
 ### NousResearch ###
@@ -320,7 +328,7 @@ claude_3_haiku = Model(
 claude_3_5_sonnet = Model(
     name = 'claude-3.5-sonnet',
     base_provider = 'Anthropic',
-    best_provider = IterListProvider([Blackbox, AmigoChat, Liaobots])
+    best_provider = IterListProvider([Blackbox, PollinationsAI, AmigoChat, Liaobots])
 )
 
 claude_3_5_haiku = Model(
@@ -353,7 +361,7 @@ blackboxai_pro = Model(
 command_r_plus = Model(
     name = 'command-r-plus',
     base_provider = 'CohereForAI',
-    best_provider = IterListProvider([HuggingChat, AmigoChat])
+    best_provider = IterListProvider([PollinationsAI, HuggingChat, AmigoChat])
 )
 
 ### Qwen ###
@@ -381,7 +389,7 @@ qwen_2_5_72b = Model(
 qwen_2_5_coder_32b = Model(
     name = 'qwen-2.5-coder-32b',
     base_provider = 'Qwen',
-    best_provider = IterListProvider([DeepInfraChat, HuggingChat, HuggingFace])
+    best_provider = IterListProvider([DeepInfraChat, PollinationsAI, HuggingChat, HuggingFace])
 )
 
 qwq_32b = Model(
@@ -431,13 +439,6 @@ wizardlm_2_8x22b = Model(
     best_provider = DeepInfraChat
 )
 
-### Yorickvp ###
-llava_13b = Model(
-    name = 'llava-13b',
-    base_provider = 'Yorickvp',
-    best_provider = ReplicateHome
-)
-
 ### OpenChat ###
 openchat_3_5 = Model(
     name = 'openchat-3.5',
@@ -551,11 +552,18 @@ jamba_mini = Model(
     best_provider = AmigoChat
 )
 
-### llmplayground.net ###
-any_uncensored = Model(
-    name = 'any-uncensored',
-    base_provider = 'llmplayground.net',
-    best_provider = Airforce
+### PollinationsAI ###
+p1 = Model(
+    name = 'p1',
+    base_provider = 'PollinationsAI',
+    best_provider = PollinationsAI
+)
+
+### Uncensored AI ###
+evil = Model(
+    name = 'evil',
+    base_provider = 'Evil Mode - Experimental',
+    best_provider = IterListProvider([PollinationsAI, Airforce])
 )
 
 #############
@@ -588,13 +596,13 @@ playground_v2_5 = ImageModel(
 flux = ImageModel(
     name = 'flux',
     base_provider = 'Flux AI',
-    best_provider = IterListProvider([Blackbox, Airforce])
+    best_provider = IterListProvider([Blackbox, Blackbox2, PollinationsAI, Airforce])
 )
 
 flux_pro = ImageModel(
     name = 'flux-pro',
     base_provider = 'Flux AI',
-    best_provider = Airforce
+    best_provider = IterListProvider([PollinationsAI, Airforce])
 )
 
 flux_dev = ImageModel(
@@ -606,19 +614,25 @@ flux_dev = ImageModel(
 flux_realism = ImageModel(
     name = 'flux-realism',
     base_provider = 'Flux AI',
-    best_provider = IterListProvider([Airforce, AmigoChat])
+    best_provider = IterListProvider([PollinationsAI, Airforce, AmigoChat])
+)
+
+flux_cablyai = Model(
+    name = 'flux-cablyai',
+    base_provider = 'Flux AI',
+    best_provider = PollinationsAI
 )
 
 flux_anime = ImageModel(
     name = 'flux-anime',
     base_provider = 'Flux AI',
-    best_provider = Airforce
+    best_provider = IterListProvider([PollinationsAI, Airforce])
 )
 
 flux_3d = ImageModel(
     name = 'flux-3d',
     base_provider = 'Flux AI',
-    best_provider = Airforce
+    best_provider = IterListProvider([PollinationsAI, Airforce])
 )
 
 flux_disney = ImageModel(
@@ -653,11 +667,36 @@ recraft_v3 = ImageModel(
     best_provider = AmigoChat
 )
 
+### Midjourney ###
+midijourney = Model(
+    name = 'midijourney',
+    base_provider = 'Midjourney',
+    best_provider = PollinationsAI
+)
+
 ### Other ###
 any_dark = ImageModel(
     name = 'any-dark',
     base_provider = 'Other',
-    best_provider = Airforce
+    best_provider = IterListProvider([PollinationsAI, Airforce])
+)
+
+turbo = Model(
+    name = 'turbo',
+    base_provider = 'Other',
+    best_provider = PollinationsAI
+)
+
+unity = Model(
+    name = 'unity',
+    base_provider = 'Other',
+    best_provider = PollinationsAI
+)
+
+rtist = Model(
+    name = 'rtist',
+    base_provider = 'Other',
+    best_provider = PollinationsAI
 )
 
 class ModelUtils:
@@ -716,6 +755,7 @@ class ModelUtils:
         'mixtral-8x7b': mixtral_8x7b,
         'mistral-tiny': mistral_tiny,
         'mistral-nemo': mistral_nemo,
+        'mistral-large': mistral_large,
 
         ### NousResearch ###
         'mixtral-8x7b-dpo': mixtral_8x7b_dpo,
@@ -778,9 +818,6 @@ class ModelUtils:
         ### Inflection ###
         'pi': pi,
 
-        ### Yorickvp ###
-        'llava-13b': llava_13b,
-
         ### WizardLM ###
         'wizardlm-2-8x22b': wizardlm_2_8x22b,
 
@@ -830,9 +867,12 @@ class ModelUtils:
         ### Gryphe ###
         'mythomax-13b': mythomax_13b,
 
-        ### llmplayground.net ###
-        'any-uncensored': any_uncensored,
+        ### PollinationsAI ###
+        'p1': p1,
+
+        ### Uncensored AI ###
+        'evil': evil,
 
         #############
         ### Image ###
         #############
@@ -849,6 +889,7 @@ class ModelUtils:
         'flux-pro': flux_pro,
         'flux-dev': flux_dev,
         'flux-realism': flux_realism,
+        'flux-cablyai': flux_cablyai,
         'flux-anime': flux_anime,
         'flux-3d': flux_3d,
         'flux-disney': flux_disney,
@@ -861,8 +902,14 @@ class ModelUtils:
         ### Recraft ###
         'recraft-v3': recraft_v3,
 
+        ### Midjourney ###
+        'midijourney': midijourney,
+
         ### Other ###
         'any-dark': any_dark,
+        'turbo': turbo,
+        'unity': unity,
+        'rtist': rtist,
     }
 
 # Create a list of all working models
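The ModelUtils.convert mapping is what string model names resolve through, so the entries added above are what make the new models reachable by name. A quick lookup sketch (attribute names as defined by Model in this file):

from g4f.models import ModelUtils

model = ModelUtils.convert["mistral-large"]  # entry registered above
print(model.name)           # 'mistral-large'
print(model.base_provider)  # 'Mistral'
print(model.best_provider)  # the PollinationsAI provider class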