Comprehensive Updates and Enhancements Across Multiple Providers

kqlio67 2024-08-31 09:47:39 +03:00
parent c138f30c84
commit f1683c8db8
15 changed files with 639 additions and 396 deletions

g4f/Provider/Blackbox.py

@@ -21,6 +21,20 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
'llama-3.1-70b',
'llama-3.1-405b',
]
model_aliases = {
"gemini-flash": "gemini-1.5-flash",
}
@classmethod
def get_model(cls, model: str) -> str:
if model in cls.models:
return model
elif model in cls.model_aliases:
return cls.model_aliases[model]
else:
return cls.default_model
@classmethod
async def create_async_generator(
cls,
@@ -55,6 +69,9 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
async with ClientSession(headers=headers) as session:
random_id = secrets.token_hex(16)
random_user_id = str(uuid.uuid4())
model = cls.get_model(model) # Resolve the model alias
model_id_map = {
"blackbox": {},
"gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
@@ -62,6 +79,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"}
}
data = {
"messages": messages,
"id": random_id,
@@ -75,7 +93,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
"webSearchMode": False,
"userSystemPrompt": "",
"githubToken": None,
"trendingAgentModel": model_id_map[model], # if you actually test this on the site, just ask each model "yo", weird behavior imo
"trendingAgentModel": model_id_map.get(model, {}), # Default to empty dict if model not found
"maxTokens": None
}
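Note: together, the alias lookup in get_model and the model_id_map.get(model, {}) fallback mean an unrecognized name degrades gracefully instead of raising a KeyError. A standalone sketch of the resolution order (default_model = 'blackbox' is an assumption; the models list is reconstructed from this hunk and the models.py changes below):

    # Sketch of the Blackbox resolution order shown above.
    models = ['blackbox', 'gemini-1.5-flash', 'llama-3.1-8b', 'llama-3.1-70b', 'llama-3.1-405b']
    model_aliases = {"gemini-flash": "gemini-1.5-flash"}
    default_model = 'blackbox'  # assumed; not visible in this hunk

    def get_model(model: str) -> str:
        if model in models:            # exact match first
            return model
        elif model in model_aliases:   # then alias resolution
            return model_aliases[model]
        return default_model           # unknown names fall back

    assert get_model("gemini-flash") == "gemini-1.5-flash"
    assert get_model("no-such-model") == "blackbox"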

g4f/Provider/ChatgptFree.py

@@ -43,6 +43,7 @@ class ChatgptFree(AsyncGeneratorProvider, ProviderModelMixin):
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
}
async with StreamSession(
headers=headers,
cookies=cookies,
@@ -55,6 +56,12 @@ class ChatgptFree(AsyncGeneratorProvider, ProviderModelMixin):
async with session.get(f"{cls.url}/") as response:
await raise_for_status(response)
response = await response.text()
result = re.search(r'data-post-id="([0-9]+)"', response)
if not result:
raise RuntimeError("No post id found")
cls._post_id = result.group(1)
result = re.search(r'data-nonce="(.*?)"', response)
if result:
cls._nonce = result.group(1)
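The scraping step above pulls a WordPress post id and AJAX nonce out of the landing page; a missing post id raises immediately, while the nonce handling continues past this hunk. A self-contained sketch with an illustrative page fragment:

    import re

    # Illustrative HTML; the real text comes from the GET on {cls.url}/.
    response = '<div data-post-id="106" data-nonce="8f4a2c1b"></div>'

    result = re.search(r'data-post-id="([0-9]+)"', response)
    if not result:
        raise RuntimeError("No post id found")
    post_id = result.group(1)                      # '106'

    result = re.search(r'data-nonce="(.*?)"', response)
    nonce = result.group(1) if result else None    # '8f4a2c1b'
    print(post_id, nonce)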

g4f/Provider/DDG.py

@@ -21,8 +21,6 @@ class DDG(AsyncGeneratorProvider, ProviderModelMixin):
default_model = "gpt-4o-mini"
models = ["gpt-4o-mini", "claude-3-haiku-20240307", "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", "mistralai/Mixtral-8x7B-Instruct-v0.1"]
model_aliases = {
"gpt-4": "gpt-4o-mini",
"gpt-4o": "gpt-4o-mini",
"claude-3-haiku": "claude-3-haiku-20240307",
"llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
"mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1"

g4f/Provider/FluxAirforce.py (new file)

@@ -0,0 +1,69 @@
from __future__ import annotations
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..image import ImageResponse
class FluxAirforce(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://flux.api.airforce/"
api_endpoint = "https://api.airforce/v1/imagine2"
working = True
default_model = 'flux-realism'
models = [
'flux',
'flux-realism',
'flux-anime',
'flux-3d',
'flux-disney'
]
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
headers = {
"accept": "*/*",
"accept-language": "en-US,en;q=0.9",
"origin": "https://flux.api.airforce",
"priority": "u=1, i",
"referer": "https://flux.api.airforce/",
"sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Linux"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
}
# Assume the last message in the messages list is the prompt
prompt = messages[-1]['content'] if messages else ""
params = {
"prompt": prompt,
"size": kwargs.get("size", "1:1"),
"seed": kwargs.get("seed"),
"model": model
}
# Remove None values from params
params = {k: v for k, v in params.items() if v is not None}
async with ClientSession(headers=headers) as session:
async with session.get(f"{cls.api_endpoint}", params=params, proxy=proxy) as response:
response.raise_for_status()
# Get the URL of the generated image
image_url = str(response.url)
# Create an ImageResponse object
image_response = ImageResponse(image_url, prompt)
yield image_response
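A minimal usage sketch for the new image provider (the import path follows the __init__.py change below; size and seed are the optional kwargs read into params, and the call hits a live endpoint, so this is illustrative only):

    import asyncio
    from g4f.Provider import FluxAirforce

    async def main():
        # The prompt is taken from the last message; the provider yields a
        # single ImageResponse wrapping the generated image URL.
        async for response in FluxAirforce.create_async_generator(
            model="flux-realism",
            messages=[{"role": "user", "content": "a lighthouse at dusk"}],
            size="1:1",   # aspect ratio, forwarded as a query parameter
            seed=42,      # dropped from the request when None
        ):
            print(response)

    asyncio.run(main())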

g4f/Provider/FreeChatgpt.py

@@ -10,19 +10,16 @@ class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://chat.chatgpt.org.uk"
api_endpoint = "/api/openai/v1/chat/completions"
working = True
supports_gpt_35_turbo = True
default_model = 'gpt-3.5-turbo'
default_model = '@cf/qwen/qwen1.5-14b-chat-awq'
models = [
'gpt-3.5-turbo',
'SparkDesk-v1.1',
'deepseek-coder',
'@cf/qwen/qwen1.5-14b-chat-awq',
'deepseek-chat',
'SparkDesk-v1.1',
'Qwen2-7B-Instruct',
'glm4-9B-chat',
'chatglm3-6B',
'Yi-1.5-9B-Chat',
]
model_aliases = {
"qwen-1.5-14b": "@cf/qwen/qwen1.5-14b-chat-awq",
"sparkdesk-v1.1": "SparkDesk-v1.1",
@@ -49,6 +46,8 @@ class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
proxy: str = None,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
headers = {
"accept": "application/json, text/event-stream",
"accept-language": "en-US,en;q=0.9",
@@ -64,7 +63,6 @@ class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
}
model = cls.get_model(model)
async with ClientSession(headers=headers) as session:
prompt = format_prompt(messages)
data = {
@@ -93,6 +91,6 @@ class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
chunk = json.loads(line_str[6:])
delta_content = chunk.get("choices", [{}])[0].get("delta", {}).get("content", "")
accumulated_text += delta_content
yield delta_content
yield delta_content # Yield each chunk of content
except json.JSONDecodeError:
pass
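The streaming loop follows the usual SSE shape: each "data: " line carries an OpenAI-style JSON chunk, choices[0].delta.content is accumulated and yielded, and unparseable lines are skipped. A self-contained sketch with illustrative chunks:

    import json

    stream = [
        'data: {"choices": [{"delta": {"content": "Hel"}}]}',
        'data: {"choices": [{"delta": {"content": "lo"}}]}',
        'data: not-json',   # silently skipped by the JSONDecodeError handler
    ]

    accumulated_text = ""
    for line_str in stream:
        if line_str.startswith("data: "):
            try:
                chunk = json.loads(line_str[6:])
                delta_content = chunk.get("choices", [{}])[0].get("delta", {}).get("content", "")
                accumulated_text += delta_content
            except json.JSONDecodeError:
                pass
    print(accumulated_text)   # Hello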

g4f/Provider/HuggingChat.py

@@ -71,12 +71,17 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
}
print(model)  # debug: log which model was requested
json_data = {
'model': model,
}
response = session.post('https://huggingface.co/chat/conversation', json=json_data)
conversationId = response.json()['conversationId']
response = session.get(f'https://huggingface.co/chat/conversation/{conversationId}/__data.json?x-sveltekit-invalidated=01',)
data: list = (response.json())["nodes"][1]["data"]  # SvelteKit serializes page state as one flat list
keys: list[int] = data[data[0]["messages"]]  # indices of the conversation's message records
message_keys: dict = data[keys[0]]  # field-name -> index map for the first message
@@ -117,6 +122,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
headers=headers,
files=files,
)
first_token = True
for line in response.iter_lines():
line = json.loads(line)

g4f/Provider/HuggingFace.py

@@ -1,11 +1,14 @@
from __future__ import annotations
import json
from aiohttp import ClientSession, BaseConnector
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import get_connector
from ..errors import RateLimitError, ModelNotFoundError
from ..requests.raise_for_status import raise_for_status
class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://huggingface.co/chat"
working = True
@@ -106,6 +109,7 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
yield chunk
else:
yield (await response.json())[0]["generated_text"].strip()
def format_prompt(messages: Messages) -> str:
system_messages = [message["content"] for message in messages if message["role"] == "system"]
question = " ".join([messages[-1]["content"], *system_messages])
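Only the head of the new module-level format_prompt is visible here; the visible lines fold every system message into the final question. For example:

    messages = [
        {"role": "system", "content": "Answer briefly."},
        {"role": "user", "content": "What is HTTP?"},
    ]
    system_messages = [m["content"] for m in messages if m["role"] == "system"]
    question = " ".join([messages[-1]["content"], *system_messages])
    print(question)   # What is HTTP? Answer briefly.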

g4f/Provider/Koala.py

@@ -4,16 +4,17 @@ import json
from typing import AsyncGenerator, Optional, List, Dict, Union, Any
from aiohttp import ClientSession, BaseConnector, ClientResponse
from ..typing import Messages
from .base_provider import AsyncGeneratorProvider
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import get_random_string, get_connector
from ..requests import raise_for_status
class Koala(AsyncGeneratorProvider):
class Koala(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://koala.sh"
working = True
supports_gpt_35_turbo = True
supports_message_history = True
supports_gpt_4 = True
default_model = 'gpt-4o-mini'
@classmethod
async def create_async_generator(

g4f/Provider/Liaobots.py

@@ -1,7 +1,6 @@
from __future__ import annotations
import uuid
import requests
from aiohttp import ClientSession, BaseConnector
from ..typing import AsyncResult, Messages
@@ -9,15 +8,163 @@ from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import get_connector
from ..requests import raise_for_status
models = {
"gpt-4o-mini-free": {
"id": "gpt-4o-mini-free",
"name": "GPT-4o-Mini-Free",
"model": "ChatGPT",
"provider": "OpenAI",
"maxLength": 31200,
"tokenLimit": 7800,
"context": "8K",
},
"gpt-4o-mini": {
"id": "gpt-4o-mini",
"name": "GPT-4o-Mini",
"model": "ChatGPT",
"provider": "OpenAI",
"maxLength": 260000,
"tokenLimit": 126000,
"context": "128K",
},
"gpt-4o-free": {
"id": "gpt-4o-free",
"name": "GPT-4o-free",
"model": "ChatGPT",
"provider": "OpenAI",
"maxLength": 31200,
"tokenLimit": 7800,
"context": "8K",
},
"gpt-4-turbo-2024-04-09": {
"id": "gpt-4-turbo-2024-04-09",
"name": "GPT-4-Turbo",
"model": "ChatGPT",
"provider": "OpenAI",
"maxLength": 260000,
"tokenLimit": 126000,
"context": "128K",
},
"gpt-4o-2024-08-06": {
"id": "gpt-4o-2024-08-06",
"name": "GPT-4o",
"model": "ChatGPT",
"provider": "OpenAI",
"maxLength": 260000,
"tokenLimit": 126000,
"context": "128K",
},
"gpt-4-0613": {
"id": "gpt-4-0613",
"name": "GPT-4-0613",
"model": "ChatGPT",
"provider": "OpenAI",
"maxLength": 32000,
"tokenLimit": 7600,
"context": "8K",
},
"claude-3-opus-20240229": {
"id": "claude-3-opus-20240229",
"name": "Claude-3-Opus",
"model": "Claude",
"provider": "Anthropic",
"maxLength": 800000,
"tokenLimit": 200000,
"context": "200K",
},
"claude-3-opus-20240229-aws": {
"id": "claude-3-opus-20240229-aws",
"name": "Claude-3-Opus-Aws",
"model": "Claude",
"provider": "Anthropic",
"maxLength": 800000,
"tokenLimit": 200000,
"context": "200K",
},
"claude-3-opus-20240229-gcp": {
"id": "claude-3-opus-20240229-gcp",
"name": "Claude-3-Opus-Gcp",
"model": "Claude",
"provider": "Anthropic",
"maxLength": 800000,
"tokenLimit": 200000,
"context": "200K",
},
"claude-3-sonnet-20240229": {
"id": "claude-3-sonnet-20240229",
"name": "Claude-3-Sonnet",
"model": "Claude",
"provider": "Anthropic",
"maxLength": 800000,
"tokenLimit": 200000,
"context": "200K",
},
"claude-3-5-sonnet-20240620": {
"id": "claude-3-5-sonnet-20240620",
"name": "Claude-3.5-Sonnet",
"model": "Claude",
"provider": "Anthropic",
"maxLength": 800000,
"tokenLimit": 200000,
"context": "200K",
},
"claude-3-haiku-20240307": {
"id": "claude-3-haiku-20240307",
"name": "Claude-3-Haiku",
"model": "Claude",
"provider": "Anthropic",
"maxLength": 800000,
"tokenLimit": 200000,
"context": "200K",
},
"claude-2.1": {
"id": "claude-2.1",
"name": "Claude-2.1-200k",
"model": "Claude",
"provider": "Anthropic",
"maxLength": 800000,
"tokenLimit": 200000,
"context": "200K",
},
"gemini-1.0-pro-latest": {
"id": "gemini-1.0-pro-latest",
"name": "Gemini-Pro",
"model": "Gemini",
"provider": "Google",
"maxLength": 120000,
"tokenLimit": 30000,
"context": "32K",
},
"gemini-1.5-flash-latest": {
"id": "gemini-1.5-flash-latest",
"name": "Gemini-1.5-Flash-1M",
"model": "Gemini",
"provider": "Google",
"maxLength": 4000000,
"tokenLimit": 1000000,
"context": "1024K",
},
"gemini-1.5-pro-latest": {
"id": "gemini-1.5-pro-latest",
"name": "Gemini-1.5-Pro-1M",
"model": "Gemini",
"provider": "Google",
"maxLength": 4000000,
"tokenLimit": 1000000,
"context": "1024K",
},
}
class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://liaobots.site"
working = True
supports_message_history = True
supports_system_message = True
supports_gpt_35_turbo = True
supports_gpt_4 = True
default_model = "gpt-4o"
models = None
models = list(models.keys())
model_aliases = {
"gpt-4o-mini": "gpt-4o-mini-free",
"gpt-4o": "gpt-4o-free",
@@ -37,45 +184,27 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
"gemini-flash": "gemini-1.5-flash-latest",
"gemini-pro": "gemini-1.5-pro-latest",
}
_auth_code = ""
_cookie_jar = None
@classmethod
def get_models(cls):
if cls.models is None:
url = 'https://liaobots.work/api/models'
headers = {
'accept': '*/*',
'accept-language': 'en-US,en;q=0.9',
'content-type': 'application/json',
'cookie': 'gkp2=ehnhUPJtkCgMmod8Sbxn',
'origin': 'https://liaobots.work',
'priority': 'u=1, i',
'referer': 'https://liaobots.work/',
'sec-ch-ua': '"Chromium";v="127", "Not)A;Brand";v="99"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Linux"',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36'
}
data = {'key': ''}
def get_model(cls, model: str) -> str:
"""
Retrieve the internal model identifier based on the provided model name or alias.
"""
if model in cls.model_aliases:
model = cls.model_aliases[model]
if model not in models:
raise ValueError(f"Model '{model}' is not supported.")
return model
response = requests.post(url, headers=headers, json=data)
if response.status_code == 200:
try:
models_data = response.json()
cls.models = {model['id']: model for model in models_data}
except (ValueError, KeyError) as e:
print(f"Error processing JSON response: {e}")
cls.models = {}
else:
print(f"Request failed with status code: {response.status_code}")
cls.models = {}
return cls.models
@classmethod
def is_supported(cls, model: str) -> bool:
"""
Check if the given model is supported.
"""
return model in models or model in cls.model_aliases
@classmethod
async def create_async_generator(
@@ -87,6 +216,8 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
connector: BaseConnector = None,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
headers = {
"authority": "liaobots.com",
"content-type": "application/json",
@@ -99,10 +230,9 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
cookie_jar=cls._cookie_jar,
connector=get_connector(connector, proxy, True)
) as session:
models = cls.get_models()
data = {
"conversationId": str(uuid.uuid4()),
"model": models[cls.get_model(model)],
"model": models[model],
"messages": messages,
"key": "",
"prompt": kwargs.get("system_message", "You are a helpful assistant."),
@@ -115,21 +245,39 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
) as response:
await raise_for_status(response)
try:
await cls.ensure_auth_code(session)
async with session.post(
"https://liaobots.work/api/chat",
json=data,
headers={"x-auth-code": cls._auth_code},
"https://liaobots.work/api/user",
json={"authcode": cls._auth_code},
verify_ssl=False
) as response:
await raise_for_status(response)
async for chunk in response.content.iter_any():
if b"<html coupert-item=" in chunk:
raise RuntimeError("Invalid session")
if chunk:
yield chunk.decode(errors="ignore")
except:
await cls.initialize_auth_code(session)
cls._auth_code = (await response.json(content_type=None))["authCode"]
if not cls._auth_code:
raise RuntimeError("Empty auth code")
cls._cookie_jar = session.cookie_jar
async with session.post(
"https://liaobots.work/api/chat",
json=data,
headers={"x-auth-code": cls._auth_code},
verify_ssl=False
) as response:
await raise_for_status(response)
async for chunk in response.content.iter_any():
if b"<html coupert-item=" in chunk:
raise RuntimeError("Invalid session")
if chunk:
yield chunk.decode(errors="ignore")
except:
async with session.post(
"https://liaobots.work/api/user",
json={"authcode": "pTIQr4FTnVRfr"},
verify_ssl=False
) as response:
await raise_for_status(response)
cls._auth_code = (await response.json(content_type=None))["authCode"]
if not cls._auth_code:
raise RuntimeError("Empty auth code")
cls._cookie_jar = session.cookie_jar
async with session.post(
"https://liaobots.work/api/chat",
json=data,
@@ -142,24 +290,6 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
raise RuntimeError("Invalid session")
if chunk:
yield chunk.decode(errors="ignore")
@classmethod
def get_model(cls, model: str) -> str:
"""
Retrieve the internal model identifier based on the provided model name or alias.
"""
if model in cls.model_aliases:
model = cls.model_aliases[model]
models = cls.get_models()
if model not in models:
raise ValueError(f"Model '{model}' is not supported.")
return model
@classmethod
def is_supported(cls, model: str) -> bool:
"""
Check if the given model is supported.
"""
models = cls.get_models()
return model in models or model in cls.model_aliases
@classmethod
async def initialize_auth_code(cls, session: ClientSession) -> None:
@@ -176,6 +306,7 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
if not cls._auth_code:
raise RuntimeError("Empty auth code")
cls._cookie_jar = session.cookie_jar
@classmethod
async def ensure_auth_code(cls, session: ClientSession) -> None:
"""
@@ -183,18 +314,3 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
"""
if not cls._auth_code:
await cls.initialize_auth_code(session)
@classmethod
async def refresh_auth_code(cls, session: ClientSession) -> None:
"""
Refresh the auth code by making a new request.
"""
await cls.initialize_auth_code(session)
@classmethod
async def get_auth_code(cls, session: ClientSession) -> str:
"""
Get the current auth code, initializing it if necessary.
"""
await cls.ensure_auth_code(session)
return cls._auth_code
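The rewritten request flow reduces to: make sure an auth code exists, try the chat endpoint, and on any failure re-authenticate against /api/user with the hard-coded fallback authcode before retrying once. A schematic of that pattern with the HTTP calls replaced by hypothetical stubs (fetch_auth_code and post_chat are stand-ins, not the provider's API):

    import asyncio

    # Hypothetical stubs standing in for the session.post calls above.
    async def fetch_auth_code(authcode: str) -> str:
        return "fresh-code" if authcode else "stale-code"

    async def post_chat(data: dict, auth_code: str):
        if auth_code == "stale-code":
            raise RuntimeError("Invalid session")   # forces the retry branch
        yield "chunk-1"
        yield "chunk-2"

    async def chat_with_retry(data: dict, auth_code: str = ""):
        if not auth_code:                            # ensure_auth_code
            auth_code = await fetch_auth_code("")
        try:
            async for chunk in post_chat(data, auth_code):
                yield chunk
        except RuntimeError:                         # the diff uses a bare except
            auth_code = await fetch_auth_code("pTIQr4FTnVRfr")
            async for chunk in post_chat(data, auth_code):
                yield chunk

    async def main():
        async for chunk in chat_with_retry({"messages": []}):
            print(chunk)

    asyncio.run(main())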

g4f/Provider/LiteIcoding.py

@@ -1,6 +1,7 @@
from __future__ import annotations
from aiohttp import ClientSession, ClientResponseError
import re
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
@@ -31,7 +32,7 @@ class LiteIcoding(AsyncGeneratorProvider, ProviderModelMixin):
headers = {
"Accept": "*/*",
"Accept-Language": "en-US,en;q=0.9",
"Authorization": "Bearer b3b2712cf83640a5acfdc01e78369930",
"Authorization": "Bearer aa3020ee873e40cb8b3f515a0708ebc4",
"Connection": "keep-alive",
"Content-Type": "application/json;charset=utf-8",
"DNT": "1",
@@ -97,7 +98,11 @@ class LiteIcoding(AsyncGeneratorProvider, ProviderModelMixin):
.replace('\\"', '"')
.strip()
)
yield full_response.strip()
# Add filter to remove unwanted text
filtered_response = re.sub(r'\n---\n.*', '', full_response, flags=re.DOTALL)
# Remove extra quotes at the beginning and end
cleaned_response = filtered_response.strip().strip('"')
yield cleaned_response
except ClientResponseError as e:
raise RuntimeError(
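The new post-processing drops everything from the first "\n---\n" separator onward (DOTALL makes ".*" span newlines), then trims stray surrounding quotes. Illustrative input and output:

    import re

    full_response = '"The capital of France is Paris.\n---\nRelated questions: ..."'
    filtered_response = re.sub(r'\n---\n.*', '', full_response, flags=re.DOTALL)
    cleaned_response = filtered_response.strip().strip('"')
    print(cleaned_response)   # The capital of France is Paris.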

g4f/Provider/Upstage.py (new file, 74 lines)

@@ -0,0 +1,74 @@
from __future__ import annotations
from aiohttp import ClientSession
import json
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
class Upstage(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://console.upstage.ai/playground/chat"
api_endpoint = "https://ap-northeast-2.apistage.ai/v1/web/demo/chat/completions"
working = True
default_model = 'upstage/solar-1-mini-chat'
models = [
'upstage/solar-1-mini-chat',
'upstage/solar-1-mini-chat-ja',
]
model_aliases = {
"solar-1-mini": "upstage/solar-1-mini-chat",
"solar-1-mini": "upstage/solar-1-mini-chat-ja",
}
@classmethod
def get_model(cls, model: str) -> str:
if model in cls.models:
return model
elif model in cls.model_aliases:
return cls.model_aliases[model]
else:
return cls.default_model
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
headers = {
"accept": "*/*",
"accept-language": "en-US,en;q=0.9",
"content-type": "application/json",
"origin": "https://console.upstage.ai",
"priority": "u=1, i",
"referer": "https://console.upstage.ai/",
"sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Linux"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "cross-site",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
}
async with ClientSession(headers=headers) as session:
data = {
"stream": True,
"messages": [{"role": "user", "content": format_prompt(messages)}],
"model": model
}
async with session.post(f"{cls.api_endpoint}", json=data, proxy=proxy) as response:
response.raise_for_status()
async for line in response.content:
if line:
line = line.decode('utf-8').strip()
if line.startswith("data: ") and line != "data: [DONE]":
data = json.loads(line[6:])
content = data['choices'][0]['delta'].get('content', '')
if content:
yield content
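A minimal usage sketch for the new provider (the import follows the __init__.py change below; the alias resolves through get_model, and the call hits a live endpoint, so this is illustrative only):

    import asyncio
    from g4f.Provider import Upstage

    async def main():
        # 'solar-1-mini' resolves via model_aliases; unknown names fall back
        # to the default 'upstage/solar-1-mini-chat'.
        async for token in Upstage.create_async_generator(
            model="solar-1-mini",
            messages=[{"role": "user", "content": "Hello!"}],
        ):
            print(token, end="", flush=True)

    asyncio.run(main())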

g4f/Provider/You.py

@@ -24,7 +24,6 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
image_models = ["dall-e"]
models = [
default_model,
"gpt-4o-mini",
"gpt-4o",
"gpt-4-turbo",
"gpt-4",

g4f/Provider/__init__.py

@@ -6,7 +6,6 @@ from ..providers.base_provider import AsyncProvider, AsyncGeneratorProvider
from ..providers.create_images import CreateImagesProvider
from .deprecated import *
from .not_working import *
from .selenium import *
from .needs_auth import *
@@ -21,16 +20,15 @@ from .ChatGot import ChatGot
from .Chatgpt4o import Chatgpt4o
from .Chatgpt4Online import Chatgpt4Online
from .ChatgptFree import ChatgptFree
from .Cohere import Cohere
from .DDG import DDG
from .DeepInfra import DeepInfra
from .DeepInfraImage import DeepInfraImage
from .FlowGpt import FlowGpt
from .FluxAirforce import FluxAirforce
from .FreeChatgpt import FreeChatgpt
from .FreeGpt import FreeGpt
from .FreeNetfly import FreeNetfly
from .GeminiPro import GeminiPro
from .GeminiProChat import GeminiProChat
from .GigaChat import GigaChat
from .GptTalkRu import GptTalkRu
from .HuggingChat import HuggingChat
@@ -43,18 +41,17 @@ from .Llama import Llama
from .Local import Local
from .MagickPenAsk import MagickPenAsk
from .MagickPenChat import MagickPenChat
from .Marsyoo import Marsyoo
from .MetaAI import MetaAI
from .MetaAIAccount import MetaAIAccount
from .Ollama import Ollama
from .PerplexityLabs import PerplexityLabs
from .Pi import Pi
from .Pizzagpt import Pizzagpt
from .Upstage import Upstage
from .Reka import Reka
from .Replicate import Replicate
from .ReplicateHome import ReplicateHome
from .Rocks import Rocks
from .TeachAnything import TeachAnything
from .Vercel import Vercel
from .WhiteRabbitNeo import WhiteRabbitNeo
from .You import You

g4f/models.py

@@ -4,7 +4,6 @@ from dataclasses import dataclass
from .Provider import IterListProvider, ProviderType
from .Provider import (
AI365VIP,
Allyfy,
Bing,
Blackbox,
@@ -15,21 +14,19 @@ from .Provider import (
DDG,
DeepInfra,
DeepInfraImage,
FluxAirforce,
FreeChatgpt,
FreeGpt,
FreeNetfly,
Gemini,
GeminiPro,
GeminiProChat,
GigaChat,
HuggingChat,
HuggingFace,
Koala,
Liaobots,
LiteIcoding,
MagickPenAsk,
MagickPenChat,
Marsyoo,
MetaAI,
OpenaiChat,
PerplexityLabs,
@@ -38,7 +35,7 @@ from .Provider import (
Reka,
Replicate,
ReplicateHome,
TeachAnything,
Upstage,
You,
)
@@ -66,33 +63,19 @@ default = Model(
name = "",
base_provider = "",
best_provider = IterListProvider([
Bing,
You,
OpenaiChat,
FreeChatgpt,
AI365VIP,
Chatgpt4o,
ChatGot,
Chatgpt4Online,
DDG,
ChatgptFree,
Koala,
Pizzagpt,
])
)
# GPT-3.5 too, but all providers support long requests and responses
gpt_35_long = Model(
name = 'gpt-3.5-turbo',
base_provider = 'openai',
best_provider = IterListProvider([
FreeGpt,
You,
Koala,
ChatgptFree,
FreeChatgpt,
DDG,
AI365VIP,
FreeNetfly,
Gemini,
HuggingChat,
MagickPenAsk,
MagickPenChat,
Pizzagpt,
Allyfy,
ChatgptFree,
ReplicateHome,
Upstage,
])
)
@@ -107,38 +90,16 @@ gpt_35_turbo = Model(
name = 'gpt-3.5-turbo',
base_provider = 'openai',
best_provider = IterListProvider([
FreeGpt,
You,
Koala,
ChatgptFree,
FreeChatgpt,
AI365VIP,
Pizzagpt,
Allyfy,
])
)
# gpt-4
gpt_4 = Model(
name = 'gpt-4',
base_provider = 'openai',
best_provider = IterListProvider([
Bing, Chatgpt4Online
])
)
gpt_4_turbo = Model(
name = 'gpt-4-turbo',
base_provider = 'openai',
best_provider = IterListProvider([
Bing, Liaobots, LiteIcoding
])
)
gpt_4o = Model(
name = 'gpt-4o',
base_provider = 'openai',
best_provider = IterListProvider([
You, Liaobots, Chatgpt4o, AI365VIP, OpenaiChat, Marsyoo, LiteIcoding, MagickPenAsk,
Liaobots, Chatgpt4o, OpenaiChat,
])
)
@@ -146,10 +107,26 @@ gpt_4o_mini = Model(
name = 'gpt-4o-mini',
base_provider = 'openai',
best_provider = IterListProvider([
DDG, Liaobots, OpenaiChat, You, FreeNetfly, MagickPenChat,
DDG, Liaobots, You, FreeNetfly, MagickPenAsk, MagickPenChat, Pizzagpt, ChatgptFree, OpenaiChat, Koala,
])
)
gpt_4_turbo = Model(
name = 'gpt-4-turbo',
base_provider = 'openai',
best_provider = IterListProvider([
Liaobots, Bing
])
)
gpt_4 = Model(
name = 'gpt-4',
base_provider = 'openai',
best_provider = IterListProvider([
Chatgpt4Online, Bing,
gpt_4_turbo.best_provider, gpt_4o.best_provider, gpt_4o_mini.best_provider
])
)
### GigaChat ###
gigachat = Model(
@@ -161,81 +138,65 @@ gigachat = Model(
### Meta ###
meta = Model(
name = "meta",
name = "meta-ai",
base_provider = "meta",
best_provider = MetaAI
)
llama_3_8b_instruct = Model(
name = "meta-llama/Meta-Llama-3-8B-Instruct",
llama_3_8b = Model(
name = "llama-3-8b",
base_provider = "meta",
best_provider = IterListProvider([DeepInfra, PerplexityLabs, Replicate])
best_provider = IterListProvider([DeepInfra, Replicate])
)
llama_3_70b_instruct = Model(
name = "meta-llama/Meta-Llama-3-70B-Instruct",
llama_3_70b = Model(
name = "llama-3-70b",
base_provider = "meta",
best_provider = IterListProvider([DeepInfra, PerplexityLabs, Replicate])
best_provider = IterListProvider([ReplicateHome, DeepInfra, PerplexityLabs, Replicate])
)
llama_3_70b_instruct = Model(
name = "meta/meta-llama-3-70b-instruct",
llama_3_1_8b = Model(
name = "llama-3.1-8b",
base_provider = "meta",
best_provider = IterListProvider([ReplicateHome, TeachAnything])
best_provider = IterListProvider([Blackbox])
)
llama_3_70b_chat_hf = Model(
name = "meta-llama/Llama-3-70b-chat-hf",
llama_3_1_70b = Model(
name = "llama-3.1-70b",
base_provider = "meta",
best_provider = IterListProvider([DDG])
best_provider = IterListProvider([DDG, HuggingChat, FreeGpt, Blackbox, HuggingFace])
)
llama_3_1_70b_instruct = Model(
name = "meta-llama/Meta-Llama-3.1-70B-Instruct",
llama_3_1_405b = Model(
name = "llama-3.1-405b",
base_provider = "meta",
best_provider = IterListProvider([HuggingChat, HuggingFace])
best_provider = IterListProvider([HuggingChat, Blackbox, HuggingFace])
)
llama_3_1_405b_instruct_FP8 = Model(
name = "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8",
base_provider = "meta",
best_provider = IterListProvider([HuggingChat, HuggingFace])
)
### Mistral ###
mixtral_8x7b = Model(
name = "mistralai/Mixtral-8x7B-Instruct-v0.1",
name = "mixtral-8x7b",
base_provider = "huggingface",
best_provider = IterListProvider([DeepInfra, HuggingFace, PerplexityLabs, HuggingChat, DDG, ReplicateHome])
best_provider = IterListProvider([HuggingChat, DDG, ReplicateHome, DeepInfra, HuggingFace,])
)
mistral_7b_v02 = Model(
name = "mistralai/Mistral-7B-Instruct-v0.2",
mistral_7b = Model(
name = "mistral-7b",
base_provider = "huggingface",
best_provider = IterListProvider([DeepInfra, HuggingFace, HuggingChat])
best_provider = IterListProvider([HuggingChat, HuggingFace, DeepInfra])
)
### NousResearch ###
Nous_Hermes_2_Mixtral_8x7B_DPO = Model(
name = "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
base_provider = "NousResearch",
best_provider = IterListProvider([HuggingFace, HuggingChat])
)
### 01-ai ###
Yi_1_5_34B_chat = Model(
name = "01-ai/Yi-1.5-34B-Chat",
yi_1_5_34b = Model(
name = "yi-1.5-34b",
base_provider = "01-ai",
best_provider = IterListProvider([HuggingFace, HuggingChat])
best_provider = IterListProvider([HuggingChat, HuggingFace])
)
### Microsoft ###
Phi_3_mini_4k_instruct = Model(
name = "microsoft/Phi-3-mini-4k-instruct",
phi_3_mini_4k = Model(
name = "phi-3-mini-4k",
base_provider = "Microsoft",
best_provider = IterListProvider([HuggingFace, HuggingChat])
)
@@ -252,41 +213,22 @@ gemini = Model(
gemini_pro = Model(
name = 'gemini-pro',
base_provider = 'Google',
best_provider = IterListProvider([GeminiPro, You, ChatGot, GeminiProChat, Liaobots, LiteIcoding])
best_provider = IterListProvider([GeminiPro, ChatGot, Liaobots])
)
gemini_flash = Model(
name = 'gemini-flash',
base_provider = 'Google',
best_provider = IterListProvider([Liaobots])
)
gemini_1_5 = Model(
name = 'gemini-1.5',
base_provider = 'Google',
best_provider = IterListProvider([LiteIcoding])
best_provider = IterListProvider([Liaobots, Blackbox])
)
# gemma
gemma_2b_it = Model(
name = 'gemma-2b-it',
gemma_2b = Model(
name = 'gemma-2b',
base_provider = 'Google',
best_provider = IterListProvider([ReplicateHome])
)
gemma_2_9b_it = Model(
name = 'gemma-2-9b-it',
base_provider = 'Google',
best_provider = IterListProvider([PerplexityLabs])
)
gemma_2_27b_it = Model(
name = 'gemma-2-27b-it',
base_provider = 'Google',
best_provider = IterListProvider([PerplexityLabs])
)
### Anthropic ###
claude_2 = Model(
name = 'claude-2',
@@ -309,13 +251,13 @@ claude_2_1 = Model(
claude_3_opus = Model(
name = 'claude-3-opus',
base_provider = 'Anthropic',
best_provider = IterListProvider([You, Liaobots])
best_provider = IterListProvider([Liaobots])
)
claude_3_sonnet = Model(
name = 'claude-3-sonnet',
base_provider = 'Anthropic',
best_provider = IterListProvider([You, Liaobots])
best_provider = IterListProvider([Liaobots])
)
claude_3_5_sonnet = Model(
@@ -327,19 +269,7 @@ claude_3_5_sonnet = Model(
claude_3_haiku = Model(
name = 'claude-3-haiku',
base_provider = 'Anthropic',
best_provider = IterListProvider([DDG, AI365VIP, Liaobots])
)
claude_3 = Model(
name = 'claude-3',
base_provider = 'Anthropic',
best_provider = IterListProvider([LiteIcoding])
)
claude_3_5 = Model(
name = 'claude-3.5',
base_provider = 'Anthropic',
best_provider = IterListProvider([LiteIcoding])
best_provider = IterListProvider([DDG, Liaobots])
)
@@ -351,14 +281,6 @@ reka_core = Model(
)
### NVIDIA ###
nemotron_4_340b_instruct = Model(
name = 'nemotron-4-340b-instruct',
base_provider = 'NVIDIA',
best_provider = IterListProvider([PerplexityLabs])
)
### Blackbox ###
blackbox = Model(
name = 'blackbox',
@@ -369,7 +291,7 @@ blackbox = Model(
### Databricks ###
dbrx_instruct = Model(
name = 'databricks/dbrx-instruct',
name = 'dbrx-instruct',
base_provider = 'Databricks',
best_provider = IterListProvider([DeepInfra])
)
@@ -377,65 +299,57 @@ dbrx_instruct = Model(
### CohereForAI ###
command_r_plus = Model(
name = 'CohereForAI/c4ai-command-r-plus',
name = 'command-r-plus',
base_provider = 'CohereForAI',
best_provider = IterListProvider([HuggingChat])
)
### iFlytek ###
SparkDesk_v1_1 = Model(
name = 'SparkDesk-v1.1',
sparkdesk_v1_1 = Model(
name = 'sparkdesk-v1.1',
base_provider = 'iFlytek',
best_provider = IterListProvider([FreeChatgpt])
)
### DeepSeek ###
deepseek_coder = Model(
name = 'deepseek-coder',
base_provider = 'DeepSeek',
best_provider = IterListProvider([FreeChatgpt])
)
deepseek_chat = Model(
name = 'deepseek-chat',
base_provider = 'DeepSeek',
best_provider = IterListProvider([FreeChatgpt])
)
### Qwen ###
Qwen2_7B_instruct = Model(
name = 'Qwen2-7B-Instruct',
qwen_1_5_14b = Model(
name = 'qwen-1.5-14b',
base_provider = 'Qwen',
best_provider = IterListProvider([FreeChatgpt])
)
### Zhipu AI ###
glm4_9B_chat = Model(
name = 'glm4-9B-chat',
glm4_9b = Model(
name = 'glm4-9B',
base_provider = 'Zhipu AI',
best_provider = IterListProvider([FreeChatgpt])
)
chatglm3_6B = Model(
name = 'chatglm3-6B',
chatglm3_6b = Model(
name = 'chatglm3-6b',
base_provider = 'Zhipu AI',
best_provider = IterListProvider([FreeChatgpt])
)
### 01-ai ###
Yi_1_5_9B_chat = Model(
name = 'Yi-1.5-9B-Chat',
yi_1_5_9b = Model(
name = 'yi-1.5-9b',
base_provider = '01-ai',
best_provider = IterListProvider([FreeChatgpt])
)
### Other ###
### Pi ###
solar_1_mini = Model(
name = 'solar-1-mini',
base_provider = 'Upstage',
best_provider = IterListProvider([Upstage])
)
### Pi ###
pi = Model(
name = 'pi',
base_provider = 'inflection',
@@ -449,30 +363,60 @@ pi = Model(
### Stability AI ###
sdxl = Model(
name = 'stability-ai/sdxl',
name = 'sdxl',
base_provider = 'Stability AI',
best_provider = IterListProvider([DeepInfraImage])
best_provider = IterListProvider([ReplicateHome, DeepInfraImage])
)
stable_diffusion_3 = Model(
name = 'stability-ai/stable-diffusion-3',
sd_3 = Model(
name = 'sd-3',
base_provider = 'Stability AI',
best_provider = IterListProvider([ReplicateHome])
)
sdxl_lightning_4step = Model(
name = 'bytedance/sdxl-lightning-4step',
### Playground ###
playground_v2_5 = Model(
name = 'playground-v2.5',
base_provider = 'Stability AI',
best_provider = IterListProvider([ReplicateHome])
)
playground_v2_5_1024px_aesthetic = Model(
name = 'playgroundai/playground-v2.5-1024px-aesthetic',
base_provider = 'Stability AI',
best_provider = IterListProvider([ReplicateHome])
### Flux AI ###
flux = Model(
name = 'flux',
base_provider = 'Flux AI',
best_provider = IterListProvider([FluxAirforce])
)
flux_realism = Model(
name = 'flux-realism',
base_provider = 'Flux AI',
best_provider = IterListProvider([FluxAirforce])
)
flux_anime = Model(
name = 'flux-anime',
base_provider = 'Flux AI',
best_provider = IterListProvider([FluxAirforce])
)
flux_3d = Model(
name = 'flux-3d',
base_provider = 'Flux AI',
best_provider = IterListProvider([FluxAirforce])
)
flux_disney = Model(
name = 'flux-disney',
base_provider = 'Flux AI',
best_provider = IterListProvider([FluxAirforce])
)
@@ -485,126 +429,133 @@ class ModelUtils:
"""
convert: dict[str, Model] = {
############
### Text ###
############
############
### Text ###
############
### OpenAI ###
### GPT-3.5 / GPT-4 ###
# gpt-3.5
'gpt-3.5-turbo': gpt_35_turbo,
'gpt-3.5-long': gpt_35_long,
### OpenAI ###
# gpt-3.5
'gpt-3.5-turbo': gpt_35_turbo,
# gpt-4
'gpt-4o' : gpt_4o,
'gpt-4o-mini' : gpt_4o_mini,
'gpt-4' : gpt_4,
'gpt-4-turbo' : gpt_4_turbo,
# gpt-4
'gpt-4o' : gpt_4o,
'gpt-4o-mini' : gpt_4o_mini,
'gpt-4' : gpt_4,
'gpt-4-turbo' : gpt_4_turbo,
### Meta ###
"meta-ai": meta,
'llama-3-8b-instruct': llama_3_8b_instruct,
'llama-3-70b-instruct': llama_3_70b_instruct,
'llama-3-70b-chat': llama_3_70b_chat_hf,
'llama-3-70b-instruct': llama_3_70b_instruct,
### Meta ###
"meta-ai": meta,
'llama-3.1-70b': llama_3_1_70b_instruct,
'llama-3.1-405b': llama_3_1_405b_instruct_FP8,
'llama-3.1-70b-instruct': llama_3_1_70b_instruct,
'llama-3.1-405b-instruct': llama_3_1_405b_instruct_FP8,
# llama-3
'llama-3-8b': llama_3_8b,
'llama-3-70b': llama_3_70b,
### Mistral (Opensource) ###
'mixtral-8x7b': mixtral_8x7b,
'mistral-7b-v02': mistral_7b_v02,
# llama-3.1
'llama-3.1-8b': llama_3_1_8b,
'llama-3.1-70b': llama_3_1_70b,
'llama-3.1-405b': llama_3_1_405b,
### NousResearch ###
'Nous-Hermes-2-Mixtral-8x7B-DPO': Nous_Hermes_2_Mixtral_8x7B_DPO,
### 01-ai ###
'Yi-1.5-34b-chat': Yi_1_5_34B_chat,
### Mistral ###
'mixtral-8x7b': mixtral_8x7b,
'mistral-7b': mistral_7b,
### Microsoft ###
'Phi-3-mini-4k-instruct': Phi_3_mini_4k_instruct,
### Google ###
# gemini
'gemini': gemini,
'gemini-pro': gemini_pro,
'gemini-1.5': gemini_1_5,
'gemini-flash': gemini_flash,
### 01-ai ###
'yi-1.5-34b': yi_1_5_34b,
# gemma
'gemma-2b': gemma_2b_it,
'gemma-2-9b': gemma_2_9b_it,
'gemma-2-27b': gemma_2_27b_it,
### Anthropic ###
'claude-2': claude_2,
'claude-2.0': claude_2_0,
'claude-2.1': claude_2_1,
### Microsoft ###
'phi-3-mini-4k': phi_3_mini_4k,
'claude-3-opus': claude_3_opus,
'claude-3-sonnet': claude_3_sonnet,
'claude-3-5-sonnet': claude_3_5_sonnet,
'claude-3-haiku': claude_3_haiku,
'claude-3': claude_3,
'claude-3.5': claude_3_5,
### Google ###
# gemini
'gemini': gemini,
'gemini-pro': gemini_pro,
'gemini-flash': gemini_flash,
# gemma
'gemma-2b': gemma_2b,
### Anthropic ###
'claude-2': claude_2,
'claude-2.0': claude_2_0,
'claude-2.1': claude_2_1,
'claude-3-opus': claude_3_opus,
'claude-3-sonnet': claude_3_sonnet,
'claude-3-5-sonnet': claude_3_5_sonnet,
'claude-3-haiku': claude_3_haiku,
### Reka AI ###
'reka-core': reka_core,
### Blackbox ###
'blackbox': blackbox,
### CohereForAI ###
'command-r+': command_r_plus,
### Databricks ###
'dbrx-instruct': dbrx_instruct,
### GigaChat ###
'gigachat': gigachat,
### iFlytek ###
'sparkdesk-v1.1': sparkdesk_v1_1,
### Qwen ###
'qwen-1.5-14b': qwen_1_5_14b,
### Zhipu AI ###
'glm4-9b': glm4_9b,
'chatglm3-6b': chatglm3_6b,
### 01-ai ###
'yi-1.5-9b': yi_1_5_9b,
### Upstage ###
'solar-1-mini': solar_1_mini,
### Pi ###
'pi': pi,
### Reka AI ###
'reka': reka_core,
#############
### Image ###
#############
### NVIDIA ###
'nemotron-4-340b-instruct': nemotron_4_340b_instruct,
### Stability AI ###
'sdxl': sdxl,
'sd-3': sd_3,
### Blackbox ###
'blackbox': blackbox,
### CohereForAI ###
'command-r+': command_r_plus,
### Playground ###
'playground-v2.5': playground_v2_5,
### Databricks ###
'dbrx-instruct': dbrx_instruct,
### GigaChat ###
'gigachat': gigachat,
### iFlytek ###
'SparkDesk-v1.1': SparkDesk_v1_1,
### DeepSeek ###
'deepseek-coder': deepseek_coder,
'deepseek-chat': deepseek_chat,
### Qwen ###
'Qwen2-7b-instruct': Qwen2_7B_instruct,
### Zhipu AI ###
'glm4-9b-chat': glm4_9B_chat,
'chatglm3-6b': chatglm3_6B,
### 01-ai ###
'Yi-1.5-9b-chat': Yi_1_5_9B_chat,
# Other
'pi': pi,
#############
### Image ###
#############
### Stability AI ###
'sdxl': sdxl,
'stable-diffusion-3': stable_diffusion_3,
### ByteDance ###
'sdxl-lightning': sdxl_lightning_4step,
### Playground ###
'playground-v2.5': playground_v2_5_1024px_aesthetic,
### Flux AI ###
'flux': flux,
'flux-realism': flux_realism,
'flux-anime': flux_anime,
'flux-3d': flux_3d,
'flux-disney': flux_disney,
}
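With the registry normalized around short names, lookups go through ModelUtils.convert; a quick sketch of resolving one of the new keys:

    from g4f.models import ModelUtils

    model = ModelUtils.convert['llama-3.1-70b']
    print(model.name)            # llama-3.1-70b
    print(model.base_provider)   # meta
    # model.best_provider is the IterListProvider over DDG, HuggingChat,
    # FreeGpt, Blackbox and HuggingFace defined above.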