Merge pull request #2103 from kqlio67/main

Enhancements and Updates Across Providers
Tekky 2024-07-11 20:03:13 +02:00 committed by GitHub
commit 31edda55f7
26 changed files with 1079 additions and 344 deletions

g4f/Provider/AI365VIP.py Normal file

@@ -0,0 +1,67 @@
from __future__ import annotations
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
class AI365VIP(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://chat.ai365vip.com"
api_endpoint = "/api/chat"
working = True
supports_gpt_35_turbo = True
supports_gpt_4 = True
default_model = 'gpt-3.5-turbo'
models = [
'gpt-3.5-turbo',
'gpt-4o',
'claude-3-haiku-20240307',
]
model_aliases = {
"claude-3-haiku": "claude-3-haiku-20240307",
}
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
headers = {
"accept": "*/*",
"accept-language": "en-US,en;q=0.9",
"content-type": "application/json",
"dnt": "1",
"origin": "https://chat.ai365vip.com",
"priority": "u=1, i",
"referer": "https://chat.ai365vip.com/en",
"sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Linux"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
}
async with ClientSession(headers=headers) as session:
data = {
"model": {
"id": model,
"name": {
"gpt-3.5-turbo": "GPT-3.5",
"claude-3-haiku-20240307": "claude-3-haiku",
"gpt-4o": "GPT-4O"
}.get(model, model),
},
"messages": [{"role": "user", "content": format_prompt(messages)}],
"prompt": "You are a helpful assistant.",
}
async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
response.raise_for_status()
async for chunk in response.content:
if chunk:
yield chunk.decode()
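The provider streams the response body straight through, decoding each chunk as it arrives. A minimal usage sketch (the asyncio scaffolding and message content are illustrative; it assumes the class is exported from g4f.Provider as in the __init__.py diff further down):

import asyncio
from g4f.Provider import AI365VIP

async def main():
    # create_async_generator is an async generator: iterate it to stream chunks.
    async for chunk in AI365VIP.create_async_generator(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Say hello"}],
    ):
        print(chunk, end="")

asyncio.run(main())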

g4f/Provider/Blackbox.py

@@ -2,31 +2,35 @@ from __future__ import annotations
import uuid
import secrets
from aiohttp import ClientSession
import re
from aiohttp import ClientSession, ClientResponse
from typing import AsyncGenerator, Optional
from ..typing import AsyncResult, Messages, ImageType
from ..image import to_data_uri
from .base_provider import AsyncGeneratorProvider
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
class Blackbox(AsyncGeneratorProvider):
class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://www.blackbox.ai"
working = True
default_model = 'blackbox'
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
image: ImageType = None,
image_name: str = None,
proxy: Optional[str] = None,
image: Optional[ImageType] = None,
image_name: Optional[str] = None,
**kwargs
) -> AsyncResult:
) -> AsyncGenerator[str, None]:
if image is not None:
messages[-1]["data"] = {
"fileText": image_name,
"imageBase64": to_data_uri(image)
}
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
"Accept": "*/*",
@@ -40,9 +44,11 @@ class Blackbox(AsyncGeneratorProvider):
"Alt-Used": "www.blackbox.ai",
"Connection": "keep-alive",
}
async with ClientSession(headers=headers) as session:
random_id = secrets.token_hex(16)
random_user_id = str(uuid.uuid4())
data = {
"messages": messages,
"id": random_id,
@@ -55,10 +61,17 @@ class Blackbox(AsyncGeneratorProvider):
"playgroundMode": False,
"webSearchMode": False,
"userSystemPrompt": "",
"githubToken": None
"githubToken": None,
"maxTokens": None
}
async with session.post(f"{cls.url}/api/chat", json=data, proxy=proxy) as response:
async with session.post(
f"{cls.url}/api/chat", json=data, proxy=proxy
) as response: # type: ClientResponse
response.raise_for_status()
async for chunk in response.content:
async for chunk in response.content.iter_any():
if chunk:
yield chunk.decode()
# Decode the chunk and clean up unwanted prefixes using a regex
decoded_chunk = chunk.decode()
cleaned_chunk = re.sub(r'\$@\$.+?\$@\$|\$@\$', '', decoded_chunk)
yield cleaned_chunk
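The regex added here strips Blackbox's $@$...$@$ metadata markers from each decoded chunk before yielding it. A quick sketch of what that substitution does (the sample chunk text is invented for illustration):

import re

chunk = "$@$v=1.0$@$Hello from Blackbox"
cleaned = re.sub(r'\$@\$.+?\$@\$|\$@\$', '', chunk)
print(cleaned)  # -> Hello from Blackbox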

g4f/Provider/Chatgpt4o.py Normal file

@@ -0,0 +1,83 @@
from __future__ import annotations
import re
from ..requests import StreamSession, raise_for_status
from ..typing import Messages
from .base_provider import AsyncProvider, ProviderModelMixin
from .helper import format_prompt
class Chatgpt4o(AsyncProvider, ProviderModelMixin):
url = "https://chatgpt4o.one"
supports_gpt_4 = True
working = True
_post_id = None
_nonce = None
default_model = 'gpt-4o'
@classmethod
async def create_async(
cls,
model: str,
messages: Messages,
proxy: str = None,
timeout: int = 120,
cookies: dict = None,
**kwargs
) -> str:
headers = {
'authority': 'chatgpt4o.one',
'accept': '*/*',
'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
'origin': 'https://chatgpt4o.one',
'referer': 'https://chatgpt4o.one',
'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
}
async with StreamSession(
headers=headers,
cookies=cookies,
impersonate="chrome",
proxies={"all": proxy},
timeout=timeout
) as session:
if not cls._post_id or not cls._nonce:
async with session.get(f"{cls.url}/") as response:
await raise_for_status(response)
response_text = await response.text()
post_id_match = re.search(r'data-post-id="([0-9]+)"', response_text)
nonce_match = re.search(r'data-nonce="(.*?)"', response_text)
if not post_id_match:
raise RuntimeError("No post ID found")
cls._post_id = post_id_match.group(1)
if not nonce_match:
raise RuntimeError("No nonce found")
cls._nonce = nonce_match.group(1)
prompt = format_prompt(messages)
data = {
"_wpnonce": cls._nonce,
"post_id": cls._post_id,
"url": cls.url,
"action": "wpaicg_chat_shortcode_message",
"message": prompt,
"bot_id": "0"
}
async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, cookies=cookies) as response:
await raise_for_status(response)
response_json = await response.json()
if "data" not in response_json:
raise RuntimeError("Unexpected response structure: 'data' field missing")
return response_json["data"]
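Chatgpt4o drives a WordPress AI-chat plugin: it first scrapes the landing page for a data-post-id and data-nonce, then posts the prompt to admin-ajax.php. A sketch of the scraping step using the same regexes (the HTML snippet is made up):

import re

html = '<div class="wpaicg-chat-shortcode" data-post-id="106" data-nonce="a1b2c3"></div>'
post_id = re.search(r'data-post-id="([0-9]+)"', html).group(1)
nonce = re.search(r'data-nonce="(.*?)"', html).group(1)
print(post_id, nonce)  # -> 106 a1b2c3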

g4f/Provider/ChatgptFree.py

@@ -10,7 +10,7 @@ from .helper import format_prompt
class ChatgptFree(AsyncProvider):
url = "https://chatgptfree.ai"
supports_gpt_35_turbo = True
working = False
working = True
_post_id = None
_nonce = None

g4f/Provider/DDG.py Normal file

@@ -0,0 +1,117 @@
from __future__ import annotations
import json
import aiohttp
import asyncio
from typing import Optional
import base64
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import get_connector
from ..typing import AsyncResult, Messages
from ..requests.raise_for_status import raise_for_status
from ..providers.conversation import BaseConversation
class DDG(AsyncGeneratorProvider, ProviderModelMixin):
url = base64.b64decode("aHR0cHM6Ly9kdWNrZHVja2dvLmNvbS9haWNoYXQ=").decode("utf-8")
working = True
supports_gpt_35_turbo = True
supports_message_history = True
default_model = "gpt-3.5-turbo-0125"
models = ["gpt-3.5-turbo-0125", "claude-3-haiku-20240307", "meta-llama/Llama-3-70b-chat-hf", "mistralai/Mixtral-8x7B-Instruct-v0.1"]
model_aliases = {
"gpt-3.5-turbo": "gpt-3.5-turbo-0125",
"claude-3-haiku": "claude-3-haiku-20240307",
"llama-3-70b": "meta-llama/Llama-3-70b-chat-hf",
"mixtral-8x7B": "mistralai/Mixtral-8x7B-Instruct-v0.1"
}
# Obfuscated URLs and headers
status_url = base64.b64decode("aHR0cHM6Ly9kdWNrZHVja2dvLmNvbS9kdWNrY2hhdC92MS9zdGF0dXM=").decode("utf-8")
chat_url = base64.b64decode("aHR0cHM6Ly9kdWNrZHVja2dvLmNvbS9kdWNrY2hhdC92MS9jaGF0").decode("utf-8")
referer = base64.b64decode("aHR0cHM6Ly9kdWNrZHVja2dvLmNvbS8=").decode("utf-8")
origin = base64.b64decode("aHR0cHM6Ly9kdWNrZHVja2dvLmNvbQ==").decode("utf-8")
user_agent = 'Mozilla/5.0 (Windows NT 10.0; rv:127.0) Gecko/20100101 Firefox/127.0'
headers = {
'User-Agent': user_agent,
'Accept': 'text/event-stream',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate, br, zstd',
'Referer': referer,
'Content-Type': 'application/json',
'Origin': origin,
'Connection': 'keep-alive',
'Cookie': 'dcm=3',
'Sec-Fetch-Dest': 'empty',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Site': 'same-origin',
'Pragma': 'no-cache',
'TE': 'trailers'
}
@classmethod
async def get_vqd(cls, session: aiohttp.ClientSession) -> Optional[str]:
try:
async with session.get(cls.status_url, headers={"x-vqd-accept": "1"}) as response:
await raise_for_status(response)
return response.headers.get("x-vqd-4")
except Exception as e:
print(f"Error getting VQD: {e}")
return None
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
connector: aiohttp.BaseConnector = None,
conversation: Conversation = None,
return_conversation: bool = False,
**kwargs
) -> AsyncResult:
async with aiohttp.ClientSession(headers=cls.headers, connector=get_connector(connector, proxy)) as session:
vqd_4 = None
if conversation is not None and len(messages) > 1:
vqd_4 = conversation.vqd_4
messages = [*conversation.messages, messages[-2], messages[-1]]
else:
for _ in range(3): # Try up to 3 times to get a valid VQD
vqd_4 = await cls.get_vqd(session)
if vqd_4:
break
await asyncio.sleep(1) # Wait a bit before retrying
if not vqd_4:
raise Exception("Failed to obtain a valid VQD token")
messages = [messages[-1]] # Only use the last message for new conversations
payload = {
'model': cls.get_model(model),
'messages': [{'role': m['role'], 'content': m['content']} for m in messages]
}
async with session.post(cls.chat_url, json=payload, headers={"x-vqd-4": vqd_4}) as response:
await raise_for_status(response)
if return_conversation:
yield Conversation(vqd_4, messages)
async for line in response.content:
if line.startswith(b"data: "):
chunk = line[6:]
if chunk.startswith(b"[DONE]"):
break
try:
data = json.loads(chunk)
if "message" in data and data["message"]:
yield data["message"]
except json.JSONDecodeError:
print(f"Failed to decode JSON: {chunk}")
class Conversation(BaseConversation):
def __init__(self, vqd_4: str, messages: Messages) -> None:
self.vqd_4 = vqd_4
self.messages = messages
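The endpoint strings above are stored base64-encoded and decoded at import time; decoding one shows the plain URL, for example:

import base64

status_url = base64.b64decode(
    "aHR0cHM6Ly9kdWNrZHVja2dvLmNvbS9kdWNrY2hhdC92MS9zdGF0dXM="
).decode("utf-8")
print(status_url)  # -> https://duckduckgo.com/duckchat/v1/status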

g4f/Provider/Feedough.py

@@ -1,17 +1,20 @@
from __future__ import annotations
import json
from aiohttp import ClientSession
import asyncio
from aiohttp import ClientSession, TCPConnector
from urllib.parse import urlencode
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
class Feedough(AsyncGeneratorProvider):
class Feedough(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://www.feedough.com"
api_endpoint = "/wp-admin/admin-ajax.php"
working = True
supports_gpt_35_turbo = True
default_model = ''
@classmethod
async def create_async_generator(
@@ -22,31 +25,54 @@ class Feedough(AsyncGeneratorProvider):
**kwargs
) -> AsyncResult:
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
"Accept": "*/*",
"Accept-Language": "en-US,en;q=0.5",
"Accept-Encoding": "gzip, deflate, br",
"Referer": "https://www.feedough.com/ai-prompt-generator/",
"Content-Type": "application/x-www-form-urlencoded;charset=UTF-8",
"Origin": "https://www.feedough.com",
"DNT": "1",
"Sec-GPC": "1",
"Connection": "keep-alive",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
"TE": "trailers",
"accept": "*/*",
"accept-language": "en-US,en;q=0.9",
"content-type": "application/x-www-form-urlencoded;charset=UTF-8",
"dnt": "1",
"origin": cls.url,
"referer": f"{cls.url}/ai-prompt-generator/",
"sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Linux"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
}
async with ClientSession(headers=headers) as session:
prompt = format_prompt(messages)
connector = TCPConnector(ssl=False)
async with ClientSession(headers=headers, connector=connector) as session:
data = {
"action": "aixg_generate",
"prompt": prompt,
"prompt": format_prompt(messages),
"aixg_generate_nonce": "110c021031"
}
async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, proxy=proxy) as response:
try:
async with session.post(
f"{cls.url}{cls.api_endpoint}",
data=urlencode(data),
proxy=proxy
) as response:
response.raise_for_status()
response_text = await response.text()
try:
response_json = json.loads(response_text)
if response_json["success"]:
message = response_json["data"]["message"]
if response_json.get("success") and "data" in response_json:
message = response_json["data"].get("message", "")
yield message
except json.JSONDecodeError:
yield response_text
except Exception as e:
print(f"An error occurred: {e}")
@classmethod
async def run(cls, *args, **kwargs):
async for item in cls.create_async_generator(*args, **kwargs):
yield item
tasks = asyncio.all_tasks()
for task in tasks:
if not task.done():
await task

g4f/Provider/FreeChatgpt.py

@@ -1,17 +1,27 @@
from __future__ import annotations
import json
from aiohttp import ClientSession, ClientTimeout
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..requests.raise_for_status import raise_for_status
from .helper import format_prompt
class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://free.chatgpt.org.uk"
url = "https://chat.chatgpt.org.uk"
api_endpoint = "/api/openai/v1/chat/completions"
working = True
supports_message_history = True
default_model = "google-gemini-pro"
supports_gpt_35_turbo = True
default_model = 'gpt-3.5-turbo'
models = [
'gpt-3.5-turbo',
'SparkDesk-v1.1',
'deepseek-coder',
'deepseek-chat',
'Qwen2-7B-Instruct',
'glm4-9B-chat',
'chatglm3-6B',
'Yi-1.5-9B-Chat',
]
@classmethod
async def create_async_generator(
@@ -19,45 +29,50 @@ class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
model: str,
messages: Messages,
proxy: str = None,
timeout: int = 120,
**kwargs
) -> AsyncResult:
headers = {
"Accept": "application/json, text/event-stream",
"Content-Type":"application/json",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "en-US,en;q=0.5",
"Host":"free.chatgpt.org.uk",
"Referer":f"{cls.url}/",
"Origin":f"{cls.url}",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
"accept": "application/json, text/event-stream",
"accept-language": "en-US,en;q=0.9",
"content-type": "application/json",
"dnt": "1",
"origin": cls.url,
"referer": f"{cls.url}/",
"sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Linux"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
}
async with ClientSession(headers=headers, timeout=ClientTimeout(timeout)) as session:
async with ClientSession(headers=headers) as session:
prompt = format_prompt(messages)
data = {
"messages": messages,
"messages": [
{"role": "system", "content": "\nYou are ChatGPT, a large language model trained by OpenAI.\nKnowledge cutoff: 2021-09\nCurrent model: gpt-3.5-turbo\nCurrent time: Thu Jul 04 2024 21:35:59 GMT+0300 (Eastern European Summer Time)\nLatex inline: \\(x^2\\) \nLatex block: $$e=mc^2$$\n\n"},
{"role": "user", "content": prompt}
],
"stream": True,
"model": cls.get_model(""),
"temperature": kwargs.get("temperature", 0.5),
"presence_penalty": kwargs.get("presence_penalty", 0),
"frequency_penalty": kwargs.get("frequency_penalty", 0),
"top_p": kwargs.get("top_p", 1)
"model": model,
"temperature": 0.5,
"presence_penalty": 0,
"frequency_penalty": 0,
"top_p": 1
}
async with session.post(f'{cls.url}/api/openai/v1/chat/completions', json=data, proxy=proxy) as response:
await raise_for_status(response)
started = False
async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
response.raise_for_status()
accumulated_text = ""
async for line in response.content:
if line.startswith(b"data: [DONE]"):
if line:
line_str = line.decode().strip()
if line_str == "data: [DONE]":
yield accumulated_text
break
elif line.startswith(b"data: "):
line = json.loads(line[6:])
if(line["choices"]==[]):
continue
chunk = line["choices"][0]["delta"].get("content")
if chunk:
started = True
yield chunk
if not started:
raise RuntimeError("Empty response")
elif line_str.startswith("data: "):
try:
chunk = json.loads(line_str[6:])
delta_content = chunk.get("choices", [{}])[0].get("delta", {}).get("content", "")
accumulated_text += delta_content
except json.JSONDecodeError:
pass
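Note the behavioral change here: instead of yielding deltas as they arrive, the new loop accumulates them and emits the full text once "data: [DONE]" is seen. A self-contained sketch of that parsing on canned SSE lines (payloads invented):

import json

lines = [
    'data: {"choices": [{"delta": {"content": "Hel"}}]}',
    'data: {"choices": [{"delta": {"content": "lo"}}]}',
    'data: [DONE]',
]
accumulated_text = ""
for line_str in lines:
    if line_str == "data: [DONE]":
        print(accumulated_text)  # -> Hello
        break
    elif line_str.startswith("data: "):
        try:
            chunk = json.loads(line_str[6:])
            accumulated_text += chunk.get("choices", [{}])[0].get("delta", {}).get("content", "")
        except json.JSONDecodeError:
            pass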

g4f/Provider/FreeGpt.py

@@ -1,55 +1,67 @@
from __future__ import annotations
import time, hashlib, random
from ..typing import AsyncResult, Messages
import time
import hashlib
import random
from typing import AsyncGenerator, Optional, Dict, Any
from ..typing import Messages
from ..requests import StreamSession, raise_for_status
from .base_provider import AsyncGeneratorProvider
from ..errors import RateLimitError
domains = [
# Constants
DOMAINS = [
"https://s.aifree.site",
"https://v.aifree.site/"
]
RATE_LIMIT_ERROR_MESSAGE = "当前地区当日额度已消耗完"
class FreeGpt(AsyncGeneratorProvider):
url = "https://freegptsnav.aifree.site"
working = True
supports_message_history = True
supports_system_message = True
supports_gpt_35_turbo = True
url: str = "https://freegptsnav.aifree.site"
working: bool = True
supports_message_history: bool = True
supports_system_message: bool = True
supports_gpt_35_turbo: bool = True
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
proxy: Optional[str] = None,
timeout: int = 120,
**kwargs
) -> AsyncResult:
**kwargs: Any
) -> AsyncGenerator[str, None]:
prompt = messages[-1]["content"]
timestamp = int(time.time())
data = cls._build_request_data(messages, prompt, timestamp)
domain = random.choice(DOMAINS)
async with StreamSession(
impersonate="chrome",
timeout=timeout,
proxies={"all": proxy}
proxies={"all": proxy} if proxy else None
) as session:
prompt = messages[-1]["content"]
timestamp = int(time.time())
data = {
"messages": messages,
"time": timestamp,
"pass": None,
"sign": generate_signature(timestamp, prompt)
}
domain = random.choice(domains)
async with session.post(f"{domain}/api/generate", json=data) as response:
await raise_for_status(response)
async for chunk in response.iter_content():
chunk = chunk.decode(errors="ignore")
if chunk == "当前地区当日额度已消耗完":
chunk_decoded = chunk.decode(errors="ignore")
if chunk_decoded == RATE_LIMIT_ERROR_MESSAGE:
raise RateLimitError("Rate limit reached")
yield chunk
yield chunk_decoded
def generate_signature(timestamp: int, message: str, secret: str = ""):
@staticmethod
def _build_request_data(messages: Messages, prompt: str, timestamp: int, secret: str = "") -> Dict[str, Any]:
return {
"messages": messages,
"time": timestamp,
"pass": None,
"sign": generate_signature(timestamp, prompt, secret)
}
def generate_signature(timestamp: int, message: str, secret: str = "") -> str:
data = f"{timestamp}:{message}:{secret}"
return hashlib.sha256(data.encode()).hexdigest()
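The "sign" field is just a SHA-256 digest over "timestamp:prompt:secret" with an empty default secret, so a request signature can be reproduced standalone (values illustrative):

import hashlib
import time

timestamp = int(time.time())
prompt = "Hello"
data = f"{timestamp}:{prompt}:"  # trailing colon: the default secret is empty
signature = hashlib.sha256(data.encode()).hexdigest()
print(signature)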

g4f/Provider/GeminiProChat.py

@@ -9,13 +9,14 @@ from ..errors import RateLimitError
from ..requests import raise_for_status
from ..requests.aiohttp import get_connector
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
class GeminiProChat(AsyncGeneratorProvider):
class GeminiProChat(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://www.chatgot.one/"
working = True
supports_message_history = True
default_model = ''
@classmethod
async def create_async_generator(
@@ -32,8 +33,8 @@ class GeminiProChat(AsyncGeneratorProvider):
"Accept-Language": "en-US,en;q=0.5",
"Accept-Encoding": "gzip, deflate, br",
"Content-Type": "text/plain;charset=UTF-8",
"Referer": "https://gemini-chatbot-sigma.vercel.app/",
"Origin": "https://gemini-chatbot-sigma.vercel.app",
"Referer": f"{cls.url}/",
"Origin": cls.url,
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",

g4f/Provider/HuggingChat.py

@@ -13,15 +13,13 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
supports_stream = True
default_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
models = [
"HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
'CohereForAI/c4ai-command-r-plus',
'mistralai/Mixtral-8x7B-Instruct-v0.1',
'google/gemma-1.1-7b-it',
'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
'mistralai/Mistral-7B-Instruct-v0.2',
'meta-llama/Meta-Llama-3-70B-Instruct',
'mistralai/Mixtral-8x7B-Instruct-v0.1',
'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
'01-ai/Yi-1.5-34B-Chat',
'mistralai/Mistral-7B-Instruct-v0.2',
'microsoft/Phi-3-mini-4k-instruct',
'01-ai/Yi-1.5-34B-Chat'
]
model_aliases = {

g4f/Provider/HuggingFace.py

@@ -12,10 +12,16 @@ from ..requests.raise_for_status import raise_for_status
class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://huggingface.co/chat"
working = True
needs_auth = True
supports_message_history = True
models = [
"mistralai/Mixtral-8x7B-Instruct-v0.1",
"mistralai/Mistral-7B-Instruct-v0.2"
'CohereForAI/c4ai-command-r-plus',
'meta-llama/Meta-Llama-3-70B-Instruct',
'mistralai/Mixtral-8x7B-Instruct-v0.1',
'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
'01-ai/Yi-1.5-34B-Chat',
'mistralai/Mistral-7B-Instruct-v0.2',
'microsoft/Phi-3-mini-4k-instruct',
]
default_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"

g4f/Provider/Koala.py

@@ -1,7 +1,8 @@
from __future__ import annotations
import json
from aiohttp import ClientSession, BaseConnector
from typing import AsyncGenerator, Optional, List, Dict, Union, Any
from aiohttp import ClientSession, BaseConnector, ClientResponse
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
@@ -19,12 +20,13 @@ class Koala(AsyncGeneratorProvider):
cls,
model: str,
messages: Messages,
proxy: str = None,
connector: BaseConnector = None,
**kwargs
) -> AsyncResult:
proxy: Optional[str] = None,
connector: Optional[BaseConnector] = None,
**kwargs: Any
) -> AsyncGenerator[Dict[str, Union[str, int, float, List[Dict[str, Any]], None]], None]:
if not model:
model = "gpt-3.5-turbo"
headers = {
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:122.0) Gecko/20100101 Firefox/122.0",
"Accept": "text/event-stream",
@@ -40,13 +42,17 @@ class Koala(AsyncGeneratorProvider):
"Sec-Fetch-Site": "same-origin",
"TE": "trailers",
}
async with ClientSession(headers=headers, connector=get_connector(connector, proxy)) as session:
input = messages[-1]["content"]
system_messages = [message["content"] for message in messages if message["role"] == "system"]
input_text = messages[-1]["content"]
system_messages = " ".join(
message["content"] for message in messages if message["role"] == "system"
)
if system_messages:
input += " ".join(system_messages)
input_text += f" {system_messages}"
data = {
"input": input,
"input": input_text,
"inputHistory": [
message["content"]
for message in messages[:-1]
@@ -59,8 +65,14 @@ class Koala(AsyncGeneratorProvider):
],
"model": model,
}
async with session.post(f"{cls.url}/api/gpt/", json=data, proxy=proxy) as response:
await raise_for_status(response)
async for chunk in cls._parse_event_stream(response):
yield chunk
@staticmethod
async def _parse_event_stream(response: ClientResponse) -> AsyncGenerator[Dict[str, Any], None]:
async for chunk in response.content:
if chunk.startswith(b"data: "):
yield json.loads(chunk[6:])
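Two behaviors worth noting in this rewrite: system messages are now joined once and appended to the final user message, and SSE parsing moved into a dedicated _parse_event_stream helper. A sketch of the system-message folding (sample messages invented):

messages = [
    {"role": "system", "content": "Answer briefly."},
    {"role": "user", "content": "Hi"},
]
input_text = messages[-1]["content"]
system_messages = " ".join(
    message["content"] for message in messages if message["role"] == "system"
)
if system_messages:
    input_text += f" {system_messages}"
print(input_text)  # -> Hi Answer briefly.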

g4f/Provider/Liaobots.py

@@ -10,7 +10,16 @@ from .helper import get_connector
from ..requests import raise_for_status
models = {
"gpt-4o": {
"gpt-3.5-turbo": {
"id": "gpt-3.5-turbo",
"name": "GPT-3.5-Turbo",
"model": "ChatGPT",
"provider": "OpenAI",
"maxLength": 48000,
"tokenLimit": 14000,
"context": "16K",
},
"gpt-4o-free": {
"context": "8K",
"id": "gpt-4o-free",
"maxLength": 31200,
@@ -19,51 +28,74 @@ models = {
"provider": "OpenAI",
"tokenLimit": 7800,
},
"gpt-3.5-turbo": {
"id": "gpt-3.5-turbo",
"name": "GPT-3.5-Turbo",
"maxLength": 48000,
"tokenLimit": 14000,
"context": "16K",
},
"gpt-4-turbo": {
"id": "gpt-4-turbo-preview",
"gpt-4-turbo-2024-04-09": {
"id": "gpt-4-turbo-2024-04-09",
"name": "GPT-4-Turbo",
"model": "ChatGPT",
"provider": "OpenAI",
"maxLength": 260000,
"tokenLimit": 126000,
"context": "128K",
},
"gpt-4": {
"id": "gpt-4-plus",
"name": "GPT-4-Plus",
"maxLength": 130000,
"tokenLimit": 31000,
"context": "32K",
"gpt-4o": {
"context": "128K",
"id": "gpt-4o",
"maxLength": 124000,
"model": "ChatGPT",
"name": "GPT-4o",
"provider": "OpenAI",
"tokenLimit": 62000,
},
"gpt-4-0613": {
"id": "gpt-4-0613",
"name": "GPT-4-0613",
"maxLength": 60000,
"tokenLimit": 15000,
"context": "16K",
},
"gemini-pro": {
"id": "gemini-pro",
"name": "Gemini-Pro",
"maxLength": 120000,
"tokenLimit": 30000,
"context": "32K",
"model": "ChatGPT",
"provider": "OpenAI",
"maxLength": 32000,
"tokenLimit": 7600,
"context": "8K",
},
"claude-3-opus-20240229": {
"id": "claude-3-opus-20240229",
"name": "Claude-3-Opus",
"model": "Claude",
"provider": "Anthropic",
"maxLength": 800000,
"tokenLimit": 200000,
"context": "200K",
},
"claude-3-opus-20240229-aws": {
"id": "claude-3-opus-20240229-aws",
"name": "Claude-3-Opus-Aws",
"model": "Claude",
"provider": "Anthropic",
"maxLength": 800000,
"tokenLimit": 200000,
"context": "200K",
},
"claude-3-opus-100k-poe": {
"id": "claude-3-opus-100k-poe",
"name": "Claude-3-Opus-100k-Poe",
"model": "Claude",
"provider": "Anthropic",
"maxLength": 400000,
"tokenLimit": 99000,
"context": "100K",
},
"claude-3-sonnet-20240229": {
"id": "claude-3-sonnet-20240229",
"name": "Claude-3-Sonnet",
"model": "Claude",
"provider": "Anthropic",
"maxLength": 800000,
"tokenLimit": 200000,
"context": "200K",
},
"claude-3-haiku-20240307": {
"id": "claude-3-haiku-20240307",
"name": "Claude-3-Haiku",
"model": "Claude",
"provider": "Anthropic",
"maxLength": 800000,
"tokenLimit": 200000,
"context": "200K",
@@ -71,6 +103,8 @@ models = {
"claude-2.1": {
"id": "claude-2.1",
"name": "Claude-2.1-200k",
"model": "Claude",
"provider": "Anthropic",
"maxLength": 800000,
"tokenLimit": 200000,
"context": "200K",
@@ -78,16 +112,38 @@ models = {
"claude-2.0": {
"id": "claude-2.0",
"name": "Claude-2.0-100k",
"model": "Claude",
"provider": "Anthropic",
"maxLength": 400000,
"tokenLimit": 100000,
"context": "100K",
},
"claude-instant-1": {
"id": "claude-instant-1",
"name": "Claude-instant-1",
"maxLength": 400000,
"tokenLimit": 100000,
"context": "100K",
"gemini-1.0-pro-latest": {
"id": "gemini-1.0-pro-latest",
"name": "Gemini-Pro",
"model": "Gemini",
"provider": "Google",
"maxLength": 120000,
"tokenLimit": 30000,
"context": "32K",
},
"gemini-1.5-flash-latest": {
"id": "gemini-1.5-flash-latest",
"name": "Gemini-1.5-Flash-1M",
"model": "Gemini",
"provider": "Google",
"maxLength": 4000000,
"tokenLimit": 1000000,
"context": "1024K",
},
"gemini-1.5-pro-latest": {
"id": "gemini-1.5-pro-latest",
"name": "Gemini-1.5-Pro-1M",
"model": "Gemini",
"provider": "Google",
"maxLength": 4000000,
"tokenLimit": 1000000,
"context": "1024K",
}
}
@@ -100,9 +156,9 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
supports_gpt_35_turbo = True
supports_gpt_4 = True
default_model = "gpt-3.5-turbo"
models = list(models)
models = list(models.keys())
model_aliases = {
"claude-v2": "claude-2"
"claude-v2": "claude-2.0"
}
_auth_code = ""
_cookie_jar = None
@@ -131,7 +187,7 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
) as session:
data = {
"conversationId": str(uuid.uuid4()),
"model": models[cls.get_model(model)],
"model": models[model],
"messages": messages,
"key": "",
"prompt": kwargs.get("system_message", "You are a helpful assistant."),
@@ -189,3 +245,45 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
raise RuntimeError("Invalid session")
if chunk:
yield chunk.decode(errors="ignore")
@classmethod
def get_model(cls, model: str) -> str:
"""
Retrieve the internal model identifier based on the provided model name or alias.
"""
if model in cls.model_aliases:
model = cls.model_aliases[model]
if model not in models:
raise ValueError(f"Model '{model}' is not supported.")
return model
@classmethod
def is_supported(cls, model: str) -> bool:
"""
Check if the given model is supported.
"""
return model in models or model in cls.model_aliases
@classmethod
async def initialize_auth_code(cls, session: ClientSession) -> None:
"""
Initialize the auth code by making the necessary login requests.
"""
async with session.post(
"https://liaobots.work/api/user",
json={"authcode": "pTIQr4FTnVRfr"},
verify_ssl=False
) as response:
await raise_for_status(response)
cls._auth_code = (await response.json(content_type=None))["authCode"]
if not cls._auth_code:
raise RuntimeError("Empty auth code")
cls._cookie_jar = session.cookie_jar
@classmethod
async def ensure_auth_code(cls, session: ClientSession) -> None:
"""
Ensure the auth code is initialized, and if not, perform the initialization.
"""
if not cls._auth_code:
await cls.initialize_auth_code(session)
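The new get_model resolves an alias first and then validates against the models dict, so unknown names fail fast instead of being sent upstream. The lookup in isolation (dicts abridged to two entries):

model_aliases = {"claude-v2": "claude-2.0"}
models = {"claude-2.0": {"id": "claude-2.0"}, "gpt-4o": {"id": "gpt-4o"}}

def get_model(model: str) -> str:
    if model in model_aliases:
        model = model_aliases[model]
    if model not in models:
        raise ValueError(f"Model '{model}' is not supported.")
    return model

print(get_model("claude-v2"))  # -> claude-2.0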

g4f/Provider/MetaAI.py

@@ -12,7 +12,7 @@ from ..typing import AsyncResult, Messages, Cookies
from ..requests import raise_for_status, DEFAULT_HEADERS
from ..image import ImageResponse, ImagePreview
from ..errors import ResponseError
from .base_provider import AsyncGeneratorProvider
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt, get_connector, format_cookies
class Sources():
@@ -25,10 +25,11 @@ class Sources():
class AbraGeoBlockedError(Exception):
pass
class MetaAI(AsyncGeneratorProvider):
class MetaAI(AsyncGeneratorProvider, ProviderModelMixin):
label = "Meta AI"
url = "https://www.meta.ai"
working = True
default_model = ''
def __init__(self, proxy: str = None, connector: BaseConnector = None):
self.session = ClientSession(connector=get_connector(connector, proxy), headers=DEFAULT_HEADERS)

g4f/Provider/ReplicateImage.py → g4f/Provider/ReplicateHome.py

@@ -1,32 +1,61 @@
from __future__ import annotations
from typing import Generator, Optional, Dict, Any, Union, List
import random
import asyncio
import base64
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..typing import AsyncResult, Messages
from ..requests import StreamSession, raise_for_status
from ..image import ImageResponse
from ..errors import ResponseError
from ..image import ImageResponse
class ReplicateImage(AsyncGeneratorProvider, ProviderModelMixin):
class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://replicate.com"
parent = "Replicate"
working = True
default_model = 'stability-ai/sdxl'
default_versions = [
"39ed52f2a78e934b3ba6e2a89f5b1c712de7dfea535525255b1aa35c5565e08b",
"2b017d9b67edd2ee1401238df49d75da53c523f36e363881e057f5dc3ed3c5b2"
models = [
# image
'stability-ai/sdxl',
'ai-forever/kandinsky-2.2',
# text
'meta/llama-2-70b-chat',
'mistralai/mistral-7b-instruct-v0.2'
]
image_models = [default_model]
versions = {
# image
'stability-ai/sdxl': [
"39ed52f2a78e934b3ba6e2a89f5b1c712de7dfea535525255b1aa35c5565e08b",
"2b017d9b67edd2ee1401238df49d75da53c523f36e363881e057f5dc3ed3c5b2",
"7762fd07cf82c948538e41f63f77d685e02b063e37e496e96eefd46c929f9bdc"
],
'ai-forever/kandinsky-2.2': [
"ad9d7879fbffa2874e1d909d1d37d9bc682889cc65b31f7bb00d2362619f194a"
],
# Text
'meta/llama-2-70b-chat': [
"dp-542693885b1777c98ef8c5a98f2005e7"
],
'mistralai/mistral-7b-instruct-v0.2': [
"dp-89e00f489d498885048e94f9809fbc76"
]
}
image_models = {"stability-ai/sdxl", "ai-forever/kandinsky-2.2"}
text_models = {"meta/llama-2-70b-chat", "mistralai/mistral-7b-instruct-v0.2"}
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
**kwargs
) -> AsyncResult:
**kwargs: Any
) -> Generator[Union[str, ImageResponse], None, None]:
yield await cls.create_async(messages[-1]["content"], model, **kwargs)
@classmethod
@@ -34,13 +63,13 @@ class ReplicateImage(AsyncGeneratorProvider, ProviderModelMixin):
cls,
prompt: str,
model: str,
api_key: str = None,
proxy: str = None,
api_key: Optional[str] = None,
proxy: Optional[str] = None,
timeout: int = 180,
version: str = None,
extra_data: dict = {},
**kwargs
) -> ImageResponse:
version: Optional[str] = None,
extra_data: Dict[str, Any] = {},
**kwargs: Any
) -> Union[str, ImageResponse]:
headers = {
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US',
@@ -55,10 +84,12 @@ class ReplicateImage(AsyncGeneratorProvider, ProviderModelMixin):
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
}
if version is None:
version = random.choice(cls.default_versions)
version = random.choice(cls.versions.get(model, []))
if api_key is not None:
headers["Authorization"] = f"Bearer {api_key}"
async with StreamSession(
proxies={"all": proxy},
headers=headers,
@@ -81,6 +112,7 @@ class ReplicateImage(AsyncGeneratorProvider, ProviderModelMixin):
result = await response.json()
if "id" not in result:
raise ResponseError(f"Invalid response: {result}")
while True:
if api_key is None:
url = f"https://homepage.replicate.com/api/poll?id={result['id']}"
@@ -92,7 +124,13 @@ class ReplicateImage(AsyncGeneratorProvider, ProviderModelMixin):
if "status" not in result:
raise ResponseError(f"Invalid response: {result}")
if result["status"] == "succeeded":
images = result['output']
output = result['output']
if model in cls.text_models:
return ''.join(output) if isinstance(output, list) else output
elif model in cls.image_models:
images: List[Any] = output
images = images[0] if len(images) == 1 else images
return ImageResponse(images, prompt)
elif result["status"] == "failed":
raise ResponseError(f"Prediction failed: {result}")
await asyncio.sleep(0.5)
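The renamed ReplicateHome provider polls the prediction endpoint until the status flips to succeeded or failed, then routes the output by model type. The polling skeleton, abstracted (the fetch_status callable stands in for the provider's HTTP call):

import asyncio

async def poll_prediction(fetch_status, interval: float = 0.5):
    # Loop until the prediction reaches a terminal state, as in the loop above.
    while True:
        result = await fetch_status()
        if result["status"] == "succeeded":
            return result["output"]
        if result["status"] == "failed":
            raise RuntimeError(f"Prediction failed: {result}")
        await asyncio.sleep(interval)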

g4f/Provider/__init__.py

@@ -10,28 +10,24 @@ from .not_working import *
from .selenium import *
from .needs_auth import *
from .Aichatos import Aichatos
from .AI365VIP import AI365VIP
from .Aura import Aura
from .Bing import Bing
from .BingCreateImages import BingCreateImages
from .Blackbox import Blackbox
from .ChatForAi import ChatForAi
from .Chatgpt4o import Chatgpt4o
from .Chatgpt4Online import Chatgpt4Online
from .ChatgptAi import ChatgptAi
from .ChatgptFree import ChatgptFree
from .ChatgptNext import ChatgptNext
from .ChatgptX import ChatgptX
from .Cnote import Cnote
from .Cohere import Cohere
from .DDG import DDG
from .DeepInfra import DeepInfra
from .DeepInfraImage import DeepInfraImage
from .Feedough import Feedough
from .FlowGpt import FlowGpt
from .FreeChatgpt import FreeChatgpt
from .FreeGpt import FreeGpt
from .GigaChat import GigaChat
from .GeminiPro import GeminiPro
from .GeminiProChat import GeminiProChat
from .GigaChat import GigaChat
from .GptTalkRu import GptTalkRu
from .HuggingChat import HuggingChat
from .HuggingFace import HuggingFace
@@ -45,12 +41,12 @@ from .Ollama import Ollama
from .PerplexityLabs import PerplexityLabs
from .Pi import Pi
from .Pizzagpt import Pizzagpt
from .Reka import Reka
from .Replicate import Replicate
from .ReplicateImage import ReplicateImage
from .ReplicateHome import ReplicateHome
from .Vercel import Vercel
from .WhiteRabbitNeo import WhiteRabbitNeo
from .You import You
from .Reka import Reka
import sys

g4f/Provider/deprecated/Yqcloud.py

@@ -9,7 +9,7 @@ from ..base_provider import AsyncGeneratorProvider, format_prompt
class Yqcloud(AsyncGeneratorProvider):
url = "https://chat9.yqcloud.top/"
working = True
working = False
supports_gpt_35_turbo = True
@staticmethod

g4f/Provider/Aichatos.py → g4f/Provider/not_working/Aichatos.py

@@ -2,16 +2,16 @@ from __future__ import annotations
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider
from ..helper import format_prompt
import random
class Aichatos(AsyncGeneratorProvider):
url = "https://chat10.aichatos.xyz"
api = "https://api.binjie.fun"
working = True
working = False
supports_gpt_35_turbo = True
@classmethod

g4f/Provider/ChatForAi.py → g4f/Provider/not_working/ChatForAi.py

@@ -4,14 +4,14 @@ import time
import hashlib
import uuid
from ..typing import AsyncResult, Messages
from ..requests import StreamSession, raise_for_status
from ..errors import RateLimitError
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ...typing import AsyncResult, Messages
from ...requests import StreamSession, raise_for_status
from ...errors import RateLimitError
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
class ChatForAi(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://chatforai.store"
working = True
working = False
default_model = "gpt-3.5-turbo"
supports_message_history = True
supports_gpt_35_turbo = True

g4f/Provider/ChatgptAi.py → g4f/Provider/not_working/ChatgptAi.py

@@ -3,14 +3,14 @@ from __future__ import annotations
import re, html, json, string, random
from aiohttp import ClientSession
from ..typing import Messages, AsyncResult
from ..errors import RateLimitError
from .base_provider import AsyncGeneratorProvider
from .helper import get_random_string
from ...typing import Messages, AsyncResult
from ...errors import RateLimitError
from ..base_provider import AsyncGeneratorProvider
from ..helper import get_random_string
class ChatgptAi(AsyncGeneratorProvider):
url = "https://chatgpt.ai"
working = True
working = False
supports_message_history = True
supports_system_message = True,
supports_gpt_4 = True,

g4f/Provider/ChatgptNext.py → g4f/Provider/not_working/ChatgptNext.py

@@ -3,12 +3,12 @@ from __future__ import annotations
import json
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider
class ChatgptNext(AsyncGeneratorProvider):
url = "https://www.chatgpt-free.cc"
working = True
working = False
supports_gpt_35_turbo = True
supports_message_history = True
supports_system_message = True

g4f/Provider/ChatgptX.py → g4f/Provider/not_working/ChatgptX.py

@@ -4,15 +4,15 @@ import re
import json
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt
from ..errors import RateLimitError
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider
from ..helper import format_prompt
from ...errors import RateLimitError
class ChatgptX(AsyncGeneratorProvider):
url = "https://chatgptx.de"
supports_gpt_35_turbo = True
working = True
working = False
@classmethod
async def create_async_generator(

g4f/Provider/Cnote.py → g4f/Provider/not_working/Cnote.py

@@ -3,15 +3,15 @@ from __future__ import annotations
import json
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider
from ..helper import format_prompt
class Cnote(AsyncGeneratorProvider):
url = "https://f1.cnote.top"
api_url = "https://p1api.xjai.pro/freeapi/chat-process"
working = True
working = False
supports_gpt_35_turbo = True
@classmethod

g4f/Provider/not_working/Feedough.py Normal file

@@ -0,0 +1,78 @@
from __future__ import annotations
import json
import asyncio
from aiohttp import ClientSession, TCPConnector
from urllib.parse import urlencode
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt
class Feedough(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://www.feedough.com"
api_endpoint = "/wp-admin/admin-ajax.php"
working = False
default_model = ''
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
headers = {
"accept": "*/*",
"accept-language": "en-US,en;q=0.9",
"content-type": "application/x-www-form-urlencoded;charset=UTF-8",
"dnt": "1",
"origin": cls.url,
"referer": f"{cls.url}/ai-prompt-generator/",
"sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Linux"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
}
connector = TCPConnector(ssl=False)
async with ClientSession(headers=headers, connector=connector) as session:
data = {
"action": "aixg_generate",
"prompt": format_prompt(messages),
"aixg_generate_nonce": "110c021031"
}
try:
async with session.post(
f"{cls.url}{cls.api_endpoint}",
data=urlencode(data),
proxy=proxy
) as response:
response.raise_for_status()
response_text = await response.text()
try:
response_json = json.loads(response_text)
if response_json.get("success") and "data" in response_json:
message = response_json["data"].get("message", "")
yield message
except json.JSONDecodeError:
yield response_text
except Exception as e:
print(f"An error occurred: {e}")
@classmethod
async def run(cls, *args, **kwargs):
async for item in cls.create_async_generator(*args, **kwargs):
yield item
tasks = asyncio.all_tasks()
for task in tasks:
if not task.done():
await task

g4f/Provider/not_working/__init__.py

@@ -1,11 +1,18 @@
from .AItianhu import AItianhu
from .Aichatos import Aichatos
from .Bestim import Bestim
from .ChatBase import ChatBase
from .ChatForAi import ChatForAi
from .ChatgptAi import ChatgptAi
from .ChatgptDemo import ChatgptDemo
from .ChatgptDemoAi import ChatgptDemoAi
from .ChatgptLogin import ChatgptLogin
from .ChatgptNext import ChatgptNext
from .ChatgptX import ChatgptX
from .Chatxyz import Chatxyz
from .Cnote import Cnote
from .Feedough import Feedough
from .Gpt6 import Gpt6
from .GptChatly import GptChatly
from .GptForLove import GptForLove

g4f/models.py

@@ -4,17 +4,19 @@ from dataclasses import dataclass
from .Provider import IterListProvider, ProviderType
from .Provider import (
Aichatos,
AI365VIP,
Bing,
Blackbox,
ChatgptAi,
ChatgptNext,
Cnote,
Chatgpt4o,
ChatgptFree,
DDG,
DeepInfra,
Feedough,
DeepInfraImage,
FreeChatgpt,
FreeGpt,
Gemini,
GeminiPro,
GeminiProChat,
GigaChat,
HuggingChat,
HuggingFace,
@@ -23,11 +25,13 @@ from .Provider import (
MetaAI,
OpenaiChat,
PerplexityLabs,
Replicate,
Pi,
Pizzagpt,
Reka,
Replicate,
ReplicateHome,
Vercel,
You,
Reka
)
@dataclass(unsafe_hash=True)
@@ -54,9 +58,15 @@ default = Model(
base_provider = "",
best_provider = IterListProvider([
Bing,
ChatgptAi,
You,
OpenaiChat,
FreeChatgpt,
AI365VIP,
Chatgpt4o,
DDG,
ChatgptFree,
Koala,
Pizzagpt,
])
)
@@ -67,28 +77,58 @@ gpt_35_long = Model(
best_provider = IterListProvider([
FreeGpt,
You,
ChatgptNext,
OpenaiChat,
Koala,
ChatgptFree,
FreeChatgpt,
DDG,
AI365VIP,
Pizzagpt,
])
)
# GPT-3.5 / GPT-4
############
### Text ###
############
### OpenAI ###
### GPT-3.5 / GPT-4 ###
# gpt-3.5
gpt_35_turbo = Model(
name = 'gpt-3.5-turbo',
base_provider = 'openai',
best_provider = IterListProvider([
FreeGpt,
You,
ChatgptNext,
Koala,
OpenaiChat,
Aichatos,
Cnote,
Feedough,
ChatgptFree,
FreeChatgpt,
DDG,
AI365VIP,
Pizzagpt,
])
)
gpt_35_turbo_16k = Model(
name = 'gpt-3.5-turbo-16k',
base_provider = 'openai',
best_provider = gpt_35_long.best_provider
)
gpt_35_turbo_16k_0613 = Model(
name = 'gpt-3.5-turbo-16k-0613',
base_provider = 'openai',
best_provider = gpt_35_long.best_provider
)
gpt_35_turbo_0613 = Model(
name = 'gpt-3.5-turbo-0613',
base_provider = 'openai',
best_provider = gpt_35_turbo.best_provider
)
# gpt-4
gpt_4 = Model(
name = 'gpt-4',
base_provider = 'openai',
@@ -97,12 +137,22 @@ gpt_4 = Model(
])
)
gpt_4o = Model(
name = 'gpt-4o',
gpt_4_0613 = Model(
name = 'gpt-4-0613',
base_provider = 'openai',
best_provider = IterListProvider([
You, Liaobots
])
best_provider = gpt_4.best_provider
)
gpt_4_32k = Model(
name = 'gpt-4-32k',
base_provider = 'openai',
best_provider = gpt_4.best_provider
)
gpt_4_32k_0613 = Model(
name = 'gpt-4-32k-0613',
base_provider = 'openai',
best_provider = gpt_4.best_provider
)
gpt_4_turbo = Model(
@@ -111,18 +161,36 @@ gpt_4_turbo = Model(
best_provider = Bing
)
gpt_4o = Model(
name = 'gpt-4o',
base_provider = 'openai',
best_provider = IterListProvider([
You, Liaobots, Chatgpt4o, AI365VIP
])
)
### GigaChat ###
gigachat = Model(
name = 'GigaChat:latest',
base_provider = 'gigachat',
best_provider = GigaChat
)
### Meta ###
meta = Model(
name = "meta",
base_provider = "meta",
best_provider = MetaAI
)
llama_2_70b_chat = Model(
name = "meta/llama-2-70b-chat",
base_provider = "meta",
best_provider = IterListProvider([ReplicateHome])
)
llama3_8b_instruct = Model(
name = "meta-llama/Meta-Llama-3-8B-Instruct",
base_provider = "meta",
@@ -132,7 +200,7 @@ llama3_8b_instruct = Model(
llama3_70b_instruct = Model(
name = "meta-llama/Meta-Llama-3-70B-Instruct",
base_provider = "meta",
best_provider = IterListProvider([DeepInfra, PerplexityLabs, Replicate])
best_provider = IterListProvider([DeepInfra, PerplexityLabs, Replicate, HuggingChat, DDG])
)
codellama_34b_instruct = Model(
@@ -144,35 +212,77 @@ codellama_34b_instruct = Model(
codellama_70b_instruct = Model(
name = "codellama/CodeLlama-70b-Instruct-hf",
base_provider = "meta",
best_provider = IterListProvider([DeepInfra, PerplexityLabs])
best_provider = IterListProvider([DeepInfra])
)
# Mistral
### Mistral ###
mixtral_8x7b = Model(
name = "mistralai/Mixtral-8x7B-Instruct-v0.1",
base_provider = "huggingface",
best_provider = IterListProvider([DeepInfra, HuggingFace, PerplexityLabs])
)
mistral_7b = Model(
name = "mistralai/Mistral-7B-Instruct-v0.1",
base_provider = "huggingface",
best_provider = IterListProvider([HuggingChat, HuggingFace, PerplexityLabs])
best_provider = IterListProvider([DeepInfra, HuggingFace, PerplexityLabs, HuggingChat, DDG])
)
mistral_7b_v02 = Model(
name = "mistralai/Mistral-7B-Instruct-v0.2",
base_provider = "huggingface",
best_provider = IterListProvider([DeepInfra, HuggingFace, PerplexityLabs])
best_provider = IterListProvider([DeepInfra, HuggingFace, HuggingChat, ReplicateHome])
)
# Bard
### NousResearch ###
Nous_Hermes_2_Mixtral_8x7B_DPO = Model(
name = "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
base_provider = "NousResearch",
best_provider = IterListProvider([HuggingFace, HuggingChat])
)
### 01-ai ###
Yi_1_5_34B_Chat = Model(
name = "01-ai/Yi-1.5-34B-Chat",
base_provider = "01-ai",
best_provider = IterListProvider([HuggingFace, HuggingChat])
)
### Microsoft ###
Phi_3_mini_4k_instruct = Model(
name = "microsoft/Phi-3-mini-4k-instruct",
base_provider = "Microsoft",
best_provider = IterListProvider([HuggingFace, HuggingChat])
)
### Google ###
# gemini
gemini = Model(
name = 'gemini',
base_provider = 'google',
base_provider = 'Google',
best_provider = Gemini
)
gemini_pro = Model(
name = 'gemini-pro',
base_provider = 'Google',
best_provider = IterListProvider([GeminiPro, You, GeminiProChat])
)
# gemma
gemma_2_9b_it = Model(
name = 'gemma-2-9b-it',
base_provider = 'Google',
best_provider = IterListProvider([PerplexityLabs])
)
gemma_2_27b_it = Model(
name = 'gemma-2-27b-it',
base_provider = 'Google',
best_provider = IterListProvider([PerplexityLabs])
)
### Anthropic ###
claude_v2 = Model(
name = 'claude-v2',
base_provider = 'anthropic',
@@ -194,79 +304,76 @@ claude_3_sonnet = Model(
claude_3_haiku = Model(
name = 'claude-3-haiku',
base_provider = 'anthropic',
best_provider = None
best_provider = IterListProvider([DDG, AI365VIP])
)
gpt_35_turbo_16k = Model(
name = 'gpt-3.5-turbo-16k',
base_provider = 'openai',
best_provider = gpt_35_long.best_provider
### Reka AI ###
reka_core = Model(
name = 'reka-core',
base_provider = 'Reka AI',
best_provider = Reka
)
gpt_35_turbo_16k_0613 = Model(
name = 'gpt-3.5-turbo-16k-0613',
base_provider = 'openai',
best_provider = gpt_35_long.best_provider
### NVIDIA ###
nemotron_4_340b_instruct = Model(
name = 'nemotron-4-340b-instruct',
base_provider = 'NVIDIA',
best_provider = IterListProvider([PerplexityLabs])
)
gpt_35_turbo_0613 = Model(
name = 'gpt-3.5-turbo-0613',
base_provider = 'openai',
best_provider = gpt_35_turbo.best_provider
### Blackbox ###
blackbox = Model(
name = 'blackbox',
base_provider = 'Blackbox',
best_provider = Blackbox
)
gpt_4_0613 = Model(
name = 'gpt-4-0613',
base_provider = 'openai',
best_provider = gpt_4.best_provider
### Databricks ###
dbrx_instruct = Model(
name = 'databricks/dbrx-instruct',
base_provider = 'Databricks',
best_provider = IterListProvider([DeepInfra])
)
gpt_4_32k = Model(
name = 'gpt-4-32k',
base_provider = 'openai',
best_provider = gpt_4.best_provider
### CohereForAI ###
command_r_plus = Model(
name = 'CohereForAI/c4ai-command-r-plus',
base_provider = 'CohereForAI',
best_provider = IterListProvider([HuggingChat])
)
gpt_4_32k_0613 = Model(
name = 'gpt-4-32k-0613',
base_provider = 'openai',
best_provider = gpt_4.best_provider
)
gemini_pro = Model(
name = 'gemini-pro',
base_provider = 'google',
best_provider = IterListProvider([GeminiPro, You])
)
### Other ###
pi = Model(
name = 'pi',
base_provider = 'inflection',
best_provider = Pi
)
dbrx_instruct = Model(
name = 'databricks/dbrx-instruct',
base_provider = 'mistral',
best_provider = IterListProvider([DeepInfra, PerplexityLabs])
#############
### Image ###
#############
### Stability AI ###
sdxl = Model(
name = 'stability-ai/sdxl',
base_provider = 'Stability AI',
best_provider = IterListProvider([ReplicateHome, DeepInfraImage])
)
command_r_plus = Model(
name = 'CohereForAI/c4ai-command-r-plus',
base_provider = 'mistral',
best_provider = IterListProvider([HuggingChat])
)
### AI Forever ###
kandinsky_2_2 = Model(
name = 'ai-forever/kandinsky-2.2',
base_provider = 'AI Forever',
best_provider = IterListProvider([ReplicateHome])
blackbox = Model(
name = 'blackbox',
base_provider = 'blackbox',
best_provider = Blackbox
)
reka_core = Model(
name = 'reka-core',
base_provider = 'Reka AI',
best_provider = Reka
)
class ModelUtils:
@@ -277,6 +384,13 @@ class ModelUtils:
convert (dict[str, Model]): Dictionary mapping model string identifiers to Model instances.
"""
convert: dict[str, Model] = {
############
### Text ###
############
### OpenAI ###
### GPT-3.5 / GPT-4 ###
# gpt-3.5
'gpt-3.5-turbo' : gpt_35_turbo,
'gpt-3.5-turbo-0613' : gpt_35_turbo_0613,
@@ -292,7 +406,11 @@ class ModelUtils:
'gpt-4-32k-0613' : gpt_4_32k_0613,
'gpt-4-turbo' : gpt_4_turbo,
### Meta ###
"meta-ai": meta,
'llama-2-70b-chat': llama_2_70b_chat,
'llama3-8b': llama3_8b_instruct, # alias
'llama3-70b': llama3_70b_instruct, # alias
'llama3-8b-instruct' : llama3_8b_instruct,
@@ -301,30 +419,79 @@ class ModelUtils:
'codellama-34b-instruct': codellama_34b_instruct,
'codellama-70b-instruct': codellama_70b_instruct,
# Mistral Opensource
### Mistral (Opensource) ###
'mixtral-8x7b': mixtral_8x7b,
'mistral-7b': mistral_7b,
'mistral-7b-v02': mistral_7b_v02,
# google gemini
### NousResearch ###
'Nous-Hermes-2-Mixtral-8x7B-DPO': Nous_Hermes_2_Mixtral_8x7B_DPO,
### 01-ai ###
'Yi-1.5-34B-Chat': Yi_1_5_34B_Chat,
### Microsoft ###
'Phi-3-mini-4k-instruct': Phi_3_mini_4k_instruct,
### Google ###
# gemini
'gemini': gemini,
'gemini-pro': gemini_pro,
# anthropic
# gemma
'gemma-2-9b-it': gemma_2_9b_it,
'gemma-2-27b-it': gemma_2_27b_it,
### Anthropic ###
'claude-v2': claude_v2,
'claude-3-opus': claude_3_opus,
'claude-3-sonnet': claude_3_sonnet,
'claude-3-haiku': claude_3_haiku,
# reka core
### Reka AI ###
'reka': reka_core,
# other
### NVIDIA ###
'nemotron-4-340b-instruct': nemotron_4_340b_instruct,
### Blackbox ###
'blackbox': blackbox,
### CohereForAI ###
'command-r+': command_r_plus,
### Databricks ###
'dbrx-instruct': dbrx_instruct,
### GigaChat ###
'gigachat': gigachat,
'pi': pi
# Other
'pi': pi,
#############
### Image ###
#############
### Stability AI ###
'sdxl': sdxl,
### AI Forever ###
'kandinsky-2.2': kandinsky_2_2,
}
_all_models = list(ModelUtils.convert.keys())
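With these changes, ModelUtils.convert remains the single registry mapping model id strings to Model instances, and _all_models is derived from its keys. A usage sketch (assuming the module layout shown in this diff):

from g4f.models import ModelUtils, _all_models

model = ModelUtils.convert["gpt-3.5-turbo"]
print(model.name, model.base_provider)  # -> gpt-3.5-turbo openai
print("sdxl" in _all_models)  # -> True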