Merge pull request #2125 from kqlio67/main

Added new providers, updated existing ones, added new models, and updated the model lists
Tekky 2024-08-01 16:10:29 -03:00 committed by GitHub
commit 36e6306bd0
22 changed files with 1006 additions and 376 deletions

71
g4f/Provider/Allyfy.py Normal file
View File

@@ -0,0 +1,71 @@
from __future__ import annotations
from aiohttp import ClientSession
import json
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt
class Allyfy(AsyncGeneratorProvider):
url = "https://chatbot.allyfy.chat"
api_endpoint = "/api/v1/message/stream/super/chat"
working = True
supports_gpt_35_turbo = True
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
headers = {
"accept": "text/event-stream",
"accept-language": "en-US,en;q=0.9",
"content-type": "application/json;charset=utf-8",
"dnt": "1",
"origin": "https://www.allyfy.chat",
"priority": "u=1, i",
"referer": "https://www.allyfy.chat/",
"referrer": "https://www.allyfy.chat",
'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="126"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Linux"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
}
async with ClientSession(headers=headers) as session:
prompt = format_prompt(messages)
data = {
"messages": [{"content": prompt, "role": "user"}],
"content": prompt,
"baseInfo": {
"clientId": "q08kdrde1115003lyedfoir6af0yy531",
"pid": "38281",
"channelId": "100000",
"locale": "en-US",
"localZone": 180,
"packageName": "com.cch.allyfy.webh",
}
}
async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
response.raise_for_status()
full_response = []
async for line in response.content:
line = line.decode().strip()
if line.startswith("data:"):
data_content = line[5:]
if data_content == "[DONE]":
break
try:
json_data = json.loads(data_content)
if "content" in json_data:
full_response.append(json_data["content"])
except json.JSONDecodeError:
continue
yield "".join(full_response)
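Since Allyfy only implements create_async_generator, a minimal consumption sketch looks like the following (the prompt is illustrative, and the model argument is effectively ignored by this provider):
import asyncio
from g4f.Provider.Allyfy import Allyfy

async def demo():
    # Allyfy accumulates the streamed chunks and yields the joined text once at the end.
    async for text in Allyfy.create_async_generator(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Say hello"}],
    ):
        print(text)

asyncio.run(demo())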

75
g4f/Provider/ChatGot.py Normal file
View File

@@ -0,0 +1,75 @@
from __future__ import annotations
import time
from hashlib import sha256
from aiohttp import BaseConnector, ClientSession
from ..errors import RateLimitError
from ..requests import raise_for_status
from ..requests.aiohttp import get_connector
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
class ChatGot(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://www.chatgot.one/"
working = True
supports_message_history = True
default_model = 'gemini-pro'
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
connector: BaseConnector = None,
**kwargs,
) -> AsyncResult:
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:122.0) Gecko/20100101 Firefox/122.0",
"Accept": "*/*",
"Accept-Language": "en-US,en;q=0.5",
"Accept-Encoding": "gzip, deflate, br",
"Content-Type": "text/plain;charset=UTF-8",
"Referer": f"{cls.url}/",
"Origin": cls.url,
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
"Connection": "keep-alive",
"TE": "trailers",
}
async with ClientSession(
connector=get_connector(connector, proxy), headers=headers
) as session:
timestamp = int(time.time() * 1e3)
data = {
"messages": [
{
"role": "model" if message["role"] == "assistant" else "user",
"parts": [{"text": message["content"]}],
}
for message in messages
],
"time": timestamp,
"pass": None,
"sign": generate_signature(timestamp, messages[-1]["content"]),
}
async with session.post(
f"{cls.url}/api/generate", json=data, proxy=proxy
) as response:
if response.status == 500:
if "Quota exceeded" in await response.text():
raise RateLimitError(
f"Response {response.status}: Rate limit reached"
)
await raise_for_status(response)
async for chunk in response.content.iter_any():
yield chunk.decode(errors="ignore")
def generate_signature(time: int, text: str, secret: str = ""):
message = f"{time}:{text}:{secret}"
return sha256(message.encode()).hexdigest()
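The sign field sent to ChatGot is a SHA-256 hex digest over the millisecond timestamp, the last message text, and an empty secret, mirroring generate_signature above; a standalone sanity check (values are illustrative):
import time
from hashlib import sha256

timestamp = int(time.time() * 1e3)  # milliseconds, as in create_async_generator
text = "Hello"                      # content of the last message
sign = sha256(f"{timestamp}:{text}:".encode()).hexdigest()  # secret defaults to ""
print(timestamp, sign)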

View File

@@ -1,22 +1,18 @@
from __future__ import annotations
import re
import json
from aiohttp import ClientSession
from ..typing import Messages, AsyncResult
from ..requests import get_args_from_browser
from ..webdriver import WebDriver
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import get_random_string
from .helper import format_prompt
class Chatgpt4Online(AsyncGeneratorProvider):
url = "https://chatgpt4online.org"
supports_message_history = True
supports_gpt_35_turbo = True
working = True
_wpnonce = None
_context_id = None
api_endpoint = "/wp-json/mwai-ui/v1/chats/submit"
working = True
supports_gpt_4 = True
@classmethod
async def create_async_generator(
@@ -24,49 +20,52 @@ class Chatgpt4Online(AsyncGeneratorProvider):
model: str,
messages: Messages,
proxy: str = None,
webdriver: WebDriver = None,
**kwargs
) -> AsyncResult:
args = get_args_from_browser(f"{cls.url}/chat/", webdriver, proxy=proxy)
async with ClientSession(**args) as session:
if not cls._wpnonce:
async with session.get(f"{cls.url}/chat/", proxy=proxy) as response:
response.raise_for_status()
response = await response.text()
result = re.search(r'restNonce":"(.*?)"', response)
if result:
cls._wpnonce = result.group(1)
else:
raise RuntimeError("No nonce found")
result = re.search(r'contextId":(.*?),', response)
if result:
cls._context_id = result.group(1)
else:
raise RuntimeError("No contextId found")
headers = {
"accept": "text/event-stream",
"accept-language": "en-US,en;q=0.9",
"content-type": "application/json",
"dnt": "1",
"origin": cls.url,
"priority": "u=1, i",
"referer": f"{cls.url}/",
"sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Linux"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
"x-wp-nonce": "d9505e9877",
}
async with ClientSession(headers=headers) as session:
prompt = format_prompt(messages)
data = {
"botId":"default",
"customId":None,
"session":"N/A",
"chatId":get_random_string(11),
"contextId":cls._context_id,
"messages":messages[:-1],
"newMessage":messages[-1]["content"],
"newImageId":None,
"stream":True
"botId": "default",
"newMessage": prompt,
"stream": True,
}
async with session.post(
f"{cls.url}/wp-json/mwai-ui/v1/chats/submit",
json=data,
proxy=proxy,
headers={"x-wp-nonce": cls._wpnonce}
) as response:
async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
response.raise_for_status()
async for line in response.content:
if line.startswith(b"data: "):
line = json.loads(line[6:])
if "type" not in line:
raise RuntimeError(f"Response: {line}")
elif line["type"] == "live":
yield line["data"]
elif line["type"] == "end":
break
full_response = ""
async for chunk in response.content.iter_any():
if chunk:
try:
# Extract the JSON object from the chunk
for line in chunk.decode().splitlines():
if line.startswith("data: "):
json_data = json.loads(line[6:])
if json_data["type"] == "live":
full_response += json_data["data"]
elif json_data["type"] == "end":
final_data = json.loads(json_data["data"])
full_response = final_data["reply"]
break
except json.JSONDecodeError:
continue
yield full_response
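For reference, the rewritten parser expects a stream of "live" deltas followed by an "end" event whose data field is itself a JSON string carrying the full reply; an illustrative (not captured) stream:
data: {"type": "live", "data": "Hel"}
data: {"type": "live", "data": "lo"}
data: {"type": "end", "data": "{\"reply\": \"Hello\"}"}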

107
g4f/Provider/FreeNetfly.py Normal file
View File

@@ -0,0 +1,107 @@
from __future__ import annotations
import json
import asyncio
from aiohttp import ClientSession, ClientTimeout, ClientError
from typing import AsyncGenerator
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
class FreeNetfly(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://free.netfly.top"
api_endpoint = "/api/openai/v1/chat/completions"
working = True
supports_gpt_35_turbo = True
supports_gpt_4 = True
default_model = 'gpt-3.5-turbo'
models = [
'gpt-3.5-turbo',
'gpt-4',
]
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
headers = {
"accept": "application/json, text/event-stream",
"accept-language": "en-US,en;q=0.9",
"content-type": "application/json",
"dnt": "1",
"origin": cls.url,
"referer": f"{cls.url}/",
"sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Linux"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
}
data = {
"messages": messages,
"stream": True,
"model": model,
"temperature": 0.5,
"presence_penalty": 0,
"frequency_penalty": 0,
"top_p": 1
}
max_retries = 3
retry_delay = 1
for attempt in range(max_retries):
try:
async with ClientSession(headers=headers) as session:
timeout = ClientTimeout(total=60)
async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy, timeout=timeout) as response:
response.raise_for_status()
async for chunk in cls._process_response(response):
yield chunk
return # If successful, exit the function
except (ClientError, asyncio.TimeoutError) as e:
if attempt == max_retries - 1:
raise # If all retries failed, raise the last exception
await asyncio.sleep(retry_delay)
retry_delay *= 2 # Exponential backoff
@classmethod
async def _process_response(cls, response) -> AsyncGenerator[str, None]:
buffer = ""
async for line in response.content:
buffer += line.decode('utf-8')
if buffer.endswith('\n\n'):
for subline in buffer.strip().split('\n'):
if subline.startswith('data: '):
if subline == 'data: [DONE]':
return
try:
data = json.loads(subline[6:])
content = data['choices'][0]['delta'].get('content')
if content:
yield content
except json.JSONDecodeError:
print(f"Failed to parse JSON: {subline}")
except KeyError:
print(f"Unexpected JSON structure: {data}")
buffer = ""
# Process any remaining data in the buffer
if buffer:
for subline in buffer.strip().split('\n'):
if subline.startswith('data: ') and subline != 'data: [DONE]':
try:
data = json.loads(subline[6:])
content = data['choices'][0]['delta'].get('content')
if content:
yield content
except (json.JSONDecodeError, KeyError):
pass
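FreeNetfly proxies an OpenAI-compatible chat completions endpoint, so _process_response consumes standard delta-style SSE events separated by blank lines; an illustrative (not captured) stream:
data: {"choices": [{"delta": {"content": "Hel"}}]}

data: {"choices": [{"delta": {"content": "lo"}}]}

data: [DONE]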

View File

@@ -13,10 +13,10 @@ from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
class GeminiProChat(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://www.chatgot.one/"
url = "https://gemini-pro.chat/"
working = True
supports_message_history = True
default_model = ''
default_model = 'gemini-pro'
@classmethod
async def create_async_generator(

View File

@@ -13,8 +13,9 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
supports_stream = True
default_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
models = [
'meta-llama/Meta-Llama-3.1-70B-Instruct',
'meta-llama/Meta-Llama-3.1-405B-Instruct-FP8',
'CohereForAI/c4ai-command-r-plus',
'meta-llama/Meta-Llama-3-70B-Instruct',
'mistralai/Mixtral-8x7B-Instruct-v0.1',
'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
'01-ai/Yi-1.5-34B-Chat',

View File

@@ -14,16 +14,17 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
working = True
needs_auth = True
supports_message_history = True
default_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
models = [
'meta-llama/Meta-Llama-3.1-70B-Instruct',
'meta-llama/Meta-Llama-3.1-405B-Instruct-FP8',
'CohereForAI/c4ai-command-r-plus',
'meta-llama/Meta-Llama-3-70B-Instruct',
'mistralai/Mixtral-8x7B-Instruct-v0.1',
'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
'01-ai/Yi-1.5-34B-Chat',
'mistralai/Mistral-7B-Instruct-v0.2',
'microsoft/Phi-3-mini-4k-instruct',
]
default_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
@classmethod
async def create_async_generator(

View File

@@ -10,14 +10,23 @@ from .helper import get_connector
from ..requests import raise_for_status
models = {
"gpt-3.5-turbo": {
"id": "gpt-3.5-turbo",
"name": "GPT-3.5-Turbo",
"gpt-4o-mini-free": {
"id": "gpt-4o-mini-free",
"name": "GPT-4o-Mini-Free",
"model": "ChatGPT",
"provider": "OpenAI",
"maxLength": 48000,
"tokenLimit": 14000,
"context": "16K",
"maxLength": 31200,
"tokenLimit": 7800,
"context": "8K",
},
"gpt-4o-mini": {
"id": "gpt-4o-mini",
"name": "GPT-4o-Mini",
"model": "ChatGPT",
"provider": "OpenAI",
"maxLength": 260000,
"tokenLimit": 126000,
"context": "128K",
},
"gpt-4o-free": {
"context": "8K",
@@ -48,106 +57,26 @@ models = {
},
"gpt-4-0613": {
"id": "gpt-4-0613",
"name": "GPT-4-0613",
"name": "GPT-4",
"model": "ChatGPT",
"provider": "OpenAI",
"maxLength": 32000,
"tokenLimit": 7600,
"context": "8K",
"maxLength": 260000,
"tokenLimit": 126000,
"context": "128K",
},
"claude-3-opus-20240229": {
"id": "claude-3-opus-20240229",
"name": "Claude-3-Opus",
"model": "Claude",
"provider": "Anthropic",
"maxLength": 800000,
"tokenLimit": 200000,
"context": "200K",
"gpt-4-turbo": {
"id": "gpt-4-turbo",
"name": "GPT-4-Turbo",
"model": "ChatGPT",
"provider": "OpenAI",
"maxLength": 260000,
"tokenLimit": 126000,
"context": "128K",
},
"claude-3-opus-20240229-aws": {
"id": "claude-3-opus-20240229-aws",
"name": "Claude-3-Opus-Aws",
"model": "Claude",
"provider": "Anthropic",
"maxLength": 800000,
"tokenLimit": 200000,
"context": "200K",
},
"claude-3-opus-100k-poe": {
"id": "claude-3-opus-100k-poe",
"name": "Claude-3-Opus-100k-Poe",
"model": "Claude",
"provider": "Anthropic",
"maxLength": 400000,
"tokenLimit": 99000,
"context": "100K",
},
"claude-3-sonnet-20240229": {
"id": "claude-3-sonnet-20240229",
"name": "Claude-3-Sonnet",
"model": "Claude",
"provider": "Anthropic",
"maxLength": 800000,
"tokenLimit": 200000,
"context": "200K",
},
"claude-3-haiku-20240307": {
"id": "claude-3-haiku-20240307",
"name": "Claude-3-Haiku",
"model": "Claude",
"provider": "Anthropic",
"maxLength": 800000,
"tokenLimit": 200000,
"context": "200K",
},
"claude-2.1": {
"id": "claude-2.1",
"name": "Claude-2.1-200k",
"model": "Claude",
"provider": "Anthropic",
"maxLength": 800000,
"tokenLimit": 200000,
"context": "200K",
},
"claude-2.0": {
"id": "claude-2.0",
"name": "Claude-2.0-100k",
"model": "Claude",
"provider": "Anthropic",
"maxLength": 400000,
"tokenLimit": 100000,
"context": "100K",
},
"gemini-1.0-pro-latest": {
"id": "gemini-1.0-pro-latest",
"name": "Gemini-Pro",
"model": "Gemini",
"provider": "Google",
"maxLength": 120000,
"tokenLimit": 30000,
"context": "32K",
},
"gemini-1.5-flash-latest": {
"id": "gemini-1.5-flash-latest",
"name": "Gemini-1.5-Flash-1M",
"model": "Gemini",
"provider": "Google",
"maxLength": 4000000,
"tokenLimit": 1000000,
"context": "1024K",
},
"gemini-1.5-pro-latest": {
"id": "gemini-1.5-pro-latest",
"name": "Gemini-1.5-Pro-1M",
"model": "Gemini",
"provider": "Google",
"maxLength": 4000000,
"tokenLimit": 1000000,
"context": "1024K",
}
}
class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://liaobots.site"
working = True
@@ -155,10 +84,22 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
supports_system_message = True
supports_gpt_35_turbo = True
supports_gpt_4 = True
default_model = "gpt-3.5-turbo"
default_model = "gpt-4o"
models = list(models.keys())
model_aliases = {
"claude-v2": "claude-2.0"
"gpt-4o-mini": "gpt-4o-mini-free",
"gpt-4o": "gpt-4o-free",
"gpt-4-turbo": "gpt-4-turbo-2024-04-09",
"gpt-4-": "gpt-4-0613",
"claude-3-opus": "claude-3-opus-20240229",
"claude-3-opus": "claude-3-opus-20240229-aws",
"claude-3-opus": "claude-3-opus-20240229-gcp",
"claude-3-sonnet": "claude-3-sonnet-20240229",
"claude-3-5-sonnet": "claude-3-5-sonnet-20240620",
"claude-3-haiku": "claude-3-haiku-20240307",
"gemini-pro": "gemini-1.5-pro-latest",
"gemini-pro": "gemini-1.0-pro-latest",
"gemini-flash": "gemini-1.5-flash-latest",
}
_auth_code = ""
_cookie_jar = None
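Note that repeated keys in a Python dict literal keep only the last value, so with the aliases above "claude-3-opus" effectively resolves to "claude-3-opus-20240229-gcp" and "gemini-pro" to "gemini-1.0-pro-latest"; a quick check:
aliases = {"gemini-pro": "gemini-1.5-pro-latest", "gemini-pro": "gemini-1.0-pro-latest"}
print(aliases["gemini-pro"])  # gemini-1.0-pro-latest -- the later duplicate wins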

View File

@@ -0,0 +1,97 @@
from __future__ import annotations
from aiohttp import ClientSession, ClientResponseError
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
class LiteIcoding(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://lite.icoding.ink"
api_endpoint = "/api/v1/gpt/message"
working = True
supports_gpt_4 = True
default_model = "gpt-4o"
models = [
'gpt-4o',
'gpt-4-turbo',
'claude-3',
'claude-3.5',
'gemini-1.5',
]
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
headers = {
"Accept": "*/*",
"Accept-Language": "en-US,en;q=0.9",
"Authorization": "Bearer null",
"Connection": "keep-alive",
"Content-Type": "application/json;charset=utf-8",
"DNT": "1",
"Origin": cls.url,
"Referer": f"{cls.url}/",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
"User-Agent": (
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/126.0.0.0 Safari/537.36"
),
"sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Linux"',
}
data = {
"model": model,
"chatId": "-1",
"messages": [
{
"role": msg["role"],
"content": msg["content"],
"time": msg.get("time", ""),
"attachments": msg.get("attachments", []),
}
for msg in messages
],
"plugins": [],
"systemPrompt": "",
"temperature": 0.5,
}
async with ClientSession(headers=headers) as session:
try:
async with session.post(
f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy
) as response:
response.raise_for_status()
buffer = ""
full_response = ""
async for chunk in response.content.iter_any():
if chunk:
buffer += chunk.decode()
while "\n\n" in buffer:
part, buffer = buffer.split("\n\n", 1)
if part.startswith("data: "):
content = part[6:].strip()
if content and content != "[DONE]":
content = content.strip('"')
full_response += content
full_response = full_response.replace('" "', ' ')
yield full_response.strip()
except ClientResponseError as e:
raise RuntimeError(
f"ClientResponseError {e.status}: {e.message}, url={e.request_info.url}, data={data}"
) from e
except Exception as e:
raise RuntimeError(f"Unexpected error: {str(e)}") from e
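The LiteIcoding handler assumes double-newline-separated data: events whose payloads are JSON-quoted string fragments; it strips the surrounding quotes, concatenates the pieces, and collapses any leftover '" "' seams into spaces. An illustrative (not captured) stream:
data: "Hello"

data: " world"

data: [DONE]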

View File

@@ -0,0 +1,50 @@
from __future__ import annotations
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
class MagickPenAsk(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://api.magickpen.com"
api_endpoint = "/ask"
working = True
supports_gpt_4 = True
default_model = "gpt-4o"
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
headers = {
"accept": "application/json, text/plain, */*",
"accept-language": "en-US,en;q=0.9",
"content-type": "application/json",
"dnt": "1",
"origin": "https://magickpen.com",
"priority": "u=1, i",
"referer": "https://magickpen.com/",
"sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Linux"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
}
async with ClientSession(headers=headers) as session:
data = {
"query": format_prompt(messages),
"plan": "Pay as you go"
}
async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
response.raise_for_status()
async for chunk in response.content:
if chunk:
yield chunk.decode()

View File

@@ -0,0 +1,50 @@
from __future__ import annotations
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
class MagickPenChat(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://api.magickpen.com"
api_endpoint = "/chat/free"
working = True
supports_gpt_4 = True
default_model = "gpt-4o-mini"
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
headers = {
"accept": "application/json, text/plain, */*",
"accept-language": "en-US,en;q=0.9",
"access-control-allow-origin": "*",
"content-type": "application/json",
"dnt": "1",
"origin": "https://magickpen.com",
"priority": "u=1, i",
"referer": "https://magickpen.com/",
"sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Linux"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
}
async with ClientSession(headers=headers) as session:
data = {
"history": [{"role": "user", "content": format_prompt(messages)}]
}
async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
response.raise_for_status()
async for chunk in response.content:
if chunk:
yield chunk.decode()

64
g4f/Provider/Marsyoo.py Normal file
View File

@@ -0,0 +1,64 @@
from __future__ import annotations
import json
from aiohttp import ClientSession, ClientResponseError
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
class Marsyoo(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://aiagent.marsyoo.com"
api_endpoint = "/api/chat-messages"
working = True
supports_gpt_4 = True
default_model = 'gpt-4o'
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
headers = {
"Accept": "*/*",
"Accept-Language": "en-US,en;q=0.9",
"Connection": "keep-alive",
"DNT": "1",
"Origin": cls.url,
"Referer": f"{cls.url}/chat",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
"authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiI0MWNkOTE3MS1mNTg1LTRjMTktOTY0Ni01NzgxMTBjYWViNTciLCJzdWIiOiJXZWIgQVBJIFBhc3Nwb3J0IiwiYXBwX2lkIjoiNDFjZDkxNzEtZjU4NS00YzE5LTk2NDYtNTc4MTEwY2FlYjU3IiwiYXBwX2NvZGUiOiJMakhzdWJqNjhMTXZCT0JyIiwiZW5kX3VzZXJfaWQiOiI4YjE5YjY2Mi05M2E1LTRhYTktOGNjNS03MDhmNWE0YmQxNjEifQ.pOzdQ4wTrQjjRlEv1XY9TZitkW5KW1K-wbcUJAoBJ5I",
"content-type": "application/json",
"sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": "Linux",
}
async with ClientSession(headers=headers) as session:
prompt = format_prompt(messages)
data = {
"response_mode": "streaming",
"query": prompt,
"inputs": {},
}
try:
async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
response.raise_for_status()
async for line in response.content:
if line:
try:
json_data = json.loads(line.decode('utf-8').strip().lstrip('data: '))
if json_data['event'] == 'message':
yield json_data['answer']
elif json_data['event'] == 'message_end':
return
except json.JSONDecodeError:
continue
except ClientResponseError as e:
yield f"Error: HTTP {e.status}: {e.message}"
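One caveat in the Marsyoo parser: str.lstrip('data: ') strips any leading characters from the set {'d', 'a', 't', ':', ' '} rather than the literal "data: " prefix; it works here only because the JSON payload begins with '{':
prefix_stripped = 'data: {"event": "message"}'.lstrip('data: ')
print(prefix_stripped)  # {"event": "message"} -- safe only while the payload never starts with d/a/t/:/space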

View File

@@ -15,21 +15,8 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
working = True
default_model = "mixtral-8x7b-instruct"
models = [
"llama-3-sonar-large-32k-online", "llama-3-sonar-small-32k-online", "llama-3-sonar-large-32k-chat", "llama-3-sonar-small-32k-chat",
"dbrx-instruct", "claude-3-haiku-20240307", "llama-3-8b-instruct", "llama-3-70b-instruct", "codellama-70b-instruct", "mistral-7b-instruct",
"llava-v1.5-7b-wrapper", "llava-v1.6-34b", "mixtral-8x7b-instruct", "mixtral-8x22b-instruct", "mistral-medium", "gemma-2b-it", "gemma-7b-it",
"related"
"llama-3-sonar-large-32k-online", "llama-3-sonar-small-32k-online", "llama-3-sonar-large-32k-chat", "llama-3-sonar-small-32k-chat", "llama-3-8b-instruct", "llama-3-70b-instruct", "gemma-2-9b-it", "gemma-2-27b-it", "nemotron-4-340b-instruct", "mixtral-8x7b-instruct",
]
model_aliases = {
"mistralai/Mistral-7B-Instruct-v0.1": "mistral-7b-instruct",
"mistralai/Mistral-7B-Instruct-v0.2": "mistral-7b-instruct",
"mistralai/Mixtral-8x7B-Instruct-v0.1": "mixtral-8x7b-instruct",
"codellama/CodeLlama-70b-Instruct-hf": "codellama-70b-instruct",
"llava-v1.5-7b": "llava-v1.5-7b-wrapper",
"databricks/dbrx-instruct": "dbrx-instruct",
"meta-llama/Meta-Llama-3-70B-Instruct": "llama-3-70b-instruct",
"meta-llama/Meta-Llama-3-8B-Instruct": "llama-3-8b-instruct"
}
@classmethod
async def create_async_generator(

View File

@@ -11,6 +11,7 @@ class Pi(AbstractProvider):
working = True
supports_stream = True
_session = None
default_model = "pi"
@classmethod
def create_completion(
@@ -65,4 +66,4 @@ class Pi(AbstractProvider):
yield json.loads(line.split(b'data: ')[1])
elif line.startswith(b'data: {"title":'):
yield json.loads(line.split(b'data: ')[1])

View File

@@ -14,40 +14,46 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://replicate.com"
parent = "Replicate"
working = True
default_model = 'stability-ai/sdxl'
default_model = 'stability-ai/stable-diffusion-3'
models = [
# image
'stability-ai/sdxl',
'ai-forever/kandinsky-2.2',
# Models for image generation
'stability-ai/stable-diffusion-3',
'bytedance/sdxl-lightning-4step',
'playgroundai/playground-v2.5-1024px-aesthetic',
# text
'meta/llama-2-70b-chat',
'mistralai/mistral-7b-instruct-v0.2'
# Models for text generation
'meta/meta-llama-3-70b-instruct',
'mistralai/mixtral-8x7b-instruct-v0.1',
'google-deepmind/gemma-2b-it',
]
versions = {
# image
'stability-ai/sdxl': [
"39ed52f2a78e934b3ba6e2a89f5b1c712de7dfea535525255b1aa35c5565e08b",
"2b017d9b67edd2ee1401238df49d75da53c523f36e363881e057f5dc3ed3c5b2",
"7762fd07cf82c948538e41f63f77d685e02b063e37e496e96eefd46c929f9bdc"
# Model versions for generating images
'stability-ai/stable-diffusion-3': [
"527d2a6296facb8e47ba1eaf17f142c240c19a30894f437feee9b91cc29d8e4f"
],
'ai-forever/kandinsky-2.2': [
"ad9d7879fbffa2874e1d909d1d37d9bc682889cc65b31f7bb00d2362619f194a"
'bytedance/sdxl-lightning-4step': [
"5f24084160c9089501c1b3545d9be3c27883ae2239b6f412990e82d4a6210f8f"
],
'playgroundai/playground-v2.5-1024px-aesthetic': [
"a45f82a1382bed5c7aeb861dac7c7d191b0fdf74d8d57c4a0e6ed7d4d0bf7d24"
],
# Text
'meta/llama-2-70b-chat': [
"dp-542693885b1777c98ef8c5a98f2005e7"
# Model versions for text generation
'meta/meta-llama-3-70b-instruct': [
"dp-cf04fe09351e25db628e8b6181276547"
],
'mistralai/mistral-7b-instruct-v0.2': [
'mistralai/mixtral-8x7b-instruct-v0.1': [
"dp-89e00f489d498885048e94f9809fbc76"
],
'google-deepmind/gemma-2b-it': [
"dff94eaf770e1fc211e425a50b51baa8e4cac6c39ef074681f9e39d778773626"
]
}
image_models = {"stability-ai/sdxl", "ai-forever/kandinsky-2.2"}
text_models = {"meta/llama-2-70b-chat", "mistralai/mistral-7b-instruct-v0.2"}
image_models = {"stability-ai/stable-diffusion-3", "bytedance/sdxl-lightning-4step", "playgroundai/playground-v2.5-1024px-aesthetic"}
text_models = {"meta/meta-llama-3-70b-instruct", "mistralai/mixtral-8x7b-instruct-v0.1", "google-deepmind/gemma-2b-it"}
@classmethod
async def create_async_generator(

View File

@@ -0,0 +1,62 @@
from __future__ import annotations
from typing import Any, Dict
from aiohttp import ClientSession, ClientTimeout
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
class TeachAnything(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://www.teach-anything.com"
api_endpoint = "/api/generate"
working = True
default_model = "llama-3-70b-instruct"
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str | None = None,
**kwargs: Any
) -> AsyncResult:
headers = cls._get_headers()
async with ClientSession(headers=headers) as session:
prompt = format_prompt(messages)
data = {"prompt": prompt}
timeout = ClientTimeout(total=60)
async with session.post(
f"{cls.url}{cls.api_endpoint}",
json=data,
proxy=proxy,
timeout=timeout
) as response:
response.raise_for_status()
async for chunk in response.content.iter_any():
if chunk:
yield chunk.decode()
@staticmethod
def _get_headers() -> Dict[str, str]:
return {
"accept": "*/*",
"accept-language": "en-US,en;q=0.9",
"content-type": "application/json",
"dnt": "1",
"origin": "https://www.teach-anything.com",
"priority": "u=1, i",
"referer": "https://www.teach-anything.com/",
"sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Linux"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
}

View File

@@ -24,27 +24,27 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
image_models = ["dall-e"]
models = [
default_model,
"gpt-4o-mini",
"gpt-4o",
"gpt-4",
"gpt-4-turbo",
"claude-instant",
"claude-2",
"gpt-4",
"claude-3.5-sonnet",
"claude-3-opus",
"claude-3-sonnet",
"claude-3-haiku",
"gemini-pro",
"claude-2",
"llama-3.1-70b",
"llama-3",
"gemini-1-5-flash",
"gemini-1-5-pro",
"gemini-1-0-pro",
"databricks-dbrx-instruct",
"command-r",
"command-r-plus",
"llama3",
"zephyr",
"dolphin-2.5",
default_vision_model,
*image_models
]
model_aliases = {
"claude-v2": "claude-2",
}
_cookies = None
_cookies_used = 0
_telemetry_ids = []
@@ -220,4 +220,4 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
'stytch_session_jwt': session["session_jwt"],
'ydc_stytch_session': session["session_token"],
'ydc_stytch_session_jwt': session["session_jwt"],
}
}

View File

@@ -11,10 +11,12 @@ from .selenium import *
from .needs_auth import *
from .AI365VIP import AI365VIP
from .Allyfy import Allyfy
from .Aura import Aura
from .Bing import Bing
from .BingCreateImages import BingCreateImages
from .Blackbox import Blackbox
from .ChatGot import ChatGot
from .Chatgpt4o import Chatgpt4o
from .Chatgpt4Online import Chatgpt4Online
from .ChatgptFree import ChatgptFree
@@ -25,16 +27,22 @@ from .DeepInfraImage import DeepInfraImage
from .FlowGpt import FlowGpt
from .FreeChatgpt import FreeChatgpt
from .FreeGpt import FreeGpt
from .FreeNetfly import FreeNetfly
from .GeminiPro import GeminiPro
from .GeminiProChat import GeminiProChat
from .GigaChat import GigaChat
from .GptTalkRu import GptTalkRu
from .HuggingChat import HuggingChat
from .HuggingFace import HuggingFace
from .Koala import Koala
from .Liaobots import Liaobots
from .LiteIcoding import LiteIcoding
from .Llama import Llama
from .Local import Local
from .MagickPenAsk import MagickPenAsk
from .MagickPenChat import MagickPenChat
from .Marsyoo import Marsyoo
from .MetaAI import MetaAI
from .MetaAIAccount import MetaAIAccount
from .Ollama import Ollama
@@ -44,6 +52,7 @@ from .Pizzagpt import Pizzagpt
from .Reka import Reka
from .Replicate import Replicate
from .ReplicateHome import ReplicateHome
from .TeachAnything import TeachAnything
from .Vercel import Vercel
from .WhiteRabbitNeo import WhiteRabbitNeo
from .You import You

View File

@@ -16,6 +16,7 @@ class Openai(AsyncGeneratorProvider, ProviderModelMixin):
needs_auth = True
supports_message_history = True
supports_system_message = True
default_model = ""
@classmethod
async def create_async_generator(
@@ -120,4 +121,4 @@ class Openai(AsyncGeneratorProvider, ProviderModelMixin):
if api_key is not None else {}
),
**({} if headers is None else headers)
}
}

View File

@@ -55,16 +55,13 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
label = "OpenAI ChatGPT"
url = "https://chatgpt.com"
working = True
supports_gpt_35_turbo = True
supports_gpt_4 = True
supports_message_history = True
supports_system_message = True
default_model = None
default_vision_model = "gpt-4o"
models = ["gpt-3.5-turbo", "gpt-4", "gpt-4-gizmo", "gpt-4o", "auto"]
models = [ "auto", "gpt-4o-mini", "gpt-4o", "gpt-4", "gpt-4-gizmo"]
model_aliases = {
"text-davinci-002-render-sha": "gpt-3.5-turbo",
"": "gpt-3.5-turbo",
"gpt-4-turbo-preview": "gpt-4",
"dall-e": "gpt-4",
}

View File

@@ -229,8 +229,8 @@
<option value="">Model: Default</option>
<option value="gpt-4">gpt-4</option>
<option value="gpt-3.5-turbo">gpt-3.5-turbo</option>
<option value="llama2-70b">llama2-70b</option>
<option value="llama3-70b-instruct">llama3-70b-instruct</option>
<option value="llama-3-70b-chat">llama-3-70b-chat</option>
<option value="llama-3.1-70b">llama-3.1-70b</option>
<option value="gemini-pro">gemini-pro</option>
<option value="">----</option>
</select>

View File

@@ -5,35 +5,44 @@ from dataclasses import dataclass
from .Provider import IterListProvider, ProviderType
from .Provider import (
AI365VIP,
Bing,
Blackbox,
Chatgpt4o,
ChatgptFree,
DDG,
DeepInfra,
DeepInfraImage,
FreeChatgpt,
FreeGpt,
Gemini,
GeminiPro,
GeminiProChat,
GigaChat,
HuggingChat,
HuggingFace,
Koala,
Liaobots,
MetaAI,
OpenaiChat,
PerplexityLabs,
Pi,
Pizzagpt,
Reka,
Replicate,
ReplicateHome,
Vercel,
You,
Allyfy,
Bing,
Blackbox,
ChatGot,
Chatgpt4o,
Chatgpt4Online,
ChatgptFree,
DDG,
DeepInfra,
DeepInfraImage,
FreeChatgpt,
FreeGpt,
FreeNetfly,
Gemini,
GeminiPro,
GeminiProChat,
GigaChat,
HuggingChat,
HuggingFace,
Koala,
Liaobots,
LiteIcoding,
MagickPenAsk,
MagickPenChat,
Marsyoo,
MetaAI,
OpenaiChat,
PerplexityLabs,
Pi,
Pizzagpt,
Reka,
Replicate,
ReplicateHome,
TeachAnything,
You,
)
@dataclass(unsafe_hash=True)
class Model:
"""
@@ -77,13 +86,13 @@ gpt_35_long = Model(
best_provider = IterListProvider([
FreeGpt,
You,
OpenaiChat,
Koala,
ChatgptFree,
FreeChatgpt,
DDG,
AI365VIP,
Pizzagpt,
Allyfy,
])
)
@@ -101,71 +110,43 @@ gpt_35_turbo = Model(
FreeGpt,
You,
Koala,
OpenaiChat,
ChatgptFree,
FreeChatgpt,
DDG,
AI365VIP,
Pizzagpt,
Allyfy,
])
)
gpt_35_turbo_16k = Model(
name = 'gpt-3.5-turbo-16k',
base_provider = 'openai',
best_provider = gpt_35_long.best_provider
)
gpt_35_turbo_16k_0613 = Model(
name = 'gpt-3.5-turbo-16k-0613',
base_provider = 'openai',
best_provider = gpt_35_long.best_provider
)
gpt_35_turbo_0613 = Model(
name = 'gpt-3.5-turbo-0613',
base_provider = 'openai',
best_provider = gpt_35_turbo.best_provider
)
# gpt-4
gpt_4 = Model(
name = 'gpt-4',
base_provider = 'openai',
best_provider = IterListProvider([
Bing, Liaobots,
Bing, Chatgpt4Online
])
)
gpt_4_0613 = Model(
name = 'gpt-4-0613',
base_provider = 'openai',
best_provider = gpt_4.best_provider
)
gpt_4_32k = Model(
name = 'gpt-4-32k',
base_provider = 'openai',
best_provider = gpt_4.best_provider
)
gpt_4_32k_0613 = Model(
name = 'gpt-4-32k-0613',
base_provider = 'openai',
best_provider = gpt_4.best_provider
)
gpt_4_turbo = Model(
name = 'gpt-4-turbo',
base_provider = 'openai',
best_provider = Bing
best_provider = IterListProvider([
Bing, Liaobots, LiteIcoding
])
)
gpt_4o = Model(
name = 'gpt-4o',
base_provider = 'openai',
best_provider = IterListProvider([
You, Liaobots, Chatgpt4o, AI365VIP
You, Liaobots, Chatgpt4o, AI365VIP, OpenaiChat, Marsyoo, LiteIcoding, MagickPenAsk,
])
)
gpt_4o_mini = Model(
name = 'gpt-4o-mini',
base_provider = 'openai',
best_provider = IterListProvider([
DDG, Liaobots, OpenaiChat, You, FreeNetfly, MagickPenChat,
])
)
@@ -185,34 +166,40 @@ meta = Model(
best_provider = MetaAI
)
llama_2_70b_chat = Model(
name = "meta/llama-2-70b-chat",
base_provider = "meta",
best_provider = IterListProvider([ReplicateHome])
)
llama3_8b_instruct = Model(
llama_3_8b_instruct = Model(
name = "meta-llama/Meta-Llama-3-8B-Instruct",
base_provider = "meta",
best_provider = IterListProvider([DeepInfra, PerplexityLabs, Replicate])
)
llama3_70b_instruct = Model(
llama_3_70b_instruct = Model(
name = "meta-llama/Meta-Llama-3-70B-Instruct",
base_provider = "meta",
best_provider = IterListProvider([DeepInfra, PerplexityLabs, Replicate, HuggingChat, DDG])
best_provider = IterListProvider([DeepInfra, PerplexityLabs, Replicate])
)
codellama_34b_instruct = Model(
name = "codellama/CodeLlama-34b-Instruct-hf",
llama_3_70b_instruct = Model(
name = "meta/meta-llama-3-70b-instruct",
base_provider = "meta",
best_provider = HuggingChat
best_provider = IterListProvider([ReplicateHome, TeachAnything])
)
codellama_70b_instruct = Model(
name = "codellama/CodeLlama-70b-Instruct-hf",
llama_3_70b_chat_hf = Model(
name = "meta-llama/Llama-3-70b-chat-hf",
base_provider = "meta",
best_provider = IterListProvider([DeepInfra])
best_provider = IterListProvider([DDG])
)
llama_3_1_70b_instruct = Model(
name = "meta-llama/Meta-Llama-3.1-70B-Instruct",
base_provider = "meta",
best_provider = IterListProvider([HuggingChat, HuggingFace])
)
llama_3_1_405b_instruct_FP8 = Model(
name = "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8",
base_provider = "meta",
best_provider = IterListProvider([HuggingChat, HuggingFace])
)
@@ -220,13 +207,13 @@ codellama_70b_instruct = Model(
mixtral_8x7b = Model(
name = "mistralai/Mixtral-8x7B-Instruct-v0.1",
base_provider = "huggingface",
best_provider = IterListProvider([DeepInfra, HuggingFace, PerplexityLabs, HuggingChat, DDG])
best_provider = IterListProvider([DeepInfra, HuggingFace, PerplexityLabs, HuggingChat, DDG, ReplicateHome])
)
mistral_7b_v02 = Model(
name = "mistralai/Mistral-7B-Instruct-v0.2",
base_provider = "huggingface",
best_provider = IterListProvider([DeepInfra, HuggingFace, HuggingChat, ReplicateHome])
best_provider = IterListProvider([DeepInfra, HuggingFace, HuggingChat])
)
@@ -239,7 +226,7 @@ Nous_Hermes_2_Mixtral_8x7B_DPO = Model(
### 01-ai ###
Yi_1_5_34B_Chat = Model(
Yi_1_5_34B_chat = Model(
name = "01-ai/Yi-1.5-34B-Chat",
base_provider = "01-ai",
best_provider = IterListProvider([HuggingFace, HuggingChat])
@@ -265,10 +252,28 @@ gemini = Model(
gemini_pro = Model(
name = 'gemini-pro',
base_provider = 'Google',
best_provider = IterListProvider([GeminiPro, You, GeminiProChat])
best_provider = IterListProvider([GeminiPro, You, ChatGot, GeminiProChat, Liaobots, LiteIcoding])
)
gemini_flash = Model(
name = 'gemini-flash',
base_provider = 'Google',
best_provider = IterListProvider([Liaobots])
)
gemini_1_5 = Model(
name = 'gemini-1.5',
base_provider = 'Google',
best_provider = IterListProvider([LiteIcoding])
)
# gemma
gemma_2b_it = Model(
name = 'gemma-2b-it',
base_provider = 'Google',
best_provider = IterListProvider([ReplicateHome])
)
gemma_2_9b_it = Model(
name = 'gemma-2-9b-it',
base_provider = 'Google',
@@ -283,28 +288,58 @@ gemma_2_27b_it = Model(
### Anthropic ###
claude_v2 = Model(
name = 'claude-v2',
base_provider = 'anthropic',
best_provider = IterListProvider([Vercel])
claude_2 = Model(
name = 'claude-2',
base_provider = 'Anthropic',
best_provider = IterListProvider([You])
)
claude_2_0 = Model(
name = 'claude-2.0',
base_provider = 'Anthropic',
best_provider = IterListProvider([Liaobots])
)
claude_2_1 = Model(
name = 'claude-2.1',
base_provider = 'Anthropic',
best_provider = IterListProvider([Liaobots])
)
claude_3_opus = Model(
name = 'claude-3-opus',
base_provider = 'anthropic',
best_provider = You
base_provider = 'Anthropic',
best_provider = IterListProvider([You, Liaobots])
)
claude_3_sonnet = Model(
name = 'claude-3-sonnet',
base_provider = 'anthropic',
best_provider = You
base_provider = 'Anthropic',
best_provider = IterListProvider([You, Liaobots])
)
claude_3_5_sonnet = Model(
name = 'claude-3-5-sonnet',
base_provider = 'Anthropic',
best_provider = IterListProvider([Liaobots])
)
claude_3_haiku = Model(
name = 'claude-3-haiku',
base_provider = 'anthropic',
best_provider = IterListProvider([DDG, AI365VIP])
base_provider = 'Anthropic',
best_provider = IterListProvider([DDG, AI365VIP, Liaobots])
)
claude_3 = Model(
name = 'claude-3',
base_provider = 'Anthropic',
best_provider = IterListProvider([LiteIcoding])
)
claude_3_5 = Model(
name = 'claude-3.5',
base_provider = 'Anthropic',
best_provider = IterListProvider([LiteIcoding])
)
@@ -348,6 +383,58 @@ command_r_plus = Model(
)
### iFlytek ###
SparkDesk_v1_1 = Model(
name = 'SparkDesk-v1.1',
base_provider = 'iFlytek',
best_provider = IterListProvider([FreeChatgpt])
)
### DeepSeek ###
deepseek_coder = Model(
name = 'deepseek-coder',
base_provider = 'DeepSeek',
best_provider = IterListProvider([FreeChatgpt])
)
deepseek_chat = Model(
name = 'deepseek-chat',
base_provider = 'DeepSeek',
best_provider = IterListProvider([FreeChatgpt])
)
### Qwen ###
Qwen2_7B_instruct = Model(
name = 'Qwen2-7B-Instruct',
base_provider = 'Qwen',
best_provider = IterListProvider([FreeChatgpt])
)
### Zhipu AI ###
glm4_9B_chat = Model(
name = 'glm4-9B-chat',
base_provider = 'Zhipu AI',
best_provider = IterListProvider([FreeChatgpt])
)
chatglm3_6B = Model(
name = 'chatglm3-6B',
base_provider = 'Zhipu AI',
best_provider = IterListProvider([FreeChatgpt])
)
### 01-ai ###
Yi_1_5_9B_chat = Model(
name = 'Yi-1.5-9B-Chat',
base_provider = '01-ai',
best_provider = IterListProvider([FreeChatgpt])
)
### Other ###
pi = Model(
name = 'pi',
@@ -364,14 +451,27 @@ pi = Model(
sdxl = Model(
name = 'stability-ai/sdxl',
base_provider = 'Stability AI',
best_provider = IterListProvider([ReplicateHome, DeepInfraImage])
best_provider = IterListProvider([DeepInfraImage])
)
### AI Forever ###
kandinsky_2_2 = Model(
name = 'ai-forever/kandinsky-2.2',
base_provider = 'AI Forever',
stable_diffusion_3 = Model(
name = 'stability-ai/stable-diffusion-3',
base_provider = 'Stability AI',
best_provider = IterListProvider([ReplicateHome])
)
sdxl_lightning_4step = Model(
name = 'bytedance/sdxl-lightning-4step',
base_provider = 'Stability AI',
best_provider = IterListProvider([ReplicateHome])
)
playground_v2_5_1024px_aesthetic = Model(
name = 'playgroundai/playground-v2.5-1024px-aesthetic',
base_provider = 'Stability AI',
best_provider = IterListProvider([ReplicateHome])
)
@@ -385,113 +485,124 @@ class ModelUtils:
"""
convert: dict[str, Model] = {
############
### Text ###
############
### OpenAI ###
### GPT-3.5 / GPT-4 ###
############
### Text ###
############
### OpenAI ###
### GPT-3.5 / GPT-4 ###
# gpt-3.5
'gpt-3.5-turbo' : gpt_35_turbo,
'gpt-3.5-turbo-0613' : gpt_35_turbo_0613,
'gpt-3.5-turbo-16k' : gpt_35_turbo_16k,
'gpt-3.5-turbo-16k-0613' : gpt_35_turbo_16k_0613,
'gpt-3.5-turbo': gpt_35_turbo,
'gpt-3.5-long': gpt_35_long,
# gpt-4
'gpt-4o' : gpt_4o,
'gpt-4o-mini' : gpt_4o_mini,
'gpt-4' : gpt_4,
'gpt-4-0613' : gpt_4_0613,
'gpt-4-32k' : gpt_4_32k,
'gpt-4-32k-0613' : gpt_4_32k_0613,
'gpt-4-turbo' : gpt_4_turbo,
### Meta ###
### Meta ###
"meta-ai": meta,
'llama-2-70b-chat': llama_2_70b_chat,
'llama3-8b': llama3_8b_instruct, # alias
'llama3-70b': llama3_70b_instruct, # alias
'llama3-8b-instruct' : llama3_8b_instruct,
'llama3-70b-instruct': llama3_70b_instruct,
'codellama-34b-instruct': codellama_34b_instruct,
'codellama-70b-instruct': codellama_70b_instruct,
'llama-3-8b-instruct': llama_3_8b_instruct,
'llama-3-70b-instruct': llama_3_70b_instruct,
'llama-3-70b-chat': llama_3_70b_chat_hf,
'llama-3-70b-instruct': llama_3_70b_instruct,
'llama-3.1-70b-instruct': llama_3_1_70b_instruct,
'llama-3.1-405b-instruct': llama_3_1_405b_instruct_FP8,
### Mistral (Opensource) ###
'mixtral-8x7b': mixtral_8x7b,
'mistral-7b-v02': mistral_7b_v02,
### NousResearch ###
'Nous-Hermes-2-Mixtral-8x7B-DPO': Nous_Hermes_2_Mixtral_8x7B_DPO,
### 01-ai ###
'Yi-1.5-34B-Chat': Yi_1_5_34B_Chat,
### Microsoft ###
'Phi-3-mini-4k-instruct': Phi_3_mini_4k_instruct,
'Nous-Hermes-2-Mixtral-8x7B-DPO': Nous_Hermes_2_Mixtral_8x7B_DPO,
### 01-ai ###
'Yi-1.5-34b-chat': Yi_1_5_34B_chat,
### Microsoft ###
'Phi-3-mini-4k-instruct': Phi_3_mini_4k_instruct,
### Google ###
# gemini
'gemini': gemini,
'gemini-pro': gemini_pro,
'gemini-pro': gemini_1_5,
'gemini-flash': gemini_flash,
# gemma
'gemma-2-9b-it': gemma_2_9b_it,
'gemma-2-27b-it': gemma_2_27b_it,
'gemma-2b': gemma_2b_it,
'gemma-2-9b': gemma_2_9b_it,
'gemma-2-27b': gemma_2_27b_it,
### Anthropic ###
'claude-v2': claude_v2,
'claude-2': claude_2,
'claude-2.0': claude_2_0,
'claude-2.1': claude_2_1,
'claude-3-opus': claude_3_opus,
'claude-3-sonnet': claude_3_sonnet,
'claude-3-5-sonnet': claude_3_5_sonnet,
'claude-3-haiku': claude_3_haiku,
'claude-3-opus': claude_3,
'claude-3-5-sonnet': claude_3_5,
### Reka AI ###
'reka': reka_core,
### NVIDIA ###
'nemotron-4-340b-instruct': nemotron_4_340b_instruct,
### Blackbox ###
'blackbox': blackbox,
### CohereForAI ###
'command-r+': command_r_plus,
### Blackbox ###
'blackbox': blackbox,
### CohereForAI ###
'command-r+': command_r_plus,
### Databricks ###
'dbrx-instruct': dbrx_instruct,
### GigaChat ###
### GigaChat ###
'gigachat': gigachat,
### iFlytek ###
'SparkDesk-v1.1': SparkDesk_v1_1,
### DeepSeek ###
'deepseek-coder': deepseek_coder,
'deepseek-chat': deepseek_chat,
### Qwen ###
'Qwen2-7b-instruct': Qwen2_7B_instruct,
### Zhipu AI ###
'glm4-9b-chat': glm4_9B_chat,
'chatglm3-6b': chatglm3_6B,
### 01-ai ###
'Yi-1.5-9b-chat': Yi_1_5_9B_chat,
# Other
'pi': pi,
'pi': pi,
#############
### Image ###
#############
### Stability AI ###
'sdxl': sdxl,
### Image ###
#############
### AI Forever ###
'kandinsky-2.2': kandinsky_2_2,
### Stability AI ###
'sdxl': sdxl,
'stable-diffusion-3': stable_diffusion_3,
### ByteDance ###
'sdxl-lightning': sdxl_lightning_4step,
### Playground ###
'playground-v2.5': playground_v2_5_1024px_aesthetic,
}
_all_models = list(ModelUtils.convert.keys())
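With the registry above, a model name resolves to a Model record whose best_provider drives provider selection; a minimal lookup sketch (assuming the g4f.models module layout shown here):
from g4f.models import ModelUtils

model = ModelUtils.convert["gpt-4o-mini"]
print(model.name, model.base_provider)  # gpt-4o-mini openai
print(model.best_provider)              # e.g. an IterListProvider over DDG, Liaobots, OpenaiChat, ...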