Mirror of https://github.com/xtekky/gpt4free.git (synced 2024-12-24 03:23:49 +03:00)
Fix for 500 Internal Server Error (#2199); the Blackbox provider now supports the Gemini and LLaMA 3.1 models (#2198), with some changes from #2196
This commit is contained in:
parent a338ed5883 · commit bda2d67927
@@ -35,31 +35,35 @@ class AI365VIP(AsyncGeneratorProvider, ProviderModelMixin):
             "accept": "*/*",
             "accept-language": "en-US,en;q=0.9",
             "content-type": "application/json",
             "dnt": "1",
-            "origin": "https://chat.ai365vip.com",
+            "origin": cls.url,
             "priority": "u=1, i",
-            "referer": "https://chat.ai365vip.com/en",
-            "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+            "referer": f"{cls.url}/en",
+            "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
+            "sec-ch-ua-arch": '"x86"',
+            "sec-ch-ua-bitness": '"64"',
+            "sec-ch-ua-full-version": '"127.0.6533.119"',
+            "sec-ch-ua-full-version-list": '"Chromium";v="127.0.6533.119", "Not)A;Brand";v="99.0.0.0"',
             "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-model": '""',
             "sec-ch-ua-platform": '"Linux"',
+            "sec-ch-ua-platform-version": '"4.19.276"',
             "sec-fetch-dest": "empty",
             "sec-fetch-mode": "cors",
             "sec-fetch-site": "same-origin",
-            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
+            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
         }
         async with ClientSession(headers=headers) as session:
             data = {
-                "model": {
-                    "id": model,
-                    "name": {
-                        "gpt-3.5-turbo": "GPT-3.5",
-                        "claude-3-haiku-20240307": "claude-3-haiku",
-                        "gpt-4o": "GPT-4O"
-                    }.get(model, model),
-                },
-                "messages": [{"role": "user", "content": format_prompt(messages)}],
-                "prompt": "You are a helpful assistant.",
-            }
+                "model": {
+                    "id": model,
+                    "name": "GPT-3.5",
+                    "maxLength": 3000,
+                    "tokenLimit": 2048
+                },
+                "messages": [{"role": "user", "content": format_prompt(messages)}],
+                "key": "",
+                "prompt": "You are a helpful assistant.",
+                "temperature": 1
+            }
             async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
                 response.raise_for_status()
                 async for chunk in response.content:
g4f/Provider/AiChatOnline.py (new file, 64 lines)
@@ -0,0 +1,64 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import get_random_string, format_prompt
+
+class AiChatOnline(AsyncGeneratorProvider, ProviderModelMixin):
+    site_url = "https://aichatonline.org"
+    url = "https://aichatonlineorg.erweima.ai"
+    api_endpoint = "/aichatonline/api/chat/gpt"
+    working = True
+    supports_gpt_35_turbo = True
+    supports_gpt_4 = True
+    default_model = 'gpt-4o-mini'
+    supports_message_history = False
+
+    @classmethod
+    async def grab_token(
+        cls,
+        session: ClientSession,
+        proxy: str
+    ):
+        async with session.get(f'https://aichatonlineorg.erweima.ai/api/v1/user/getUniqueId?canvas=-{get_random_string()}', proxy=proxy) as response:
+            response.raise_for_status()
+            return (await response.json())['data']
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        headers = {
+            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
+            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+            "Accept-Encoding": "gzip, deflate, br",
+            "Referer": f"{cls.url}/chatgpt/chat/",
+            "Content-Type": "application/json",
+            "Origin": cls.url,
+            "Alt-Used": "aichatonline.org",
+            "Connection": "keep-alive",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "TE": "trailers"
+        }
+        async with ClientSession(headers=headers) as session:
+            data = {
+                "conversationId": get_random_string(),
+                "prompt": format_prompt(messages),
+            }
+            headers['UniqueId'] = await cls.grab_token(session, proxy)
+            async with session.post(f"{cls.url}{cls.api_endpoint}", headers=headers, json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                async for chunk in response.content:
+                    try:
+                        yield json.loads(chunk)['data']['message']
+                    except:
+                        continue
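Reviewer note: a minimal usage sketch for the new provider (hypothetical driver script; the message shape follows g4f's Messages typing, and the import works because __init__.py re-exports the class, see below):

    import asyncio
    from g4f.Provider import AiChatOnline

    async def main():
        messages = [{"role": "user", "content": "Hello"}]
        # create_async_generator yields message text chunk by chunk
        async for token in AiChatOnline.create_async_generator("gpt-4o-mini", messages):
            print(token, end="", flush=True)

    asyncio.run(main())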
@@ -14,7 +14,13 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://www.blackbox.ai"
     working = True
     default_model = 'blackbox'
+    models = [
+        default_model,
+        "gemini-1.5-flash",
+        "llama-3.1-8b",
+        'llama-3.1-70b',
+        'llama-3.1-405b',
+    ]
 
     @classmethod
     async def create_async_generator(
         cls,
@@ -28,7 +34,8 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         if image is not None:
             messages[-1]["data"] = {
                 "fileText": image_name,
-                "imageBase64": to_data_uri(image)
+                "imageBase64": to_data_uri(image),
+                "title": str(uuid.uuid4())
             }
 
         headers = {
@@ -48,7 +55,13 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         async with ClientSession(headers=headers) as session:
             random_id = secrets.token_hex(16)
             random_user_id = str(uuid.uuid4())
+
+            model_id_map = {
+                "blackbox": {},
+                "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
+                "llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
+                'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
+                'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"}
+            }
             data = {
                 "messages": messages,
                 "id": random_id,
@@ -62,6 +75,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
                 "webSearchMode": False,
                 "userSystemPrompt": "",
                 "githubToken": None,
+                "trendingAgentModel": model_id_map[model],  # if you actually test this on the site, just ask each model "yo", weird behavior imo
                 "maxTokens": None
             }
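Reviewer note: this is the change behind #2198. Picking one of the new model names routes the request through model_id_map; a sketch of how a caller would exercise it (hypothetical driver, assuming the provider API above):

    import asyncio
    from g4f.Provider import Blackbox

    async def main():
        messages = [{"role": "user", "content": "yo"}]
        # "gemini-1.5-flash" maps to {'mode': True, 'id': 'Gemini'} in model_id_map
        async for chunk in Blackbox.create_async_generator("gemini-1.5-flash", messages):
            print(chunk, end="")

    asyncio.run(main())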
@@ -14,8 +14,8 @@ class Chatgpt4Online(AsyncGeneratorProvider):
     working = True
     supports_gpt_4 = True
 
-    async def get_nonce():
-        async with ClientSession() as session:
+    async def get_nonce(headers: dict) -> str:
+        async with ClientSession(headers=headers) as session:
             async with session.post(f"https://chatgpt4online.org/wp-json/mwai/v1/start_session") as response:
                 return (await response.json())["restNonce"]
 
@@ -42,9 +42,8 @@ class Chatgpt4Online(AsyncGeneratorProvider):
             "sec-fetch-mode": "cors",
             "sec-fetch-site": "same-origin",
             "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
-            "x-wp-nonce": await cls.get_nonce(),
         }
-
+        headers['x-wp-nonce'] = await cls.get_nonce(headers)
         async with ClientSession(headers=headers) as session:
             prompt = format_prompt(messages)
             data = {
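Reviewer note: the nonce is now requested with the same browser-like headers the chat call will use, then attached as x-wp-nonce. The flow in isolation (a sketch, assuming aiohttp and the endpoint shown in the diff):

    import asyncio
    from aiohttp import ClientSession

    async def get_nonce(headers: dict) -> str:
        # the WordPress "mwai" plugin hands out a REST nonce per session
        async with ClientSession(headers=headers) as session:
            async with session.post("https://chatgpt4online.org/wp-json/mwai/v1/start_session") as response:
                return (await response.json())["restNonce"]

    async def main():
        headers = {"user-agent": "Mozilla/5.0"}
        headers["x-wp-nonce"] = await get_nonce(headers)
        # subsequent chat requests are sent with these headers

    asyncio.run(main())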
@@ -2,21 +2,25 @@ from __future__ import annotations
 
 import re
 import json
+import asyncio
 from ..requests import StreamSession, raise_for_status
-from ..typing import Messages
-from .base_provider import AsyncProvider
+from ..typing import Messages, AsyncGenerator
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from .helper import format_prompt
 
-class ChatgptFree(AsyncProvider):
+class ChatgptFree(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://chatgptfree.ai"
     supports_gpt_35_turbo = True
     supports_gpt_4 = True
     working = True
     _post_id = None
     _nonce = None
+    default_model = 'gpt-4o-mini-2024-07-18'
+    model_aliases = {
+        "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
+    }
 
     @classmethod
-    async def create_async(
+    async def create_async_generator(
         cls,
         model: str,
         messages: Messages,
@@ -24,7 +28,7 @@ class ChatgptFree(AsyncProvider):
         timeout: int = 120,
         cookies: dict = None,
         **kwargs
-    ) -> str:
+    ) -> AsyncGenerator[str, None]:
         headers = {
             'authority': 'chatgptfree.ai',
             'accept': '*/*',
@@ -39,7 +43,6 @@ class ChatgptFree(AsyncProvider):
             'sec-fetch-site': 'same-origin',
             'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
         }
-
         async with StreamSession(
             headers=headers,
             cookies=cookies,
@@ -50,19 +53,11 @@ class ChatgptFree(AsyncProvider):
 
         if not cls._nonce:
             async with session.get(f"{cls.url}/") as response:
-
                 await raise_for_status(response)
                 response = await response.text()
-
                 result = re.search(r'data-post-id="([0-9]+)"', response)
                 if not result:
                     raise RuntimeError("No post id found")
                 cls._post_id = result.group(1)
-
                 result = re.search(r'data-nonce="(.*?)"', response)
                 if result:
                     cls._nonce = result.group(1)
-
                 else:
                     raise RuntimeError("No nonce found")
 
@@ -75,10 +70,30 @@ class ChatgptFree(AsyncProvider):
             "message": prompt,
             "bot_id": "0"
         }
-        async with session.get(f"{cls.url}/wp-admin/admin-ajax.php", params=data, cookies=cookies) as response:
+        async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, cookies=cookies) as response:
             await raise_for_status(response)
-            full_answer = ""
-            for line in ((await response.text()).splitlines())[:-1]:
-                if line.startswith("data:") and "[DONE]" not in line:
-                    full_answer += json.loads(line[5:])['choices'][0]['delta'].get('content', "")
-            return full_answer
+            buffer = ""
+            async for line in response.iter_lines():
+                line = line.decode('utf-8').strip()
+                if line.startswith('data: '):
+                    data = line[6:]
+                    if data == '[DONE]':
+                        break
+                    try:
+                        json_data = json.loads(data)
+                        content = json_data['choices'][0]['delta'].get('content', '')
+                        if content:
+                            yield content
+                    except json.JSONDecodeError:
+                        continue
+                elif line:
+                    buffer += line
+
+            if buffer:
+                try:
+                    json_response = json.loads(buffer)
+                    if 'data' in json_response:
+                        yield json_response['data']
+                except json.JSONDecodeError:
+                    print(f"Failed to decode final JSON. Buffer content: {buffer}")
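Reviewer note: the rework replaces the buffered create_async (collect everything, return one string) with a streaming generator that parses server-sent events line by line. The parsing core as a standalone sketch (plain Python; the byte lines stand in for response.iter_lines()):

    import json
    from typing import Iterable, Iterator

    def iter_sse_content(lines: Iterable[bytes]) -> Iterator[str]:
        # Yield delta text from OpenAI-style "data: {...}" lines, stopping at [DONE]
        for raw in lines:
            line = raw.decode("utf-8").strip()
            if not line.startswith("data: "):
                continue
            payload = line[6:]
            if payload == "[DONE]":
                break
            try:
                chunk = json.loads(payload)
            except json.JSONDecodeError:
                continue
            content = chunk["choices"][0]["delta"].get("content", "")
            if content:
                yield content

    sample = [b'data: {"choices":[{"delta":{"content":"Hello"}}]}', b"data: [DONE]"]
    print("".join(iter_sse_content(sample)))  # Hello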
@@ -25,7 +25,7 @@ class DDG(AsyncGeneratorProvider, ProviderModelMixin):
         "gpt-4o": "gpt-4o-mini",
         "claude-3-haiku": "claude-3-haiku-20240307",
         "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
-        "mixtral-8x7B": "mistralai/Mixtral-8x7B-Instruct-v0.1"
+        "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1"
     }
 
     # Obfuscated URLs and headers
@@ -11,11 +11,7 @@ class DeepInfra(Openai):
     needs_auth = True
     supports_stream = True
     supports_message_history = True
-    default_model = "meta-llama/Meta-Llama-3-70B-Instruct"
-    default_vision_model = "llava-hf/llava-1.5-7b-hf"
-    model_aliases = {
-        'dbrx-instruct': 'databricks/dbrx-instruct',
-    }
+    default_model = "meta-llama/Meta-Llama-3.1-70B-Instruct"
 
     @classmethod
     def get_models(cls):
@@ -16,13 +16,31 @@ class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
         'gpt-3.5-turbo',
         'SparkDesk-v1.1',
         'deepseek-coder',
         '@cf/qwen/qwen1.5-14b-chat-awq',
         'deepseek-chat',
         'Qwen2-7B-Instruct',
         'glm4-9B-chat',
         'chatglm3-6B',
         'Yi-1.5-9B-Chat',
     ]
+    model_aliases = {
+        "qwen-1.5-14b": "@cf/qwen/qwen1.5-14b-chat-awq",
+        "sparkdesk-v1.1": "SparkDesk-v1.1",
+        "qwen2-7b": "Qwen2-7B-Instruct",
+        "glm4-9b": "glm4-9B-chat",
+        "chatglm3-6b": "chatglm3-6B",
+        "yi-1.5-9b": "Yi-1.5-9B-Chat",
+    }
+
+    @classmethod
+    def get_model(cls, model: str) -> str:
+        if model in cls.models:
+            return model
+        elif model.lower() in cls.model_aliases:
+            return cls.model_aliases[model.lower()]
+        else:
+            return cls.default_model
+
     @classmethod
     async def create_async_generator(
         cls,
@@ -46,6 +64,7 @@ class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
             "sec-fetch-site": "same-origin",
             "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
         }
+        model = cls.get_model(model)
         async with ClientSession(headers=headers) as session:
             prompt = format_prompt(messages)
             data = {
@@ -74,5 +93,6 @@ class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
                         chunk = json.loads(line_str[6:])
                         delta_content = chunk.get("choices", [{}])[0].get("delta", {}).get("content", "")
                         accumulated_text += delta_content
+                        yield delta_content
                     except json.JSONDecodeError:
                         pass
@@ -6,23 +6,25 @@ import random
 from typing import AsyncGenerator, Optional, Dict, Any
 from ..typing import Messages
 from ..requests import StreamSession, raise_for_status
-from .base_provider import AsyncGeneratorProvider
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from ..errors import RateLimitError
 
 # Constants
 DOMAINS = [
     "https://s.aifree.site",
-    "https://v.aifree.site/"
+    "https://v.aifree.site/",
+    "https://al.aifree.site/",
+    "https://u4.aifree.site/"
 ]
 RATE_LIMIT_ERROR_MESSAGE = "当前地区当日额度已消耗完"  # server message: "today's quota for this region has been used up"
 
 
-class FreeGpt(AsyncGeneratorProvider):
-    url: str = "https://freegptsnav.aifree.site"
-    working: bool = True
-    supports_message_history: bool = True
-    supports_system_message: bool = True
-    supports_gpt_35_turbo: bool = True
+class FreeGpt(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://freegptsnav.aifree.site"
+    working = True
+    supports_message_history = True
+    supports_system_message = True
+    default_model = 'llama-3.1-70b'
 
     @classmethod
     async def create_async_generator(
@@ -54,8 +54,8 @@ class FreeNetfly(AsyncGeneratorProvider, ProviderModelMixin):
             "top_p": 1
         }
 
-        max_retries = 3
-        retry_delay = 1
+        max_retries = 5
+        retry_delay = 2
 
         for attempt in range(max_retries):
             try:
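Reviewer note: the retry budget widens from 3 attempts at 1 s to 5 attempts at 2 s. The loop body is not part of the hunk; a plausible shape, as a sketch (the growing delay is an assumption, not taken from this diff):

    import asyncio

    async def request_with_retries(send, max_retries: int = 5, retry_delay: float = 2):
        # send: async callable performing one HTTP attempt (hypothetical parameter)
        for attempt in range(max_retries):
            try:
                return await send()
            except Exception:
                if attempt == max_retries - 1:
                    raise
                await asyncio.sleep(retry_delay * (attempt + 1))  # assumed growth; the diff only sets the base delay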
@@ -2,16 +2,16 @@ from __future__ import annotations
 
 import json, requests, re
 
 from curl_cffi import requests as cf_reqs
 from ..typing import CreateResult, Messages
 from .base_provider import ProviderModelMixin, AbstractProvider
 from .helper import format_prompt
 
 class HuggingChat(AbstractProvider, ProviderModelMixin):
     url = "https://huggingface.co/chat"
     working = True
     supports_stream = True
-    default_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
+    default_model = "meta-llama/Meta-Llama-3.1-70B-Instruct"
     models = [
         'meta-llama/Meta-Llama-3.1-70B-Instruct',
         'meta-llama/Meta-Llama-3.1-405B-Instruct-FP8',
@@ -19,24 +19,41 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
         'mistralai/Mixtral-8x7B-Instruct-v0.1',
         'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
         '01-ai/Yi-1.5-34B-Chat',
-        'mistralai/Mistral-7B-Instruct-v0.2',
+        'mistralai/Mistral-7B-Instruct-v0.3',
         'microsoft/Phi-3-mini-4k-instruct',
     ]
 
     model_aliases = {
-        "mistralai/Mistral-7B-Instruct-v0.1": "mistralai/Mistral-7B-Instruct-v0.2"
+        "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct",
+        "llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8",
+        "command-r-plus": "CohereForAI/c4ai-command-r-plus",
+        "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
+        "mixtral-8x7b": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
+        "yi-1.5-34b": "01-ai/Yi-1.5-34B-Chat",
+        "mistral-7b": "mistralai/Mistral-7B-Instruct-v0.3",
+        "phi-3-mini-4k": "microsoft/Phi-3-mini-4k-instruct",
+    }
+
+    @classmethod
+    def get_model(cls, model: str) -> str:
+        if model in cls.models:
+            return model
+        elif model in cls.model_aliases:
+            return cls.model_aliases[model]
+        else:
+            return cls.default_model
 
     @classmethod
     def create_completion(
         cls,
         model: str,
         messages: Messages,
         stream: bool,
-        **kwargs) -> CreateResult:
-
-        if (model in cls.models) :
+        **kwargs
+    ) -> CreateResult:
+        model = cls.get_model(model)
+
+        if model in cls.models:
             session = cf_reqs.Session()
             session.headers = {
                 'accept': '*/*',
@@ -54,29 +71,24 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
                 'sec-fetch-site': 'same-origin',
                 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
             }
-
-            print(model)
             json_data = {
                 'model': model,
             }
 
             response = session.post('https://huggingface.co/chat/conversation', json=json_data)
             conversationId = response.json()['conversationId']
 
             response = session.get(f'https://huggingface.co/chat/conversation/{conversationId}/__data.json?x-sveltekit-invalidated=01',)
-
             data: list = (response.json())["nodes"][1]["data"]
             keys: list[int] = data[data[0]["messages"]]
             message_keys: dict = data[keys[0]]
             messageId: str = data[message_keys["id"]]
 
             settings = {
-                "inputs":format_prompt(messages),
-                "id":messageId,
-                "is_retry":False,
-                "is_continue":False,
-                "web_search":False,
-                "tools":[]
+                "inputs": format_prompt(messages),
+                "id": messageId,
+                "is_retry": False,
+                "is_continue": False,
+                "web_search": False,
+                "tools": []
             }
 
             headers = {
@@ -96,9 +108,8 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
                 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
             }
-
-
             files = {
                 'data': (None, json.dumps(settings, separators=(',', ':'))),
             }
 
             response = requests.post(f'https://huggingface.co/chat/conversation/{conversationId}',
@@ -106,7 +117,6 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
                 headers=headers,
                 files=files,
             )
-
             first_token = True
             for line in response.iter_lines():
                 line = json.loads(line)
@@ -119,11 +129,10 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
                 if first_token:
                     token = token.lstrip().replace('\u0000', '')
                     first_token = False
-
                 else:
                     token = token.replace('\u0000', '')
-
-                yield (token)
+                yield token
 
             elif line["type"] == "finalAnswer":
                 break
@@ -1,20 +1,17 @@
 from __future__ import annotations
 
 import json
 from aiohttp import ClientSession, BaseConnector
 
 from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from .helper import get_connector
 from ..errors import RateLimitError, ModelNotFoundError
 from ..requests.raise_for_status import raise_for_status
 
 class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://huggingface.co/chat"
     working = True
     needs_auth = True
     supports_message_history = True
-    default_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
+    default_model = "meta-llama/Meta-Llama-3.1-70B-Instruct"
     models = [
         'meta-llama/Meta-Llama-3.1-70B-Instruct',
         'meta-llama/Meta-Llama-3.1-405B-Instruct-FP8',
@@ -22,10 +19,30 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
         'mistralai/Mixtral-8x7B-Instruct-v0.1',
         'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
         '01-ai/Yi-1.5-34B-Chat',
-        'mistralai/Mistral-7B-Instruct-v0.2',
+        'mistralai/Mistral-7B-Instruct-v0.3',
         'microsoft/Phi-3-mini-4k-instruct',
     ]
 
+    model_aliases = {
+        "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct",
+        "llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8",
+        "command-r-plus": "CohereForAI/c4ai-command-r-plus",
+        "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
+        "mixtral-8x7b": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
+        "yi-1.5-34b": "01-ai/Yi-1.5-34B-Chat",
+        "mistral-7b": "mistralai/Mistral-7B-Instruct-v0.3",
+        "phi-3-mini-4k": "microsoft/Phi-3-mini-4k-instruct",
+    }
+
+    @classmethod
+    def get_model(cls, model: str) -> str:
+        if model in cls.models:
+            return model
+        elif model in cls.model_aliases:
+            return cls.model_aliases[model]
+        else:
+            return cls.default_model
+
     @classmethod
     async def create_async_generator(
         cls,
@@ -40,10 +57,26 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
         temperature: float = 0.7,
         **kwargs
     ) -> AsyncResult:
-        model = cls.get_model(model) if not model else model
-        headers = {}
+        model = cls.get_model(model)
+        headers = {
+            'accept': '*/*',
+            'accept-language': 'en',
+            'cache-control': 'no-cache',
+            'origin': 'https://huggingface.co',
+            'pragma': 'no-cache',
+            'priority': 'u=1, i',
+            'referer': 'https://huggingface.co/chat/',
+            'sec-ch-ua': '"Not)A;Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"macOS"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
+        }
         if api_key is not None:
             headers["Authorization"] = f"Bearer {api_key}"
 
         params = {
             "return_full_text": False,
             "max_new_tokens": max_new_tokens,
@@ -51,6 +84,7 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
             **kwargs
         }
         payload = {"inputs": format_prompt(messages), "parameters": params, "stream": stream}
+
         async with ClientSession(
             headers=headers,
             connector=get_connector(connector, proxy)
@@ -72,7 +106,6 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
                         yield chunk
                 else:
                     yield (await response.json())[0]["generated_text"].strip()
-
 def format_prompt(messages: Messages) -> str:
     system_messages = [message["content"] for message in messages if message["role"] == "system"]
     question = " ".join([messages[-1]["content"], *system_messages])
@@ -81,4 +114,4 @@ def format_prompt(messages: Messages) -> str:
         for idx, message in enumerate(messages)
         if message["role"] == "assistant"
     ])
-    return f"{history}<s>[INST] {question} [/INST]"
+    return f"{history}<s>[INST] {question} [/INST]"
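Reviewer note: HuggingChat and HuggingFace now share the same three-branch get_model resolution. Note that their model_aliases literal spells the "mixtral-8x7b" key twice, so in Python the second value (the Nous-Hermes DPO checkpoint) silently wins. A self-contained check of the resolution order (stub class, no network):

    class _AliasDemo:
        default_model = "meta-llama/Meta-Llama-3.1-70B-Instruct"
        models = ["meta-llama/Meta-Llama-3.1-70B-Instruct"]
        model_aliases = {"llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct"}

        @classmethod
        def get_model(cls, model: str) -> str:
            # exact id first, then alias, then fall back to the default
            if model in cls.models:
                return model
            elif model in cls.model_aliases:
                return cls.model_aliases[model]
            return cls.default_model

    assert _AliasDemo.get_model("llama-3.1-70b") == "meta-llama/Meta-Llama-3.1-70B-Instruct"
    assert _AliasDemo.get_model("nonexistent") == _AliasDemo.default_model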
@@ -4,7 +4,7 @@ import json
 from typing import AsyncGenerator, Optional, List, Dict, Union, Any
 from aiohttp import ClientSession, BaseConnector, ClientResponse
 
-from ..typing import AsyncResult, Messages
+from ..typing import Messages
 from .base_provider import AsyncGeneratorProvider
 from .helper import get_random_string, get_connector
 from ..requests import raise_for_status
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 import uuid
-
+import requests
 from aiohttp import ClientSession, BaseConnector
 
 from ..typing import AsyncResult, Messages
@@ -9,74 +9,6 @@ from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from .helper import get_connector
 from ..requests import raise_for_status
 
-models = {
-    "gpt-4o-mini-free": {
-        "id": "gpt-4o-mini-free",
-        "name": "GPT-4o-Mini-Free",
-        "model": "ChatGPT",
-        "provider": "OpenAI",
-        "maxLength": 31200,
-        "tokenLimit": 7800,
-        "context": "8K",
-    },
-    "gpt-4o-mini": {
-        "id": "gpt-4o-mini",
-        "name": "GPT-4o-Mini",
-        "model": "ChatGPT",
-        "provider": "OpenAI",
-        "maxLength": 260000,
-        "tokenLimit": 126000,
-        "context": "128K",
-    },
-    "gpt-4o-free": {
-        "context": "8K",
-        "id": "gpt-4o-free",
-        "maxLength": 31200,
-        "model": "ChatGPT",
-        "name": "GPT-4o-free",
-        "provider": "OpenAI",
-        "tokenLimit": 7800,
-    },
-    "gpt-4-turbo-2024-04-09": {
-        "id": "gpt-4-turbo-2024-04-09",
-        "name": "GPT-4-Turbo",
-        "model": "ChatGPT",
-        "provider": "OpenAI",
-        "maxLength": 260000,
-        "tokenLimit": 126000,
-        "context": "128K",
-    },
-    "gpt-4o": {
-        "context": "128K",
-        "id": "gpt-4o",
-        "maxLength": 124000,
-        "model": "ChatGPT",
-        "name": "GPT-4o",
-        "provider": "OpenAI",
-        "tokenLimit": 62000,
-    },
-    "gpt-4-0613": {
-        "id": "gpt-4-0613",
-        "name": "GPT-4",
-        "model": "ChatGPT",
-        "provider": "OpenAI",
-        "maxLength": 260000,
-        "tokenLimit": 126000,
-        "context": "128K",
-    },
-    "gpt-4-turbo": {
-        "id": "gpt-4-turbo",
-        "name": "GPT-4-Turbo",
-        "model": "ChatGPT",
-        "provider": "OpenAI",
-        "maxLength": 260000,
-        "tokenLimit": 126000,
-        "context": "128K",
-    },
-}
-
-
 class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://liaobots.site"
     working = True
@@ -85,25 +17,66 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
     supports_gpt_35_turbo = True
     supports_gpt_4 = True
     default_model = "gpt-4o"
-    models = list(models.keys())
+    models = None
     model_aliases = {
         "gpt-4o-mini": "gpt-4o-mini-free",
-        "gpt-4o": "gpt-4o-free",
+        "gpt-4o": "gpt-4o-2024-08-06",
         "gpt-4-turbo": "gpt-4-turbo-2024-04-09",
-        "gpt-4-": "gpt-4-0613",
+        "gpt-4": "gpt-4-0613",
+
+        "claude-3-opus": "claude-3-opus-20240229",
+        "claude-3-opus": "claude-3-opus-20240229-aws",
+        "claude-3-opus": "claude-3-opus-20240229-gcp",
+        "claude-3-sonnet": "claude-3-sonnet-20240229",
+        "claude-3-5-sonnet": "claude-3-5-sonnet-20240620",
+        "claude-3-haiku": "claude-3-haiku-20240307",
+        "claude-2.1": "claude-2.1",
+
+        "gemini-pro": "gemini-1.0-pro-latest",
+        "gemini-flash": "gemini-1.5-flash-latest",
+        "gemini-pro": "gemini-1.5-pro-latest",
     }
     _auth_code = ""
     _cookie_jar = None
 
+    @classmethod
+    def get_models(cls):
+        if cls.models is None:
+            url = 'https://liaobots.work/api/models'
+            headers = {
+                'accept': '*/*',
+                'accept-language': 'en-US,en;q=0.9',
+                'content-type': 'application/json',
+                'cookie': 'gkp2=ehnhUPJtkCgMmod8Sbxn',
+                'origin': 'https://liaobots.work',
+                'priority': 'u=1, i',
+                'referer': 'https://liaobots.work/',
+                'sec-ch-ua': '"Chromium";v="127", "Not)A;Brand";v="99"',
+                'sec-ch-ua-mobile': '?0',
+                'sec-ch-ua-platform': '"Linux"',
+                'sec-fetch-dest': 'empty',
+                'sec-fetch-mode': 'cors',
+                'sec-fetch-site': 'same-origin',
+                'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36'
+            }
+            data = {'key': ''}
+
+            response = requests.post(url, headers=headers, json=data)
+
+            if response.status_code == 200:
+                try:
+                    models_data = response.json()
+                    cls.models = {model['id']: model for model in models_data}
+                except (ValueError, KeyError) as e:
+                    print(f"Error processing JSON response: {e}")
+                    cls.models = {}
+            else:
+                print(f"Request failed with status code: {response.status_code}")
+                cls.models = {}
+
+        return cls.models
+
     @classmethod
     async def create_async_generator(
         cls,
@@ -126,9 +99,10 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
             cookie_jar=cls._cookie_jar,
             connector=get_connector(connector, proxy, True)
         ) as session:
+            models = cls.get_models()
             data = {
                 "conversationId": str(uuid.uuid4()),
-                "model": models[model],
+                "model": models[cls.get_model(model)],
                 "messages": messages,
                 "key": "",
                 "prompt": kwargs.get("system_message", "You are a helpful assistant."),
@@ -141,20 +115,11 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
             ) as response:
                 await raise_for_status(response)
             try:
-                async with session.post(
-                    "https://liaobots.work/api/user",
-                    json={"authcode": cls._auth_code},
-                    verify_ssl=False
-                ) as response:
-                    await raise_for_status(response)
-                    cls._auth_code = (await response.json(content_type=None))["authCode"]
-                    if not cls._auth_code:
-                        raise RuntimeError("Empty auth code")
-                    cls._cookie_jar = session.cookie_jar
+                await cls.ensure_auth_code(session)
                 async with session.post(
                     "https://liaobots.work/api/chat",
                     json=data,
                     headers={"x-auth-code": cls._auth_code},
                     verify_ssl=False
                 ) as response:
                     await raise_for_status(response)
@@ -164,16 +129,7 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
                         if chunk:
                             yield chunk.decode(errors="ignore")
             except:
-                async with session.post(
-                    "https://liaobots.work/api/user",
-                    json={"authcode": "pTIQr4FTnVRfr"},
-                    verify_ssl=False
-                ) as response:
-                    await raise_for_status(response)
-                    cls._auth_code = (await response.json(content_type=None))["authCode"]
-                    if not cls._auth_code:
-                        raise RuntimeError("Empty auth code")
-                    cls._cookie_jar = session.cookie_jar
+                await cls.initialize_auth_code(session)
                 async with session.post(
                     "https://liaobots.work/api/chat",
                     json=data,
@@ -186,7 +142,6 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
                         raise RuntimeError("Invalid session")
                     if chunk:
                         yield chunk.decode(errors="ignore")
-
     @classmethod
     def get_model(cls, model: str) -> str:
         """
@@ -194,15 +149,16 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
         """
         if model in cls.model_aliases:
             model = cls.model_aliases[model]
+        models = cls.get_models()
         if model not in models:
             raise ValueError(f"Model '{model}' is not supported.")
         return model
 
     @classmethod
     def is_supported(cls, model: str) -> bool:
         """
         Check if the given model is supported.
         """
+        models = cls.get_models()
         return model in models or model in cls.model_aliases
 
     @classmethod
@@ -220,7 +176,6 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
         if not cls._auth_code:
             raise RuntimeError("Empty auth code")
         cls._cookie_jar = session.cookie_jar
-
    @classmethod
    async def ensure_auth_code(cls, session: ClientSession) -> None:
        """
@@ -228,3 +183,18 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
         """
         if not cls._auth_code:
             await cls.initialize_auth_code(session)
+
+    @classmethod
+    async def refresh_auth_code(cls, session: ClientSession) -> None:
+        """
+        Refresh the auth code by making a new request.
+        """
+        await cls.initialize_auth_code(session)
+
+    @classmethod
+    async def get_auth_code(cls, session: ClientSession) -> str:
+        """
+        Get the current auth code, initializing it if necessary.
+        """
+        await cls.ensure_auth_code(session)
+        return cls._auth_code
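Reviewer note: the hardcoded models table is gone; with models = None the catalog is fetched lazily from https://liaobots.work/api/models on first use and cached on the class. Also note the new model_aliases literal repeats the "claude-3-opus" and "gemini-pro" keys; Python keeps only the last occurrence of a duplicated dict key, which is easy to verify:

    aliases = {
        "gemini-pro": "gemini-1.0-pro-latest",
        "gemini-pro": "gemini-1.5-pro-latest",  # duplicate key: this value wins
    }
    print(aliases)  # {'gemini-pro': 'gemini-1.5-pro-latest'}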
@@ -8,11 +8,11 @@ from .helper import format_prompt
 
 class MagickPenAsk(AsyncGeneratorProvider, ProviderModelMixin):
-    url = "https://api.magickpen.com"
-    api_endpoint = "/ask"
+    url = "https://magickpen.com/ask"
+    api_endpoint = "https://api.magickpen.com/ask"
     working = True
     supports_gpt_4 = True
-    default_model = "gpt-4o"
+    default_model = "gpt-4o-mini"
 
     @classmethod
     async def create_async_generator(
@@ -37,14 +37,14 @@ class MagickPenAsk(AsyncGeneratorProvider, ProviderModelMixin):
             "sec-fetch-mode": "cors",
             "sec-fetch-site": "same-site",
             "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
-            'X-API-Secret': 'W252GY255JVYBS9NAM' # this for some reason is just hardcoded in the .js, it makes no sense
+            'X-API-Secret': 'W252GY255JVYBS9NAM'
         }
         async with ClientSession(headers=headers) as session:
             data = {
                 "query": format_prompt(messages),
                 "plan": "Pay as you go"
             }
-            async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
+            async with session.post(f"{cls.api_endpoint}", json=data, proxy=proxy) as response:
                 response.raise_for_status()
                 async for chunk in response.content:
                     if chunk:
@@ -8,12 +8,11 @@ from .helper import format_prompt
 
 class MagickPenChat(AsyncGeneratorProvider, ProviderModelMixin):
-    url = "https://api.magickpen.com"
-    api_endpoint = "/chat/free"
+    url = "https://magickpen.com/chat"
+    api_endpoint = "https://api.magickpen.com/chat/free"
     working = True
     supports_gpt_4 = True
     default_model = "gpt-4o-mini"
 
     @classmethod
     async def create_async_generator(
         cls,
@@ -44,7 +43,7 @@ class MagickPenChat(AsyncGeneratorProvider, ProviderModelMixin):
             data = {
                 "history": [{"role": "user", "content": format_prompt(messages)}]
             }
-            async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
+            async with session.post(f"{cls.api_endpoint}", json=data, proxy=proxy) as response:
                 response.raise_for_status()
                 async for chunk in response.content:
                     if chunk:
@@ -1,15 +1,19 @@
 from __future__ import annotations
 
 import json
 from aiohttp import ClientSession
 
-from ..typing import Messages, AsyncResult
-from .base_provider import AsyncGeneratorProvider
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from .helper import format_prompt
 
-class Pizzagpt(AsyncGeneratorProvider):
+
+class Pizzagpt(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://www.pizzagpt.it"
     api_endpoint = "/api/chatx-completion"
-    supports_message_history = False
-    supports_gpt_35_turbo = True
     working = True
+    supports_gpt_4 = True
+    default_model = 'gpt-4o-mini'
 
     @classmethod
     async def create_async_generator(
@@ -19,30 +23,28 @@ class Pizzagpt(AsyncGeneratorProvider):
         proxy: str = None,
         **kwargs
     ) -> AsyncResult:
-        payload = {
-            "question": messages[-1]["content"]
-        }
         headers = {
-            "Accept": "application/json",
-            "Accept-Encoding": "gzip, deflate, br, zstd",
-            "Accept-Language": "en-US,en;q=0.9",
-            "Content-Type": "application/json",
-            "Origin": cls.url,
-            "Referer": f"{cls.url}/en",
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-origin",
-            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
-            "X-Secret": "Marinara"
+            "accept": "application/json",
+            "accept-language": "en-US,en;q=0.9",
+            "content-type": "application/json",
+            "origin": cls.url,
+            "referer": f"{cls.url}/en",
+            "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Linux"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin",
+            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
+            "x-secret": "Marinara"
         }
-
-        async with ClientSession() as session:
-            async with session.post(
-                f"{cls.url}{cls.api_endpoint}",
-                json=payload,
-                proxy=proxy,
-                headers=headers
-            ) as response:
+        async with ClientSession(headers=headers) as session:
+            prompt = format_prompt(messages)
+            data = {
+                "question": prompt
+            }
+            async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
                 response.raise_for_status()
                 response_json = await response.json()
-                yield response_json["answer"]["content"]
+                content = response_json.get("answer", {}).get("content", "")
+                yield content
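Reviewer note: the response handling also moves from direct indexing (response_json["answer"]["content"]) to chained .get() calls with defaults, so an unexpected body degrades to an empty yield instead of a KeyError:

    response_json = {"error": "quota exceeded"}  # stand-in for an unexpected API reply
    content = response_json.get("answer", {}).get("content", "")
    print(repr(content))  # '' instead of a KeyError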
@@ -19,7 +19,7 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
     working = True
     supports_gpt_35_turbo = True
     supports_gpt_4 = True
-    default_model = "gpt-3.5-turbo"
+    default_model = "gpt-4o-mini"
     default_vision_model = "agent"
     image_models = ["dall-e"]
     models = [
@@ -12,6 +12,7 @@ from .needs_auth import *
 
 from .AI365VIP import AI365VIP
 from .Allyfy import Allyfy
+from .AiChatOnline import AiChatOnline
 from .Aura import Aura
 from .Bing import Bing
 from .BingCreateImages import BingCreateImages
@ -1,59 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from aiohttp import ClientSession
|
||||
|
||||
from ...typing import AsyncResult, Messages
|
||||
from ..base_provider import AsyncGeneratorProvider
|
||||
from ..helper import get_random_string
|
||||
|
||||
class AiChatOnline(AsyncGeneratorProvider):
|
||||
url = "https://aichatonline.org"
|
||||
working = False
|
||||
supports_gpt_35_turbo = True
|
||||
supports_message_history = False
|
||||
|
||||
@classmethod
|
||||
async def create_async_generator(
|
||||
cls,
|
||||
model: str,
|
||||
messages: Messages,
|
||||
proxy: str = None,
|
||||
**kwargs
|
||||
) -> AsyncResult:
|
||||
headers = {
|
||||
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
|
||||
"Accept": "text/event-stream",
|
||||
"Accept-Language": "de,en-US;q=0.7,en;q=0.3",
|
||||
"Accept-Encoding": "gzip, deflate, br",
|
||||
"Referer": f"{cls.url}/chatgpt/chat/",
|
||||
"Content-Type": "application/json",
|
||||
"Origin": cls.url,
|
||||
"Alt-Used": "aichatonline.org",
|
||||
"Connection": "keep-alive",
|
||||
"Sec-Fetch-Dest": "empty",
|
||||
"Sec-Fetch-Mode": "cors",
|
||||
"Sec-Fetch-Site": "same-origin",
|
||||
"TE": "trailers"
|
||||
}
|
||||
async with ClientSession(headers=headers) as session:
|
||||
data = {
|
||||
"botId": "default",
|
||||
"customId": None,
|
||||
"session": get_random_string(16),
|
||||
"chatId": get_random_string(),
|
||||
"contextId": 7,
|
||||
"messages": messages,
|
||||
"newMessage": messages[-1]["content"],
|
||||
"newImageId": None,
|
||||
"stream": True
|
||||
}
|
||||
async with session.post(f"{cls.url}/chatgpt/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response:
|
||||
response.raise_for_status()
|
||||
async for chunk in response.content:
|
||||
if chunk.startswith(b"data: "):
|
||||
data = json.loads(chunk[6:])
|
||||
if data["type"] == "live":
|
||||
yield data["data"]
|
||||
elif data["type"] == "end":
|
||||
break
|
@@ -25,7 +25,7 @@ from .Aichat import Aichat
 from .Berlin import Berlin
 from .Phind import Phind
 from .AiAsk import AiAsk
-from .AiChatOnline import AiChatOnline
+from ..AiChatOnline import AiChatOnline
 from .ChatAnywhere import ChatAnywhere
 from .FakeGpt import FakeGpt
 from .GeekGpt import GeekGpt