Add FakeGpt Provider

Update providers in models
This commit is contained in:
Heiner Lohaus 2023-10-22 01:22:25 +02:00
parent f125f714fb
commit a3af9fac3e
3 changed files with 108 additions and 15 deletions

94
g4f/Provider/FakeGpt.py Normal file
View File

@ -0,0 +1,94 @@
from __future__ import annotations
import uuid, time, random, string, json
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt
class FakeGpt(AsyncGeneratorProvider):
    """Async provider backed by the shared ChatGPT token pool at chat-shared2.zhile.io.

    On first use it fetches the pool's token list, picks an unused shared
    token, logs in with a random session password, and caches the resulting
    access token and cookie jar on the class so later calls reuse the session.
    """
    url = "https://chat-shared2.zhile.io"
    supports_gpt_35_turbo = True
    working = True
    # Cached login state, shared by all calls in this process.
    _access_token = None
    _cookie_jar = None

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Stream response text deltas for *messages*.

        Args:
            model: Ignored; the upstream model is hard-coded.
            messages: Chat history, flattened into a single prompt.
            proxy: Optional proxy URL applied to every request.

        Raises:
            RuntimeError: If no free shared token exists, or the stream
                ends without yielding any valid message.
        """
        headers = {
            "Accept-Language": "en-US",
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36",
            "Referer": "https://chat-shared2.zhile.io/?v=2",
            "sec-ch-ua": '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
            "sec-ch-ua-platform": '"Linux"',
            "sec-ch-ua-mobile": "?0",
        }
        async with ClientSession(headers=headers, cookie_jar=cls._cookie_jar) as session:
            if not cls._access_token:
                # Pick a shared token that nobody is using (count == 0).
                async with session.get(f"{cls.url}/api/loads", params={"t": int(time.time())}, proxy=proxy) as response:
                    response.raise_for_status()
                    loads = (await response.json())["loads"]
                    token_ids = [t["token_id"] for t in loads if t["count"] == 0]
                if not token_ids:
                    raise RuntimeError("No free token available")
                data = {
                    "token_key": random.choice(token_ids),
                    "session_password": random_string()
                }
                async with session.post(f"{cls.url}/auth/login", data=data, proxy=proxy) as response:
                    response.raise_for_status()
                async with session.get(f"{cls.url}/api/auth/session", proxy=proxy) as response:
                    response.raise_for_status()
                    cls._access_token = (await response.json())["accessToken"]
                    cls._cookie_jar = session.cookie_jar
            headers = {
                "Content-Type": "application/json",
                "Accept": "text/event-stream",
                "X-Authorization": f"Bearer {cls._access_token}",
            }
            prompt = format_prompt(messages)
            data = {
                "action": "next",
                "messages": [
                    {
                        "id": str(uuid.uuid4()),
                        "author": {"role": "user"},
                        "content": {"content_type": "text", "parts": [prompt]},
                        "metadata": {},
                    }
                ],
                "parent_message_id": str(uuid.uuid4()),
                "model": "text-davinci-002-render-sha",
                "plugin_ids": [],
                "timezone_offset_min": -120,
                "suggestions": [],
                "history_and_training_disabled": True,
                "arkose_token": "",
                "force_paragen": False,
            }
            last_message = ""
            async with session.post(f"{cls.url}/api/conversation", json=data, headers=headers, proxy=proxy) as response:
                async for line in response.content:
                    if line.startswith(b"data: "):
                        line = line[6:]
                        if line == b"[DONE]":
                            break
                        try:
                            line = json.loads(line)
                            if line["message"]["metadata"]["message_type"] == "next":
                                new_message = line["message"]["content"]["parts"][0]
                                # The stream sends cumulative text; yield only the new suffix.
                                yield new_message[len(last_message):]
                                last_message = new_message
                        except (json.JSONDecodeError, KeyError, TypeError):
                            # Skip keep-alives and malformed SSE chunks instead of aborting.
                            continue
            if not last_message:
                raise RuntimeError("No valid response")
def random_string(length: int = 10):
    """Return a random lowercase-alphanumeric string of *length* characters."""
    alphabet = string.ascii_lowercase + string.digits
    return "".join(random.choices(alphabet, k=length))

View File

@ -17,6 +17,7 @@ from .ChatgptFree import ChatgptFree
from .ChatgptLogin import ChatgptLogin
from .ChatgptX import ChatgptX
from .Cromicle import Cromicle
from .FakeGpt import FakeGpt
from .FreeGpt import FreeGpt
from .GPTalk import GPTalk
from .GptChatly import GptChatly
@ -73,6 +74,7 @@ class ProviderUtils:
'Equing': Equing,
'FastGpt': FastGpt,
'Forefront': Forefront,
'FakeGpt': FakeGpt,
'FreeGpt': FreeGpt,
'GPTalk': GPTalk,
'GptChatly': GptChatly,
@ -143,6 +145,7 @@ __all__ = [
'DfeHub',
'EasyChat',
'Forefront',
'FakeGpt',
'FreeGpt',
'GPTalk',
'GptChatly',

View File

@ -4,19 +4,18 @@ from .typing import Union
from .Provider import BaseProvider, RetryProvider
from .Provider import (
ChatgptLogin,
ChatgptDemo,
ChatgptDuo,
GptForLove,
Opchatgpts,
ChatgptAi,
GptChatly,
Liaobots,
ChatgptX,
ChatBase,
Yqcloud,
GeekGpt,
FakeGpt,
Myshell,
FreeGpt,
Cromicle,
NoowAi,
Vercel,
Aichat,
@ -30,9 +29,6 @@ from .Provider import (
Bing,
You,
H2o,
ChatForAi,
ChatBase
)
@dataclass(unsafe_hash=True)
@ -50,9 +46,8 @@ default = Model(
base_provider = "",
best_provider = RetryProvider([
Bing, # Not fully GPT 3 or 4
Yqcloud, # Answers short questions in chinese
ChatgptDuo, # Include search results
Aibn, Aichat, ChatgptAi, ChatgptLogin, FreeGpt, GptGo, Myshell, Ylokh, GeekGpt
AiAsk, Aichat, ChatgptAi, FreeGpt, GptGo, GeekGpt,
Phind, You
])
)
@ -61,9 +56,10 @@ gpt_35_long = Model(
name = 'gpt-3.5-turbo',
base_provider = 'openai',
best_provider = RetryProvider([
AiAsk, Aichat, ChatgptDemo, FreeGpt, Liaobots, You,
GPTalk, ChatgptLogin, GptChatly, GptForLove, Opchatgpts,
NoowAi, GeekGpt, Phind
AiAsk, Aichat, FreeGpt, You,
GptChatly, GptForLove,
NoowAi, GeekGpt, Phind,
FakeGpt
])
)
@ -72,8 +68,8 @@ gpt_35_turbo = Model(
name = 'gpt-3.5-turbo',
base_provider = 'openai',
best_provider=RetryProvider([
ChatgptX, ChatgptDemo, GptGo, You,
NoowAi, GPTalk, GptForLove, Phind, ChatBase, Cromicle
ChatgptX, GptGo, You,
NoowAi, GPTalk, GptForLove, Phind, ChatBase
])
)
@ -81,7 +77,7 @@ gpt_4 = Model(
name = 'gpt-4',
base_provider = 'openai',
best_provider = RetryProvider([
Bing, GeekGpt, Liaobots, Phind
Bing, GeekGpt, Phind
])
)