from __future__ import annotations

import json

from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
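
# Map friendly model aliases to the identifiers the upstream API expects.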
models = {
    "claude-v2": "claude-2.0",
    "gemini-pro": "google-gemini-pro",
}


class FreeChatgpt(AsyncGeneratorProvider):
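    # Provider metadata and capability flags advertised to the client framework.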
    url = "https://free.chatgpt.org.uk"
    working = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = True
    supports_message_history = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
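        # Resolve known aliases; fall back to gpt-3.5-turbo when no model is given.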
        if model in models:
            model = models[model]
        elif not model:
            model = "gpt-3.5-turbo"
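        # Headers mimicking a regular browser request from the site's own frontend.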
        headers = {
            "Accept": "application/json, text/event-stream",
            "Content-Type": "application/json",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept-Language": "en-US,en;q=0.5",
            "Host": "free.chatgpt.org.uk",
            "Referer": f"{cls.url}/",
            "Origin": cls.url,
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
        }
        async with ClientSession(headers=headers) as session:
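            # OpenAI-compatible payload; values passed via kwargs override these defaults.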
            data = {
                "messages": messages,
                "stream": True,
                "model": model,
                "temperature": 0.5,
                "presence_penalty": 0,
                "frequency_penalty": 0,
                "top_p": 1,
                **kwargs
            }
            async with session.post(f'{cls.url}/api/openai/v1/chat/completions', json=data, proxy=proxy) as response:
                response.raise_for_status()
                started = False
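                # The endpoint streams OpenAI-style server-sent events: each payload
                # line is "data: <json>" and "data: [DONE]" marks the end of the stream.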
                async for line in response.content:
                    if line.startswith(b"data: [DONE]"):
                        break
                    elif line.startswith(b"data: "):
                        line = json.loads(line[6:])
                        if not line["choices"]:
                            continue
                        chunk = line["choices"][0]["delta"].get("content")
                        if chunk:
                            started = True
                            yield chunk
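                # No content chunks arrived at all: treat a silent stream as a failure.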
                if not started:
                    raise RuntimeError("Empty response")
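

# Usage sketch (illustrative only; assumes the Messages type is a list of
# {"role": ..., "content": ...} dicts and that the upstream service is reachable):
#
#     import asyncio
#
#     async def main():
#         async for chunk in FreeChatgpt.create_async_generator(
#             model="gpt-3.5-turbo",
#             messages=[{"role": "user", "content": "Hello!"}],
#         ):
#             print(chunk, end="", flush=True)
#
#     asyncio.run(main())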