mirror of https://github.com/xtekky/gpt4free.git (synced 2024-12-26 04:33:35 +03:00)
86 lines · 3.0 KiB · Python

from __future__ import annotations

import json

from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt


class DarkAI(AsyncGeneratorProvider, ProviderModelMixin):
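    """Async provider that streams completions from the DarkAI chat endpoint via SSE."""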
    url = "https://www.aiuncensored.info"
    api_endpoint = "https://darkai.foundation/chat"
    working = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = 'gpt-4o'
    models = [
        default_model,    # Uncensored
        'gpt-3.5-turbo',  # Uncensored
        'llama-3-70b',    # Uncensored
        'llama-3-405b',
    ]

    model_aliases = {
        "llama-3.1-70b": "llama-3-70b",
        "llama-3.1-405b": "llama-3-405b",
    }

    @classmethod
    def get_model(cls, model: str) -> str:
        # Map the requested model to one the endpoint accepts: known names
        # pass through, aliases are resolved, anything else gets the default.
        if model in cls.models:
            return model
        elif model in cls.model_aliases:
            return cls.model_aliases[model]
        else:
            return cls.default_model
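
    # For example, get_model("llama-3.1-70b") returns "llama-3-70b", while an
    # unrecognised name such as "my-model" falls back to "gpt-4o".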

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        model = cls.get_model(model)

        headers = {
            "accept": "text/event-stream",
            "content-type": "application/json",
            "origin": "https://www.aiuncensored.info",
            "referer": "https://www.aiuncensored.info/",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
        }
        async with ClientSession(headers=headers) as session:
            prompt = format_prompt(messages)
            data = {
                "query": prompt,
                "model": model,
            }
            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                response.raise_for_status()
                full_text = ""
                # The endpoint streams server-sent events: each "data: {...}"
                # line carries either a text chunk or a stream-end marker.
                async for chunk in response.content:
                    if chunk:
                        try:
                            chunk_str = chunk.decode().strip()
                            if chunk_str.startswith('data: '):
                                chunk_data = json.loads(chunk_str[6:])
                                if chunk_data['event'] == 'text-chunk':
                                    full_text += chunk_data['data']['text']
                                elif chunk_data['event'] == 'stream-end':
                                    if full_text:
                                        yield full_text.strip()
                                    return
                        except json.JSONDecodeError:
                            print(f"Failed to decode JSON: {chunk_str}")
                        except Exception as e:
                            print(f"Error processing chunk: {e}")

                # Fallback: the connection closed without a 'stream-end' event.
                if full_text:
                    yield full_text.strip()
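

# A minimal usage sketch, assuming this module is importable as part of the
# g4f package (the import path below is illustrative):
#
#   import asyncio
#   from g4f.Provider import DarkAI
#
#   async def main():
#       messages = [{"role": "user", "content": "Hello!"}]
#       async for text in DarkAI.create_async_generator("gpt-4o", messages):
#           print(text)
#
#   asyncio.run(main())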