2023-10-12 21:28:54 +03:00
|
|
|
from __future__ import annotations
|
|
|
|
|
2023-12-02 07:40:07 +03:00
|
|
|
from ..requests import Session, get_session_from_browser
|
2023-10-21 02:52:19 +03:00
|
|
|
from ..typing import Messages
|
2023-10-12 21:28:54 +03:00
|
|
|
from .base_provider import AsyncProvider
|
|
|
|
|
|
|
|
|
|
|
|
class GptChatly(AsyncProvider):
    """Provider for gptchatly.com — a single-shot (non-streaming) chat backend.

    Routes to the site's GPT-4 endpoint when the requested model name starts
    with ``"gpt-4"``; every other model name falls back to the default
    endpoint. The whole message history is posted in one request and the
    reply is returned as plain text.
    """

    url = "https://gptchatly.com"
    working = True
    supports_message_history = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = True

    @classmethod
    async def create_async(
        cls,
        model: str,
        messages: Messages,
        proxy: str | None = None,
        timeout: int = 120,
        session: Session | None = None,
        **kwargs
    ) -> str:
        """Send the conversation to gptchatly.com and return the reply text.

        Args:
            model: Model name; any name starting with ``"gpt-4"`` selects the
                GPT-4 endpoint, everything else uses the default endpoint.
            messages: Full message history, submitted as one payload under
                the ``"past_conversations"`` key.
            proxy: Optional proxy URL, used only when a session is created.
            timeout: Timeout in seconds for the browser-derived session.
            session: Optional pre-built session; one is created from the
                user's browser cookies when omitted.
            **kwargs: Ignored; accepted for interface compatibility with
                other providers.

        Returns:
            The assistant's reply extracted from the JSON response.

        Raises:
            An HTTP error from ``raise_for_status()`` on a non-2xx response.
        """
        if not session:
            # The site sits behind bot protection; reuse cookies and
            # user-agent from a real browser session to get through.
            session = get_session_from_browser(cls.url, proxy=proxy, timeout=timeout)

        if model.startswith("gpt-4"):
            chat_url = f"{cls.url}/fetch-gpt4-response"
        else:
            # NOTE: "felch-response" is the site's actual endpoint spelling,
            # not a typo on our side — do not "fix" it.
            chat_url = f"{cls.url}/felch-response"

        data = {
            "past_conversations": messages
        }
        response = session.post(chat_url, json=data)
        response.raise_for_status()
        return response.json()["chatGPTResponse"]
|