diff --git a/g4f/Provider/GptTalkRu.py b/g4f/Provider/GptTalkRu.py
new file mode 100644
index 00000000..16d69f3c
--- /dev/null
+++ b/g4f/Provider/GptTalkRu.py
@@ -0,0 +1,49 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider
+
+
+class GptTalkRu(AsyncGeneratorProvider):
+    url = "https://gpttalk.ru"
+    working = True
+    supports_gpt_35_turbo = True
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        if not model:
+            model = "gpt-3.5-turbo"
+        headers = {
+            "Accept": "application/json, text/plain, */*",
+            "Accept-Language": "en-US",
+            "Connection": "keep-alive",
+            "Content-Type": "application/json",
+            "Origin": "https://gpttalk.ru",
+            "Referer": "https://gpttalk.ru/",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36",
+            "sec-ch-ua": '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Linux"',
+        }
+        async with ClientSession(headers=headers) as session:
+            data = {
+                "model": model,
+                "modelType": 1,
+                "prompt": messages,
+                "responseType": "stream",
+            }
+            async with session.post(f"{cls.url}/gpt2", json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                async for chunk in response.content.iter_any():
+                    yield chunk.decode()
\ No newline at end of file
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 2cc90b43..2b47b071 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -33,6 +33,7 @@ from .GptChatly import GptChatly
 from .GptForLove import GptForLove
 from .GptGo import GptGo
 from .GptGod import GptGod
+from .GptTalkRu import GptTalkRu
 from .Hashnode import Hashnode
 from .Koala import Koala
 from .Liaobots import Liaobots
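
Not part of the diff: a minimal sketch of how the new provider could be exercised directly, driving the create_async_generator classmethod added above. It assumes the package is importable as g4f and that messages use the usual role/content dictionary format, which the provider forwards verbatim in the "prompt" field.

```python
import asyncio

from g4f.Provider import GptTalkRu


async def main():
    # Role/content messages; the provider sends this list as-is in the request body
    messages = [{"role": "user", "content": "Hello, how are you?"}]
    # create_async_generator is an async generator function, so the streamed
    # response chunks can be consumed directly with async for
    async for chunk in GptTalkRu.create_async_generator("gpt-3.5-turbo", messages):
        print(chunk, end="", flush=True)


asyncio.run(main())
```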