diff --git a/g4f/Provider/ChatgptDemo.py b/g4f/Provider/ChatgptDemo.py
index 666b5753..2f25477a 100644
--- a/g4f/Provider/ChatgptDemo.py
+++ b/g4f/Provider/ChatgptDemo.py
@@ -10,7 +10,7 @@ from .helper import format_prompt
 class ChatgptDemo(AsyncGeneratorProvider):
     url = "https://chat.chatgptdemo.net"
     supports_gpt_35_turbo = True
-    working = False
+    working = True
 
     @classmethod
     async def create_async_generator(
diff --git a/g4f/Provider/Gpt6.py b/g4f/Provider/Gpt6.py
new file mode 100644
index 00000000..b8a294e2
--- /dev/null
+++ b/g4f/Provider/Gpt6.py
@@ -0,0 +1,55 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider
+from .helper import format_prompt
+
+
+class Gpt6(AsyncGeneratorProvider):
+    url = "https://gpt6.ai"
+    working = True
+    supports_gpt_35_turbo = True
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        headers = {
+            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
+            "Accept": "*/*",
+            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+            "Accept-Encoding": "gzip, deflate, br",
+            "Content-Type": "application/json",
+            "Origin": "https://gpt6.ai",
+            "Connection": "keep-alive",
+            "Referer": "https://gpt6.ai/",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "cross-site",
+            "TE": "trailers",
+        }
+        async with ClientSession(headers=headers) as session:
+            data = {
+                "prompts":messages,
+                "geoInfo":{"ip":"100.90.100.222","hostname":"ip-100-090-100-222.um36.pools.vodafone-ip.de","city":"Muenchen","region":"North Rhine-Westphalia","country":"DE","loc":"44.0910,5.5827","org":"AS3209 Vodafone GmbH","postal":"41507","timezone":"Europe/Berlin"},
+                "paid":False,
+                "character":{"textContent":"","id":"52690ad6-22e4-4674-93d4-1784721e9944","name":"GPT6","htmlContent":""}
+            }
+            async with session.post("https://seahorse-app-d29hu.ondigitalocean.app/api/v1/query", json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                async for line in response.content:
+                    if line.startswith(b"data: [DONE]"):
+                        break
+                    elif line.startswith(b"data: "):
+                        line = json.loads(line[6:])  # json.loads tolerates trailing \n or \r\n; slicing [6:-1] broke on CRLF
+
+                        chunk = line["choices"][0]["delta"].get("content")
+                        if chunk:
+                            yield chunk
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 5f2b8344..4670d331 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -29,6 +29,7 @@ from .Chatxyz import Chatxyz
 from .DeepInfra import DeepInfra
 from .FakeGpt import FakeGpt
 from .FreeGpt import FreeGpt
+from .Gpt6 import Gpt6
 from .GPTalk import GPTalk
 from .GptChatly import GptChatly
 from .GptForLove import GptForLove
diff --git a/g4f/models.py b/g4f/models.py
index b1e85a5b..9a4539c5 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -8,6 +8,7 @@ from .Provider import (
     ChatAnywhere,
     ChatgptNext,
     HuggingChat,
+    ChatgptDemo,
     GptForLove,
     ChatgptAi,
     DeepInfra,
@@ -23,6 +24,7 @@ from .Provider import (
     Phind,
     Koala,
     GptGo,
+    Gpt6,
     Bard,
     Bing,
     You,
@@ -65,6 +67,8 @@ gpt_35_long = Model(
         ChatgptDemoAi,
         OnlineGpt,
         ChatgptNext,
+        ChatgptDemo,
+        Gpt6,
     ])
 )
 