mirror of https://github.com/xtekky/gpt4free.git (synced 2024-11-27 13:42:19 +03:00)
"create_async" support for BaseProvider,
by using ThreadPoolExecutor
Default Model for Vercel

parent f1b6880f7e
commit 348670fe35
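The BaseProvider change the commit message refers to is not included in the hunks below. As a rough sketch of the idea, a create_async wrapper that runs a provider's blocking create_completion generator in a ThreadPoolExecutor could look like this (method names and signatures are assumptions, not taken from this diff):

from __future__ import annotations

import asyncio
from concurrent.futures import ThreadPoolExecutor

class BaseProvider:
    @classmethod
    def create_completion(cls, model: str, messages: list[dict[str, str]], stream: bool, **kwargs):
        # Blocking, generator-based API implemented by each concrete provider.
        raise NotImplementedError()

    @classmethod
    async def create_async(cls, model: str, messages: list[dict[str, str]], **kwargs) -> str:
        # Collect the blocking generator's chunks in a worker thread so the
        # event loop is not blocked, then return the joined text.
        loop = asyncio.get_event_loop()

        def collect() -> str:
            return "".join(cls.create_completion(model, messages, False, **kwargs))

        with ThreadPoolExecutor() as executor:
            return await loop.run_in_executor(executor, collect)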
g4f/Provider/AItianhuSpace.py (new file, 73 lines)
@@ -0,0 +1,73 @@
from __future__ import annotations

import random, json

from g4f.requests import AsyncSession, StreamRequest
from .base_provider import AsyncGeneratorProvider, format_prompt

domains = {
    "gpt-3.5-turbo": ".aitianhu.space",
    "gpt-4": ".aitianhu.website",
}


class AItianhuSpace(AsyncGeneratorProvider):
    url = "https://chat3.aiyunos.top/"
    working = True
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        stream: bool = True,
        **kwargs
    ) -> str:
        if not model:
            model = "gpt-3.5-turbo"
        elif model not in domains:
            raise ValueError(f"Model is not supported: {model}")

        # Requests go to a random six-character subdomain of the domain mapped to the model.
        chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
        rand = ''.join(random.choice(chars) for _ in range(6))
        domain = domains[model]
        url = f'https://{rand}{domain}/api/chat-process'

        headers = {
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36",
        }
        async with AsyncSession(headers=headers, impersonate="chrome107", verify=False) as session:
            data = {
                "prompt": format_prompt(messages),
                "options": {},
                "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
                "temperature": 0.8,
                "top_p": 1,
                **kwargs
            }
            async with StreamRequest(session, "POST", url, json=data) as response:
                response.raise_for_status()
                async for line in response.content:
                    line = json.loads(line.rstrip())
                    if "detail" in line:
                        content = line["detail"]["choices"][0]["delta"].get("content")
                        if content:
                            yield content
                    # "AI-4接口非常昂贵" ("the GPT-4 API is very expensive") signals a rate limit.
                    elif "message" in line and "AI-4接口非常昂贵" in line["message"]:
                        raise RuntimeError("Rate limit for GPT 4 reached")
                    else:
                        raise RuntimeError(f"Response: {line}")

    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
            ("top_p", "int"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
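For orientation, a minimal usage sketch of the new provider, assuming the call is made from an async context (the message format follows the list[dict[str, str]] signature above):

import asyncio
from g4f.Provider import AItianhuSpace

async def main():
    messages = [{"role": "user", "content": "Hello"}]
    # create_async_generator yields the response text chunk by chunk as it streams in.
    async for chunk in AItianhuSpace.create_async_generator("gpt-3.5-turbo", messages):
        print(chunk, end="")

asyncio.run(main())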
g4f/Provider/Aivvm.py

@@ -60,9 +60,10 @@ class Aivvm(BaseProvider):
 
         response = requests.post(
             "https://chat.aivvm.com/api/chat", headers=headers, json=json_data, stream=True)
         response.raise_for_status()
 
-        for line in response.iter_content(chunk_size=1048):
-            yield line.decode('utf-8')
+        for chunk in response.iter_content(chunk_size=None):
+            yield chunk.decode('utf-8')
 
     @classmethod
     @property
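The Aivvm change swaps a fixed chunk_size=1048 for chunk_size=None, which makes requests yield data as soon as it arrives instead of waiting to fill a fixed-size buffer. A standalone sketch of that streaming pattern (the URL and payload are placeholders, not part of the diff):

import requests

# With stream=True and chunk_size=None, iter_content yields whatever bytes the
# server has sent so far, which keeps token-by-token streaming responsive.
with requests.post("https://example.com/api/chat", json={"prompt": "hi"}, stream=True) as response:
    response.raise_for_status()
    for chunk in response.iter_content(chunk_size=None):
        print(chunk.decode("utf-8"), end="")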
g4f/Provider/__init__.py

@@ -4,6 +4,7 @@ from .Aichat import Aichat
 from .Ails import Ails
 from .AiService import AiService
 from .AItianhu import AItianhu
+from .AItianhuSpace import AItianhuSpace
 from .Aivvm import Aivvm
 from .Bard import Bard
 from .Bing import Bing
@@ -52,6 +53,7 @@ __all__ = [
     'Ails',
     'AiService',
     'AItianhu',
+    'AItianhuSpace',
     'Aivvm',
     'Bard',
     'Bing',
g4f/requests.py (new file, 78 lines)
@@ -0,0 +1,78 @@
from __future__ import annotations

import json, sys

from aiohttp import StreamReader
from aiohttp.base_protocol import BaseProtocol

from curl_cffi.requests import AsyncSession
from curl_cffi.requests.cookies import Request
from curl_cffi.requests.cookies import Response


class StreamResponse:
    def __init__(self, inner: Response, content: StreamReader, request: Request):
        self.inner = inner
        self.content = content
        self.request = request
        self.status_code = inner.status_code
        self.reason = inner.reason
        self.ok = inner.ok

    async def text(self) -> str:
        content = await self.content.read()
        return content.decode()

    def raise_for_status(self):
        if not self.ok:
            raise RuntimeError(f"HTTP Error {self.status_code}: {self.reason}")

    async def json(self, **kwargs):
        return json.loads(await self.content.read(), **kwargs)


class StreamRequest:
    def __init__(self, session: AsyncSession, method: str, url: str, **kwargs):
        self.session = session
        self.loop = session.loop
        self.content = StreamReader(
            BaseProtocol(session.loop),
            sys.maxsize,
            loop=session.loop
        )
        self.method = method
        self.url = url
        self.options = kwargs

    def on_content(self, data):
        # The first body chunk resolves the future that __aenter__ is awaiting.
        if not self.enter.done():
            self.enter.set_result(None)
        self.content.feed_data(data)

    def on_done(self, task):
        self.content.feed_eof()

    async def __aenter__(self) -> StreamResponse:
        self.curl = await self.session.pop_curl()
        self.enter = self.session.loop.create_future()
        request, _, header_buffer = self.session._set_curl_options(
            self.curl,
            self.method,
            self.url,
            content_callback=self.on_content,
            **self.options
        )
        handle = self.session.acurl.add_handle(self.curl)
        self.handle = self.session.loop.create_task(handle)
        self.handle.add_done_callback(self.on_done)
        # Wait until the headers and the first body data have arrived.
        await self.enter
        return StreamResponse(
            self.session._parse_response(self.curl, request, _, header_buffer),
            self.content,
            request
        )

    async def __aexit__(self, exc_type, exc, tb):
        await self.handle
        self.curl.clean_after_perform()
        self.curl.reset()
        self.session.push_curl(self.curl)
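A short usage sketch of the new streaming helpers, mirroring how AItianhuSpace uses them above (the URL is a placeholder):

import asyncio
from g4f.requests import AsyncSession, StreamRequest

async def main():
    # AsyncSession is curl_cffi's session, importable from g4f.requests as in the provider above.
    async with AsyncSession(impersonate="chrome107") as session:
        async with StreamRequest(session, "GET", "https://example.com/stream") as response:
            response.raise_for_status()
            # response.content is an aiohttp StreamReader fed by the curl content
            # callback, so it can be iterated line by line as data arrives.
            async for line in response.content:
                print(line.decode(), end="")

asyncio.run(main())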