Add ChatgptDuo and Aibn providers

Add support for "nest_asyncio"
Reuse event loops via the event loop policy
Support "create_async" with synchronous providers
Heiner Lohaus 2023-09-26 10:03:37 +02:00
parent 72c3ff7a25
commit 3c2755bc72
10 changed files with 226 additions and 94 deletions
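
As a quick usage sketch of what this change enables (illustrative only, not part of the commit): "create_async" can now be awaited even for providers that only implement the synchronous "create_completion", and the synchronous "create" call keeps working inside an already running event loop when "nest_asyncio" is installed.

import asyncio
import g4f

async def main():
    # Works for async providers and, after this commit, also for purely
    # synchronous providers (their create_completion runs in an executor).
    answer = await g4f.ChatCompletion.create_async(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(answer)

asyncio.run(main())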

g4f/Provider/Aibn.py Normal file

@@ -0,0 +1,51 @@
from __future__ import annotations
import time
import hashlib
from ..typing import AsyncGenerator
from g4f.requests import AsyncSession
from .base_provider import AsyncGeneratorProvider
class Aibn(AsyncGeneratorProvider):
url = "https://aibn.cc"
supports_gpt_35_turbo = True
working = True
@classmethod
async def create_async_generator(
cls,
model: str,
messages: list[dict[str, str]],
**kwargs
) -> AsyncGenerator:
async with AsyncSession(impersonate="chrome107") as session:
timestamp = int(time.time())
data = {
"messages": messages,
"pass": None,
"sign": generate_signature(timestamp, messages[-1]["content"]),
"time": timestamp
}
async with session.post(f"{cls.url}/api/generate", json=data) as response:
response.raise_for_status()
async for chunk in response.content.iter_any():
yield chunk.decode()
@classmethod
@property
def params(cls):
params = [
("model", "str"),
("messages", "list[dict[str, str]]"),
("stream", "bool"),
("temperature", "float"),
]
param = ", ".join([": ".join(p) for p in params])
return f"g4f.provider.{cls.__name__} supports: ({param})"
def generate_signature(timestamp: int, message: str, secret: str = "undefined"):
data = f"{timestamp}:{message}:{secret}"
return hashlib.sha256(data.encode()).hexdigest()
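
A minimal usage sketch for the provider above (model name and prompt are placeholders):

import asyncio
from g4f.Provider import Aibn

async def demo():
    # Consume the chunks yielded by the async generator
    async for chunk in Aibn.create_async_generator(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hello"}],
    ):
        print(chunk, end="")

asyncio.run(demo())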

g4f/Provider/ChatgptDuo.py Normal file

@@ -0,0 +1,51 @@
from __future__ import annotations
from g4f.requests import AsyncSession
from .base_provider import AsyncProvider, format_prompt
class ChatgptDuo(AsyncProvider):
url = "https://chatgptduo.com"
supports_gpt_35_turbo = True
working = True
@classmethod
async def create_async(
cls,
model: str,
messages: list[dict[str, str]],
**kwargs
) -> str:
async with AsyncSession(impersonate="chrome107") as session:
prompt = format_prompt(messages)
data = {
"prompt": prompt,
"search": prompt,
"purpose": "ask",
}
async with session.post(f"{cls.url}/", data=data) as response:
response.raise_for_status()
data = await response.json()
cls._sources = [{
"title": source["title"],
"url": source["link"],
"snippet": source["snippet"]
} for source in data["results"]]
return data["answer"]
@classmethod
def get_sources(cls):
return cls._sources
@classmethod
@property
def params(cls):
params = [
("model", "str"),
("messages", "list[dict[str, str]]"),
("stream", "bool"),
]
param = ", ".join([": ".join(p) for p in params])
return f"g4f.provider.{cls.__name__} supports: ({param})"

g4f/Provider/PerplexityAi.py

@@ -58,7 +58,7 @@ class PerplexityAi(AsyncProvider):
result = json.loads(json.loads(line[3:])[0]["text"])
cls._sources = [{
"name": source["name"],
"title": source["name"],
"url": source["url"],
"snippet": source["snippet"]
} for source in result["web_results"]]

g4f/Provider/Vercel.py

@@ -62,7 +62,7 @@ class Vercel(BaseProvider):
response.raise_for_status()
except:
continue
for token in response.iter_content(chunk_size=8):
for token in response.iter_content(chunk_size=None):
yield token.decode()
break

g4f/Provider/__init__.py

@@ -1,5 +1,6 @@
from __future__ import annotations
from .Acytoo import Acytoo
from .Aibn import Aibn
from .Aichat import Aichat
from .Ails import Ails
from .AiService import AiService
@@ -10,6 +11,7 @@ from .Bard import Bard
from .Bing import Bing
from .ChatBase import ChatBase
from .ChatgptAi import ChatgptAi
from .ChatgptDuo import ChatgptDuo
from .ChatgptLogin import ChatgptLogin
from .CodeLinkAva import CodeLinkAva
from .DeepAi import DeepAi
@@ -49,6 +51,7 @@ __all__ = [
'AsyncGeneratorProvider',
'RetryProvider',
'Acytoo',
'Aibn',
'Aichat',
'Ails',
'AiService',
@@ -59,6 +62,7 @@ __all__ = [
'Bing',
'ChatBase',
'ChatgptAi',
'ChatgptDuo',
'ChatgptLogin',
'CodeLinkAva',
'DeepAi',

g4f/Provider/base_provider.py

@@ -1,13 +1,10 @@
from __future__ import annotations
import asyncio
import functools
from asyncio import SelectorEventLoop, AbstractEventLoop
from asyncio import AbstractEventLoop
from concurrent.futures import ThreadPoolExecutor
from abc import ABC, abstractmethod
import browser_cookie3
from .helper import get_event_loop, get_cookies, format_prompt
from ..typing import AsyncGenerator, CreateResult
@@ -40,20 +37,18 @@ class BaseProvider(ABC):
**kwargs
) -> str:
if not loop:
loop = asyncio.get_event_loop()
partial_func = functools.partial(
cls.create_completion,
loop = get_event_loop()
def create_func():
return "".join(cls.create_completion(
model,
messages,
False,
**kwargs
)
response = await loop.run_in_executor(
))
return await loop.run_in_executor(
executor,
partial_func
create_func
)
return "".join(response)
@classmethod
@property
@@ -76,11 +71,9 @@ class AsyncProvider(BaseProvider):
stream: bool = False,
**kwargs
) -> CreateResult:
loop = create_event_loop()
try:
yield loop.run_until_complete(cls.create_async(model, messages, **kwargs))
finally:
loop.close()
loop = get_event_loop()
coro = cls.create_async(model, messages, **kwargs)
yield loop.run_until_complete(coro)
@staticmethod
@abstractmethod
@@ -103,8 +96,7 @@ class AsyncGeneratorProvider(AsyncProvider):
stream: bool = True,
**kwargs
) -> CreateResult:
loop = create_event_loop()
try:
loop = get_event_loop()
generator = cls.create_async_generator(
model,
messages,
@@ -117,8 +109,6 @@ class AsyncGeneratorProvider(AsyncProvider):
yield loop.run_until_complete(gen.__anext__())
except StopAsyncIteration:
break
finally:
loop.close()
@classmethod
async def create_async(
@@ -144,37 +134,3 @@ class AsyncGeneratorProvider(AsyncProvider):
**kwargs
) -> AsyncGenerator:
raise NotImplementedError()
# Don't create a new event loop in a running async loop.
# Force the selector event loop on Windows; Linux uses it anyway.
def create_event_loop() -> SelectorEventLoop:
try:
asyncio.get_running_loop()
except RuntimeError:
return SelectorEventLoop()
raise RuntimeError(
'Use "create_async" instead of "create" function in a running event loop.')
_cookies = {}
def get_cookies(cookie_domain: str) -> dict:
if cookie_domain not in _cookies:
_cookies[cookie_domain] = {}
try:
for cookie in browser_cookie3.load(cookie_domain):
_cookies[cookie_domain][cookie.name] = cookie.value
except:
pass
return _cookies[cookie_domain]
def format_prompt(messages: list[dict[str, str]], add_special_tokens=False):
if add_special_tokens or len(messages) > 1:
formatted = "\n".join(
["%s: %s" % ((message["role"]).capitalize(), message["content"]) for message in messages]
)
return f"{formatted}\nAssistant:"
else:
return messages[0]["content"]

g4f/Provider/helper.py Normal file

@@ -0,0 +1,54 @@
from __future__ import annotations
import asyncio
import sys
from asyncio import AbstractEventLoop
import browser_cookie3
_cookies: dict[str, dict[str, str]] = {}
# Use our own event_loop_policy with a selector event loop on Windows.
if sys.platform == 'win32':
_event_loop_policy = asyncio.WindowsSelectorEventLoopPolicy()
else:
_event_loop_policy = asyncio.get_event_loop_policy()
# If an event loop is already running, handle nested event loops:
# if "nest_asyncio" is installed, patch the running loop.
def get_event_loop() -> AbstractEventLoop:
try:
asyncio.get_running_loop()
except RuntimeError:
return _event_loop_policy.get_event_loop()
try:
event_loop = _event_loop_policy.get_event_loop()
if not hasattr(event_loop.__class__, "_nest_patched"):
import nest_asyncio
nest_asyncio.apply(event_loop)
return event_loop
except ImportError:
raise RuntimeError(
'Use "create_async" instead of "create" function in a running event loop. Or install the "nest_asyncio" package.')
# Load cookies for a domain from all supported browsers.
# Cache the results in the "_cookies" variable.
def get_cookies(cookie_domain: str) -> dict:
if cookie_domain not in _cookies:
_cookies[cookie_domain] = {}
try:
for cookie in browser_cookie3.load(cookie_domain):
_cookies[cookie_domain][cookie.name] = cookie.value
except:
pass
return _cookies[cookie_domain]
def format_prompt(messages: list[dict[str, str]], add_special_tokens=False):
if add_special_tokens or len(messages) > 1:
formatted = "\n".join(
["%s: %s" % ((message["role"]).capitalize(), message["content"]) for message in messages]
)
return f"{formatted}\nAssistant:"
else:
return messages[0]["content"]
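
A small sketch of how the loop handling above behaves when called from synchronous code (the coroutine is a stand-in):

import asyncio
from g4f.Provider.helper import get_event_loop

async def coro():
    return "done"

# Outside a running loop: the policy's event loop is reused, not closed.
loop = get_event_loop()
print(loop.run_until_complete(coro()))

# Inside a running loop: get_event_loop() patches the loop via
# "nest_asyncio" if it is installed, otherwise it raises a RuntimeError.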

g4f/__init__.py

@@ -61,13 +61,8 @@ class ChatCompletion:
provider : Union[type[BaseProvider], None] = None,
**kwargs
) -> str:
model, provider = get_model_and_provider(model, provider, False)
provider_type = provider if isinstance(provider, type) else type(provider)
if not issubclass(provider_type, AsyncProvider):
raise Exception(f"Provider: {provider.__name__} doesn't support create_async")
return await provider.create_async(model.name, messages, **kwargs)
class Completion:

g4f/models.py

@@ -20,6 +20,8 @@ from .Provider import (
AItianhuSpace,
Aichat,
Myshell,
Aibn,
ChatgptDuo,
)
@dataclass(unsafe_hash=True)
@@ -39,7 +41,8 @@ default = Model(
Wewordle, # Responds with markdown
Yqcloud, # Answers short questions in chinese
ChatBase, # Don't want to answer creatively
DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, AItianhuSpace, Aichat, Myshell,
ChatgptDuo, # Include search results
DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, AItianhuSpace, Aichat, Myshell, Aibn,
])
)
@@ -48,7 +51,7 @@ gpt_35_turbo = Model(
name = 'gpt-3.5-turbo',
base_provider = 'openai',
best_provider = RetryProvider([
DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, Aichat, AItianhuSpace, Myshell,
DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, Aichat, AItianhuSpace, Myshell, Aibn,
])
)
@@ -56,7 +59,7 @@ gpt_4 = Model(
name = 'gpt-4',
base_provider = 'openai',
best_provider = RetryProvider([
Aivvm, Myshell, AItianhuSpace,
Myshell, AItianhuSpace,
])
)
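
For illustration only, a sketch of forcing one of the new providers instead of relying on the RetryProvider defaults above (the prompt is a placeholder and the keyword arguments are assumed from the existing ChatCompletion.create API):

import g4f
from g4f.Provider import ChatgptDuo

response = g4f.ChatCompletion.create(
    model=g4f.models.gpt_35_turbo,
    provider=ChatgptDuo,
    messages=[{"role": "user", "content": "Hi"}],
)
print(response)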

g4f/requests.py

@@ -1,12 +1,13 @@
from __future__ import annotations
import json, sys
from functools import partialmethod
from aiohttp import StreamReader
from aiohttp.base_protocol import BaseProtocol
from curl_cffi.requests import AsyncSession
from curl_cffi.requests.cookies import Request
from curl_cffi.requests.cookies import Response
from curl_cffi.requests import AsyncSession as BaseSession
from curl_cffi.requests.cookies import Request, Response
class StreamResponse:
@@ -17,6 +18,8 @@ class StreamResponse:
self.status_code = inner.status_code
self.reason = inner.reason
self.ok = inner.ok
self.headers = inner.headers
self.cookies = inner.cookies
async def text(self) -> str:
content = await self.content.read()
@@ -29,7 +32,6 @@ class StreamResponse:
async def json(self, **kwargs):
return json.loads(await self.content.read(), **kwargs)
class StreamRequest:
def __init__(self, session: AsyncSession, method: str, url: str, **kwargs):
self.session = session
@@ -50,10 +52,13 @@ class StreamRequest:
def on_done(self, task):
self.content.feed_eof()
self.curl.clean_after_perform()
self.curl.reset()
self.session.push_curl(self.curl)
async def __aenter__(self) -> StreamResponse:
self.curl = await self.session.pop_curl()
self.enter = self.session.loop.create_future()
self.enter = self.loop.create_future()
request, _, header_buffer = self.session._set_curl_options(
self.curl,
self.method,
@@ -61,8 +66,8 @@ class StreamRequest:
content_callback=self.on_content,
**self.options
)
handle = self.session.acurl.add_handle(self.curl)
self.handle = self.session.loop.create_task(handle)
await self.session.acurl.add_handle(self.curl, False)
self.handle = self.session.acurl._curl2future[self.curl]
self.handle.add_done_callback(self.on_done)
await self.enter
return StreamResponse(
@@ -72,7 +77,20 @@ class StreamRequest:
)
async def __aexit__(self, exc_type, exc, tb):
await self.handle
self.curl.clean_after_perform()
self.curl.reset()
self.session.push_curl(self.curl)
pass
class AsyncSession(BaseSession):
def request(
self,
method: str,
url: str,
**kwargs
) -> StreamRequest:
return StreamRequest(self, method, url, **kwargs)
head = partialmethod(request, "HEAD")
get = partialmethod(request, "GET")
post = partialmethod(request, "POST")
put = partialmethod(request, "PUT")
patch = partialmethod(request, "PATCH")
delete = partialmethod(request, "DELETE")
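
A rough sketch of streaming with the patched session above, mirroring how the providers use it (the URL is a placeholder):

import asyncio
from g4f.requests import AsyncSession

async def demo():
    async with AsyncSession(impersonate="chrome107") as session:
        async with session.get("https://example.com") as response:
            response.raise_for_status()
            async for chunk in response.content.iter_any():
                print(chunk.decode(), end="")

asyncio.run(demo())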