Improve slim docker image example, clean up OpenaiChat provider (#2397)
* Improve slim docker image example, clean up OpenaiChat provider
* Enhance event loop management for asynchronous generators
* Fix attribute "shutdown_default_executor" not found in old Python versions
* Add asyncio file with all async helpers
parent 4be8e69ea2
commit e4bfd9db5c
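The "shutdown_default_executor" fix in the commit message is a compatibility guard: `AbstractEventLoop.shutdown_default_executor` was only added in Python 3.9, so the new helper probes for it with `hasattr` before calling it. A minimal sketch of that pattern (not g4f code, just the shape of the guard on a throwaway loop):

```python
import asyncio

# Tear down a privately owned event loop, guarding the call that
# older Python versions (before 3.9) do not provide.
loop = asyncio.new_event_loop()
try:
    loop.run_until_complete(asyncio.sleep(0))  # stand-in for real work
finally:
    loop.run_until_complete(loop.shutdown_asyncgens())
    if hasattr(loop, "shutdown_default_executor"):
        loop.run_until_complete(loop.shutdown_default_executor())
    loop.close()
```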
README.md
@@ -105,15 +105,18 @@ docker run \
   hlohaus789/g4f:latest
 ```

-Or run this command to start the gui without a browser and in the debug mode:
+Start the GUI without a browser requirement and in debug mode.
+There's no need to update the Docker image every time. Simply remove the g4f package from the image and install the Python package:
 ```bash
 docker pull hlohaus789/g4f:latest-slim
 docker run \
   -p 8080:8080 \
   -v ${PWD}/har_and_cookies:/app/har_and_cookies \
   -v ${PWD}/generated_images:/app/generated_images \
   hlohaus789/g4f:latest-slim \
-  python -m g4f.cli gui -debug
+  rm -r -f /app/g4f/ \
+  && pip install -U g4f[slim] \
+  && python -m g4f.cli gui -d
 ```

 3. **Access the Client:**
g4f/Provider/needs_auth/OpenaiChat.py
@@ -15,18 +15,14 @@ try:
     has_nodriver = True
 except ImportError:
     has_nodriver = False
-try:
-    from platformdirs import user_config_dir
-    has_platformdirs = True
-except ImportError:
-    has_platformdirs = False

 from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from ...typing import AsyncResult, Messages, Cookies, ImageType, AsyncIterator
 from ...requests.raise_for_status import raise_for_status
-from ...requests.aiohttp import StreamSession
+from ...requests import StreamSession
+from ...requests import get_nodriver
 from ...image import ImageResponse, ImageRequest, to_image, to_bytes, is_accepted_format
-from ...errors import MissingAuthError, ResponseError
+from ...errors import MissingAuthError
 from ...providers.response import BaseConversation, FinishReason, SynthesizeData
 from ..helper import format_cookies
 from ..openai.har_file import get_request_config, NoValidHarFileError
@@ -62,7 +58,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
     supports_system_message = True
     default_model = "auto"
     default_vision_model = "gpt-4o"
-    fallback_models = ["auto", "gpt-4", "gpt-4o", "gpt-4o-mini", "gpt-4o-canmore", "o1-preview", "o1-mini"]
+    fallback_models = [default_model, "gpt-4", "gpt-4o", "gpt-4o-mini", "gpt-4o-canmore", "o1-preview", "o1-mini"]
     vision_models = fallback_models
     image_models = fallback_models

@@ -83,51 +79,6 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             cls.models = cls.fallback_models
         return cls.models

-    @classmethod
-    async def create(
-        cls,
-        prompt: str = None,
-        model: str = "",
-        messages: Messages = [],
-        action: str = "next",
-        **kwargs
-    ) -> Response:
-        """
-        Create a new conversation or continue an existing one
-
-        Args:
-            prompt: The user input to start or continue the conversation
-            model: The name of the model to use for generating responses
-            messages: The list of previous messages in the conversation
-            history_disabled: A flag indicating if the history and training should be disabled
-            action: The type of action to perform, either "next", "continue", or "variant"
-            conversation_id: The ID of the existing conversation, if any
-            parent_id: The ID of the parent message, if any
-            image: The image to include in the user input, if any
-            **kwargs: Additional keyword arguments to pass to the generator
-
-        Returns:
-            A Response object that contains the generator, action, messages, and options
-        """
-        # Add the user input to the messages list
-        if prompt is not None:
-            messages.append({
-                "role": "user",
-                "content": prompt
-            })
-        generator = cls.create_async_generator(
-            model,
-            messages,
-            return_conversation=True,
-            **kwargs
-        )
-        return Response(
-            generator,
-            action,
-            messages,
-            kwargs
-        )
-
     @classmethod
     async def upload_image(
         cls,
@@ -189,32 +140,6 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             image_data["download_url"] = (await response.json())["download_url"]
         return ImageRequest(image_data)

-    @classmethod
-    async def get_default_model(cls, session: StreamSession, headers: dict):
-        """
-        Get the default model name from the service
-
-        Args:
-            session: The StreamSession object to use for requests
-            headers: The headers to include in the requests
-
-        Returns:
-            The default model name as a string
-        """
-        if not cls.default_model:
-            url = f"{cls.url}/backend-anon/models" if cls._api_key is None else f"{cls.url}/backend-api/models"
-            async with session.get(url, headers=headers) as response:
-                cls._update_request_args(session)
-                if response.status == 401:
-                    raise MissingAuthError('Add a .har file for OpenaiChat' if cls._api_key is None else "Invalid api key")
-                await raise_for_status(response)
-                data = await response.json()
-                if "categories" in data:
-                    cls.default_model = data["categories"][-1]["default_model"]
-                    return cls.default_model
-                raise ResponseError(data)
-        return cls.default_model
-
     @classmethod
     def create_messages(cls, messages: Messages, image_request: ImageRequest = None):
         """
@@ -296,30 +221,6 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
         except Exception as e:
             raise RuntimeError(f"Error in downloading image: {e}")

-    @classmethod
-    async def delete_conversation(cls, session: StreamSession, headers: dict, conversation_id: str):
-        """
-        Deletes a conversation by setting its visibility to False.
-
-        This method sends an HTTP PATCH request to update the visibility of a conversation.
-        It's used to effectively delete a conversation from being accessed or displayed in the future.
-
-        Args:
-            session (StreamSession): The StreamSession object used for making HTTP requests.
-            headers (dict): HTTP headers to be used for the request.
-            conversation_id (str): The unique identifier of the conversation to be deleted.
-
-        Raises:
-            HTTPError: If the HTTP request fails or returns an unsuccessful status code.
-        """
-        async with session.patch(
-            f"{cls.url}/backend-api/conversation/{conversation_id}",
-            json={"is_visible": False},
-            headers=headers
-        ) as response:
-            cls._update_request_args(session)
-            ...
-
     @classmethod
     async def create_async_generator(
         cls,
@@ -327,7 +228,6 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
         messages: Messages,
         proxy: str = None,
         timeout: int = 180,
-        api_key: str = None,
         cookies: Cookies = None,
         auto_continue: bool = False,
         history_disabled: bool = False,
@@ -465,7 +365,6 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                     continue
                 await raise_for_status(response)
                 if return_conversation:
-                    history_disabled = False
                     yield conversation
                 async for line in response.iter_lines():
                     async for chunk in cls.iter_messages_line(session, line, conversation):
@@ -483,19 +382,6 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                 else:
                     break
         yield FinishReason(conversation.finish_reason)
-        if history_disabled and auto_continue:
-            await cls.delete_conversation(session, cls._headers, conversation.conversation_id)
-
-    @classmethod
-    async def iter_messages_chunk(
-        cls,
-        messages: AsyncIterator,
-        session: StreamSession,
-        fields: Conversation,
-    ) -> AsyncIterator:
-        async for message in messages:
-            async for chunk in cls.iter_messages_line(session, message, fields):
-                yield chunk

     @classmethod
     async def iter_messages_line(cls, session: StreamSession, line: bytes, fields: Conversation) -> AsyncIterator:
@@ -575,15 +461,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):

     @classmethod
     async def nodriver_auth(cls, proxy: str = None):
-        if has_platformdirs:
-            user_data_dir = user_config_dir("g4f-nodriver")
-        else:
-            user_data_dir = None
-        debug.log(f"Open nodriver with user_dir: {user_data_dir}")
-        browser = await nodriver.start(
-            user_data_dir=user_data_dir,
-            browser_args=None if proxy is None else [f"--proxy-server={proxy}"],
-        )
+        browser = await get_nodriver(proxy=proxy)
         page = browser.main_tab
         def on_request(event: nodriver.cdp.network.RequestWillBeSent):
             if event.request.url == start_url or event.request.url.startswith(conversation_url):
@@ -622,14 +500,14 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                 pass
         for c in await page.send(nodriver.cdp.network.get_cookies([cls.url])):
             RequestConfig.cookies[c.name] = c.value
-        RequestConfig.user_agent = await page.evaluate("window.navigator.userAgent")
+        user_agent = await page.evaluate("window.navigator.userAgent")
         await page.select("#prompt-textarea", 240)
         while True:
             if RequestConfig.proof_token:
                 break
             await asyncio.sleep(1)
         await page.close()
-        cls._create_request_args(RequestConfig.cookies, RequestConfig.headers, user_agent=RequestConfig.user_agent)
+        cls._create_request_args(RequestConfig.cookies, RequestConfig.headers, user_agent=user_agent)
         cls._set_api_key(RequestConfig.access_token)

     @staticmethod
@@ -673,89 +551,3 @@ class Conversation(BaseConversation):
         self.message_id = message_id
         self.finish_reason = finish_reason
         self.is_recipient = False
-
-class Response():
-    """
-    Class to encapsulate a response from the chat service.
-    """
-    def __init__(
-        self,
-        generator: AsyncResult,
-        action: str,
-        messages: Messages,
-        options: dict
-    ):
-        self._generator = generator
-        self.action = action
-        self.is_end = False
-        self._message = None
-        self._messages = messages
-        self._options = options
-        self._fields = None
-
-    async def generator(self) -> AsyncIterator:
-        if self._generator is not None:
-            self._generator = None
-            chunks = []
-            async for chunk in self._generator:
-                if isinstance(chunk, Conversation):
-                    self._fields = chunk
-                else:
-                    yield chunk
-                    chunks.append(str(chunk))
-            self._message = "".join(chunks)
-            if self._fields is None:
-                raise RuntimeError("Missing response fields")
-            self.is_end = self._fields.finish_reason == "stop"
-
-    def __aiter__(self):
-        return self.generator()
-
-    async def get_message(self) -> str:
-        await self.generator()
-        return self._message
-
-    async def get_fields(self) -> dict:
-        await self.generator()
-        return {
-            "conversation_id": self._fields.conversation_id,
-            "parent_id": self._fields.message_id
-        }
-
-    async def create_next(self, prompt: str, **kwargs) -> Response:
-        return await OpenaiChat.create(
-            **self._options,
-            prompt=prompt,
-            messages=await self.get_messages(),
-            action="next",
-            **await self.get_fields(),
-            **kwargs
-        )
-
-    async def do_continue(self, **kwargs) -> Response:
-        fields = await self.get_fields()
-        if self.is_end:
-            raise RuntimeError("Can't continue message. Message already finished.")
-        return await OpenaiChat.create(
-            **self._options,
-            messages=await self.get_messages(),
-            action="continue",
-            **fields,
-            **kwargs
-        )
-
-    async def create_variant(self, **kwargs) -> Response:
-        if self.action != "next":
-            raise RuntimeError("Can't create variant from continue or variant request.")
-        return await OpenaiChat.create(
-            **self._options,
-            messages=self._messages,
-            action="variant",
-            **await self.get_fields(),
-            **kwargs
-        )
-
-    async def get_messages(self) -> list:
-        messages = self._messages
-        messages.append({"role": "assistant", "content": await self.message()})
-        return messages
g4f/Provider/openai/har_file.py
@@ -25,7 +25,6 @@ class NoValidHarFileError(Exception):
     pass

 class RequestConfig:
-    user_agent: str = None
     cookies: dict = None
     headers: dict = None
     access_request_id: str = None
g4f/client/__init__.py
@@ -6,7 +6,6 @@ import random
 import string
 import asyncio
 import base64
-import logging
 from typing import Union, AsyncIterator, Iterator, Coroutine, Optional

 from ..providers.base_provider import AsyncGeneratorProvider
@@ -16,13 +15,13 @@ from ..providers.types import ProviderType
 from ..providers.response import ResponseType, FinishReason, BaseConversation, SynthesizeData
 from ..errors import NoImageResponseError, ModelNotFoundError
 from ..providers.retry_provider import IterListProvider
-from ..providers.base_provider import get_running_loop
+from ..providers.asyncio import get_running_loop, to_sync_generator, async_generator_to_list
 from ..Provider.needs_auth.BingCreateImages import BingCreateImages
 from .stubs import ChatCompletion, ChatCompletionChunk, Image, ImagesResponse
 from .image_models import ImageModels
 from .types import IterResponse, ImageProvider, Client as BaseClient
 from .service import get_model_and_provider, get_last_provider, convert_to_provider
-from .helper import find_stop, filter_json, filter_none, safe_aclose, to_sync_iter, to_async_iterator
+from .helper import find_stop, filter_json, filter_none, safe_aclose, to_async_iterator

 ChatCompletionResponseType = Iterator[Union[ChatCompletion, ChatCompletionChunk, BaseConversation]]
 AsyncChatCompletionResponseType = AsyncIterator[Union[ChatCompletion, ChatCompletionChunk, BaseConversation]]
@@ -50,8 +49,7 @@ def iter_response(
     idx = 0

     if hasattr(response, '__aiter__'):
-        # It's an async iterator, wrap it into a sync iterator
-        response = to_sync_iter(response)
+        response = to_sync_generator(response)

     for chunk in response:
         if isinstance(chunk, FinishReason):
@@ -231,10 +229,10 @@ class Completions:
             response = asyncio.run(response)
         if stream and hasattr(response, '__aiter__'):
             # It's an async generator, wrap it into a sync iterator
-            response = to_sync_iter(response)
+            response = to_sync_generator(response)
         elif hasattr(response, '__aiter__'):
             # If response is an async generator, collect it into a list
-            response = list(to_sync_iter(response))
+            response = asyncio.run(async_generator_to_list(response))
         response = iter_response(response, stream, response_format, max_tokens, stop)
         response = iter_append_model_and_provider(response)
         if stream:
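The two branches above give the synchronous client a single decision point: when the caller asked for a stream, the provider's async generator is bridged lazily; otherwise it is drained eagerly under asyncio.run. A rough sketch of that dispatch, where `fetch_chunks` is a hypothetical stand-in for a provider's `create_async_generator`:

```python
import asyncio
from typing import AsyncGenerator

from g4f.providers.asyncio import to_sync_generator, async_generator_to_list

async def fetch_chunks() -> AsyncGenerator[str, None]:
    # Hypothetical stand-in for a provider's create_async_generator().
    for word in ("hello", "world"):
        await asyncio.sleep(0)
        yield word

def create(stream: bool):
    response = fetch_chunks()
    if stream:
        # Streaming: bridge lazily into a plain sync generator.
        return to_sync_generator(response)
    # Non-streaming: drain eagerly into a list.
    return asyncio.run(async_generator_to_list(response))

print(list(create(stream=True)))   # ['hello', 'world']
print(create(stream=False))        # ['hello', 'world']
```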
g4f/client/helper.py
@@ -1,10 +1,7 @@
 from __future__ import annotations

 import re
-import queue
-import threading
 import logging
-import asyncio

 from typing import AsyncIterator, Iterator, AsyncGenerator, Optional

@@ -53,33 +50,6 @@ async def safe_aclose(generator: AsyncGenerator) -> None:
     except Exception as e:
         logging.warning(f"Error while closing generator: {e}")

-# Helper function to convert an async generator to a synchronous iterator
-def to_sync_iter(async_gen: AsyncIterator) -> Iterator:
-    q = queue.Queue()
-    loop = asyncio.new_event_loop()
-    done = object()
-
-    def _run():
-        asyncio.set_event_loop(loop)
-
-        async def iterate():
-            try:
-                async for item in async_gen:
-                    q.put(item)
-            finally:
-                q.put(done)
-
-        loop.run_until_complete(iterate())
-        loop.close()
-
-    threading.Thread(target=_run).start()
-
-    while True:
-        item = q.get()
-        if item is done:
-            break
-        yield item
-
 # Helper function to convert a synchronous iterator to an async iterator
 async def to_async_iterator(iterator: Iterator) -> AsyncIterator:
     for item in iterator:
g4f/gui/server/backend.py
@@ -1,13 +1,13 @@
 import json
-import asyncio
 import flask
 import os
 from flask import request, Flask
-from typing import AsyncGenerator, Generator
+from typing import Generator
 from werkzeug.utils import secure_filename

 from g4f.image import is_allowed_extension, to_image
 from g4f.client.service import convert_to_provider
+from g4f.providers.asyncio import to_sync_generator
 from g4f.errors import ProviderNotFoundError
 from g4f.cookies import get_cookies_dir
 from .api import Api
@@ -19,21 +19,6 @@ def safe_iter_generator(generator: Generator) -> Generator:
         yield from generator
     return iter_generator()

-def to_sync_generator(gen: AsyncGenerator) -> Generator:
-    loop = asyncio.new_event_loop()
-    asyncio.set_event_loop(loop)
-    gen = gen.__aiter__()
-    async def get_next():
-        try:
-            obj = await gen.__anext__()
-            return False, obj
-        except StopAsyncIteration: return True, None
-    while True:
-        done, obj = loop.run_until_complete(get_next())
-        if done:
-            break
-        yield obj
-
 class Backend_Api(Api):
     """
     Handles various endpoints in a Flask application for backend operations.
g4f/providers/asyncio.py (new file, 65 lines)
@@ -0,0 +1,65 @@
+from __future__ import annotations
+
+import asyncio
+from asyncio import AbstractEventLoop, runners
+from typing import Union, Callable, AsyncGenerator, Generator
+
+from ..errors import NestAsyncioError
+
+try:
+    import nest_asyncio
+    has_nest_asyncio = True
+except ImportError:
+    has_nest_asyncio = False
+try:
+    import uvloop
+    has_uvloop = True
+except ImportError:
+    has_uvloop = False
+
+def get_running_loop(check_nested: bool) -> Union[AbstractEventLoop, None]:
+    try:
+        loop = asyncio.get_running_loop()
+        # Do not patch uvloop loop because its incompatible.
+        if has_uvloop:
+            if isinstance(loop, uvloop.Loop):
+                return loop
+        if not hasattr(loop.__class__, "_nest_patched"):
+            if has_nest_asyncio:
+                nest_asyncio.apply(loop)
+            elif check_nested:
+                raise NestAsyncioError('Install "nest_asyncio" package | pip install -U nest_asyncio')
+        return loop
+    except RuntimeError:
+        pass
+
+# Fix for RuntimeError: async generator ignored GeneratorExit
+async def await_callback(callback: Callable):
+    return await callback()
+
+async def async_generator_to_list(generator: AsyncGenerator) -> list:
+    return [item async for item in generator]
+
+def to_sync_generator(generator: AsyncGenerator) -> Generator:
+    loop = get_running_loop(check_nested=False)
+    new_loop = False
+    if loop is None:
+        loop = asyncio.new_event_loop()
+        asyncio.set_event_loop(loop)
+        new_loop = True
+    gen = generator.__aiter__()
+    try:
+        while True:
+            yield loop.run_until_complete(await_callback(gen.__anext__))
+    except StopAsyncIteration:
+        pass
+    finally:
+        if new_loop:
+            try:
+                runners._cancel_all_tasks(loop)
+                loop.run_until_complete(loop.shutdown_asyncgens())
+                if hasattr(loop, "shutdown_default_executor"):
+                    loop.run_until_complete(loop.shutdown_default_executor())
+            finally:
+                asyncio.set_event_loop(None)
+                loop.close()
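For illustration, the new helpers can be driven like this; `ticker` is a made-up async generator, not part of g4f:

```python
import asyncio

from g4f.providers.asyncio import to_sync_generator, async_generator_to_list

async def ticker(n: int):
    # Made-up async generator for demonstration.
    for i in range(n):
        await asyncio.sleep(0)
        yield i

# Iterate lazily from plain synchronous code; to_sync_generator owns the
# loop it creates and shuts it down (asyncgens, executor, close) on exit.
for value in to_sync_generator(ticker(3)):
    print(value)  # 0, 1, 2

# Or collect everything in one shot under a fresh event loop.
print(asyncio.run(async_generator_to_list(ticker(3))))  # [0, 1, 2]
```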
g4f/providers/base_provider.py
@@ -7,30 +7,14 @@ from asyncio import AbstractEventLoop
 from concurrent.futures import ThreadPoolExecutor
 from abc import abstractmethod
 from inspect import signature, Parameter
 from typing import Callable, Union

 from ..typing import CreateResult, AsyncResult, Messages
 from .types import BaseProvider
+from .asyncio import get_running_loop, to_sync_generator
 from .response import FinishReason, BaseConversation, SynthesizeData
-from ..errors import NestAsyncioError, ModelNotSupportedError
+from ..errors import ModelNotSupportedError
 from .. import debug
-
-if sys.version_info < (3, 10):
-    NoneType = type(None)
-else:
-    from types import NoneType
-
-try:
-    import nest_asyncio
-    has_nest_asyncio = True
-except ImportError:
-    has_nest_asyncio = False
-try:
-    import uvloop
-    has_uvloop = True
-except ImportError:
-    has_uvloop = False
-
 # Set Windows event loop policy for better compatibility with asyncio and curl_cffi
 if sys.platform == 'win32':
     try:
@@ -41,26 +25,6 @@ if sys.platform == 'win32':
     except ImportError:
         pass

-def get_running_loop(check_nested: bool) -> Union[AbstractEventLoop, None]:
-    try:
-        loop = asyncio.get_running_loop()
-        # Do not patch uvloop loop because its incompatible.
-        if has_uvloop:
-            if isinstance(loop, uvloop.Loop):
-                return loop
-        if not hasattr(loop.__class__, "_nest_patched"):
-            if has_nest_asyncio:
-                nest_asyncio.apply(loop)
-            elif check_nested:
-                raise NestAsyncioError('Install "nest_asyncio" package | pip install -U nest_asyncio')
-        return loop
-    except RuntimeError:
-        pass
-
-# Fix for RuntimeError: async generator ignored GeneratorExit
-async def await_callback(callback: Callable):
-    return await callback()
-
 class AbstractProvider(BaseProvider):
     """
     Abstract class for providing asynchronous functionality to derived classes.
@@ -136,7 +100,6 @@ class AbstractProvider(BaseProvider):

         return f"g4f.Provider.{cls.__name__} supports: ({args}\n)"

-
 class AsyncProvider(AbstractProvider):
     """
     Provides asynchronous functionality for creating completions.
@@ -218,25 +181,9 @@ class AsyncGeneratorProvider(AsyncProvider):
         Returns:
             CreateResult: The result of the streaming completion creation.
         """
-        loop = get_running_loop(check_nested=False)
-        new_loop = False
-        if loop is None:
-            loop = asyncio.new_event_loop()
-            asyncio.set_event_loop(loop)
-            new_loop = True
-
-        generator = cls.create_async_generator(model, messages, stream=stream, **kwargs)
-        gen = generator.__aiter__()
-
-        try:
-            while True:
-                yield loop.run_until_complete(await_callback(gen.__anext__))
-        except StopAsyncIteration:
-            pass
-        finally:
-            if new_loop:
-                loop.close()
-                asyncio.set_event_loop(None)
+        return to_sync_generator(
+            cls.create_async_generator(model, messages, stream=stream, **kwargs)
+        )

     @classmethod
     async def create_async(
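The `get_running_loop` half of the helper matters when `create_completion` is invoked while an event loop is already running (a notebook cell, a web handler): a bare `run_until_complete` would raise "This event loop is already running", so the helper patches the loop with `nest_asyncio` when it is installed. A hedged sketch of that situation, assuming `nest_asyncio` is available:

```python
import asyncio

import nest_asyncio  # optional extra; g4f applies it when a loop is already running

async def main():
    loop = asyncio.get_running_loop()
    # Without this, loop.run_until_complete() inside a running loop raises
    # "This event loop is already running".
    nest_asyncio.apply(loop)
    print(loop.run_until_complete(asyncio.sleep(0, result="ok")))  # ok

asyncio.run(main())
```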
setup.py
@@ -23,18 +23,27 @@ EXTRA_REQUIRE = {
         "browser_cookie3", # get_cookies
         "duckduckgo-search>=5.0", # internet.search
         "beautifulsoup4", # internet.search and bing.create_images
-        "brotli", # openai, bing
         "platformdirs",
-        "cryptography",
         "aiohttp_socks", # proxy
         "pillow", # image
         "cairosvg", # svg image
         "werkzeug", "flask", # gui
         "fastapi", # api
-        "uvicorn", "nest_asyncio", # api
-        "pycryptodome", # openai
+        "uvicorn", # api
         "nodriver",
     ],
+    'slim': [
+        "curl_cffi>=0.6.2",
+        "certifi",
+        "duckduckgo-search>=5.0", # internet.search
+        "beautifulsoup4", # internet.search and bing.create_images
+        "aiohttp_socks", # proxy
+        "pillow", # image
+        "cairosvg", # svg image
+        "werkzeug", "flask", # gui
+        "fastapi", # api
+        "uvicorn", # api
+    ],
     "image": [
         "pillow",
         "cairosvg",