Move some modules, create providers dir

Set min version for duckduckgo
Make duckduckgo search async
Remove get_latest_version
Heiner Lohaus 2024-02-22 00:16:58 +01:00
parent 0a0698c7f3
commit e5b7f72b71
31 changed files with 448 additions and 435 deletions

View File

@ -167,7 +167,7 @@ To start the web interface, type the following codes in python:
from g4f.gui import run_gui
run_gui()
```
or type in command line:
or execute the following command:
```bash
python -m g4f.cli gui -port 8080 -debug
```
@ -182,7 +182,7 @@ See: [/docs/interference](/docs/interference.md)
##### Cookies / Access Token
For generating images with Bing and for the OpenAi Chat you need cookies or a token from your browser session. From Bing you need the "_U" cookie and from OpenAI you need the "access_token". You can pass the cookies / the access token in the create function or you use the `set_cookies` setter before you run G4F:
For generating images with Bing and for the OpenAI chat you need cookies or a token from your browser session. From Bing you need the "_U" cookie and from OpenAI you need the "access_token". You can pass the cookies / the access token to the create function, or use the `set_cookies` setter before you run G4F:
```python
from g4f.cookies import set_cookies
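# Hedged sketch, not part of the diff: typical set_cookies calls as described
# in the paragraph above. The domains and placeholder values are assumptions.
set_cookies(".bing.com", {
    "_U": "cookie value"
})
set_cookies("chat.openai.com", {
    "access_token": "token value"
})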

View File

@ -3,7 +3,7 @@ from urllib.parse import urlparse
import asyncio
from g4f import models, ChatCompletion
from g4f.base_provider import BaseProvider, BaseRetryProvider, ProviderType
from g4f.providers.types import BaseRetryProvider, ProviderType
from etc.testing._providers import get_providers
from g4f import debug

View File

@ -3,5 +3,7 @@ from .asyncio import *
from .backend import *
from .main import *
from .model import *
from .client import *
from .include import *
unittest.main()

View File

@ -1,9 +1,11 @@
import unittest
# import asyncio
from unittest.mock import MagicMock
from .mocks import ProviderMock
import g4f
try:
from g4f.gui.server.backend import Backend_Api, get_error_message
# from g4f.gui.server.internet import search
has_requirements = True
except:
has_requirements = False
@ -16,10 +18,10 @@ class TestBackendApi(unittest.TestCase):
self.app = MagicMock()
self.api = Backend_Api(self.app)
def test_version(self):
response = self.api.get_version()
self.assertIn("version", response)
self.assertIn("latest_version", response)
# def test_version(self):
# response = self.api.get_version()
# self.assertIn("version", response)
# self.assertIn("latest_version", response)
def test_get_models(self):
response = self.api.get_models()
@ -31,6 +33,10 @@ class TestBackendApi(unittest.TestCase):
self.assertIsInstance(response, list)
self.assertTrue(len(response) > 0)
# def test_search(self):
# result = asyncio.run(search("Hello"))
# self.assertEqual(5, len(result))
class TestUtilityFunctions(unittest.TestCase):
def setUp(self):

View File

@ -43,7 +43,7 @@ class TestPassModel(unittest.TestCase):
for chunk in response:
self.assertEqual(chunk.choices[0].delta.content, "You ")
def no_test_stop(self):
def test_stop(self):
client = Client(provider=YieldProviderMock)
messages = [{'role': 'user', 'content': chunk} for chunk in ["How ", "are ", "you", "?"]]
response = client.chat.completions.create(messages, "Hello", stop=["and"])

View File

@ -1,11 +1,15 @@
import sys
import pathlib
import unittest
sys.path.append(str(pathlib.Path(__file__).parent.parent.parent))
class TestImport(unittest.TestCase):
import g4f
def test_get_cookies(self):
from g4f import get_cookies as get_cookies_alias
from g4f.cookies import get_cookies
self.assertEqual(get_cookies_alias, get_cookies)
g4f.debug.logging = False
g4f.debug.version_check = False
def test_requests(self):
from g4f.requests import StreamSession
self.assertIsInstance(StreamSession, type)
DEFAULT_MESSAGES = [{'role': 'user', 'content': 'Hello'}]
if __name__ == '__main__':
unittest.main()

View File

@ -1,4 +1,4 @@
from g4f.Provider.base_provider import AbstractProvider, AsyncProvider, AsyncGeneratorProvider
from g4f.providers.base_provider import AbstractProvider, AsyncProvider, AsyncGeneratorProvider
class ProviderMock(AbstractProvider):
working = True

View File

@ -7,9 +7,9 @@ from aiohttp import ClientSession, FormData
from ..typing import AsyncGenerator, Messages, ImageType, Cookies
from .base_provider import AsyncGeneratorProvider
from .helper import get_connector, format_prompt
from ..providers.helper import get_connector, format_prompt
from ..image import to_bytes
from ..defaults import DEFAULT_HEADERS
from ..requests.defaults import DEFAULT_HEADERS
class You(AsyncGeneratorProvider):
url = "https://you.com"

View File

@ -1,9 +1,10 @@
from __future__ import annotations
from ..base_provider import BaseProvider, ProviderType
from .retry_provider import RetryProvider
from .base_provider import AsyncProvider, AsyncGeneratorProvider
from .create_images import CreateImagesProvider
from ..providers.types import BaseProvider, ProviderType
from ..providers.retry_provider import RetryProvider
from ..providers.base_provider import AsyncProvider, AsyncGeneratorProvider
from ..providers.create_images import CreateImagesProvider
from .deprecated import *
from .selenium import *
from .needs_auth import *
@ -15,6 +16,7 @@ from .AItianhu import AItianhu
from .Aura import Aura
from .Bestim import Bestim
from .Bing import Bing
from .BingCreateImages import BingCreateImages
from .ChatAnywhere import ChatAnywhere
from .ChatBase import ChatBase
from .ChatForAi import ChatForAi
@ -53,8 +55,6 @@ from .Vercel import Vercel
from .Ylokh import Ylokh
from .You import You
from .BingCreateImages import BingCreateImages
import sys
__modules__: list = [

View File

@ -1,281 +1,2 @@
from __future__ import annotations
import sys
import asyncio
from asyncio import AbstractEventLoop
from concurrent.futures import ThreadPoolExecutor
from abc import abstractmethod
from inspect import signature, Parameter
from .helper import get_cookies, format_prompt
from ..typing import CreateResult, AsyncResult, Messages, Union
from ..base_provider import BaseProvider
from ..errors import NestAsyncioError, ModelNotSupportedError
from .. import debug
if sys.version_info < (3, 10):
NoneType = type(None)
else:
from types import NoneType
# Set Windows event loop policy for better compatibility with asyncio and curl_cffi
if sys.platform == 'win32':
if isinstance(asyncio.get_event_loop_policy(), asyncio.WindowsProactorEventLoopPolicy):
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
def get_running_loop() -> Union[AbstractEventLoop, None]:
try:
loop = asyncio.get_running_loop()
if not hasattr(loop.__class__, "_nest_patched"):
raise NestAsyncioError(
'Use "create_async" instead of "create" function in a running event loop. Or use "nest_asyncio" package.'
)
return loop
except RuntimeError:
pass
class AbstractProvider(BaseProvider):
"""
Abstract class for providing asynchronous functionality to derived classes.
"""
@classmethod
async def create_async(
cls,
model: str,
messages: Messages,
*,
loop: AbstractEventLoop = None,
executor: ThreadPoolExecutor = None,
**kwargs
) -> str:
"""
Asynchronously creates a result based on the given model and messages.
Args:
cls (type): The class on which this method is called.
model (str): The model to use for creation.
messages (Messages): The messages to process.
loop (AbstractEventLoop, optional): The event loop to use. Defaults to None.
executor (ThreadPoolExecutor, optional): The executor for running async tasks. Defaults to None.
**kwargs: Additional keyword arguments.
Returns:
str: The created result as a string.
"""
loop = loop or asyncio.get_running_loop()
def create_func() -> str:
return "".join(cls.create_completion(model, messages, False, **kwargs))
return await asyncio.wait_for(
loop.run_in_executor(executor, create_func),
timeout=kwargs.get("timeout")
)
@classmethod
@property
def params(cls) -> str:
"""
Returns the parameters supported by the provider.
Args:
cls (type): The class on which this property is called.
Returns:
str: A string listing the supported parameters.
"""
sig = signature(
cls.create_async_generator if issubclass(cls, AsyncGeneratorProvider) else
cls.create_async if issubclass(cls, AsyncProvider) else
cls.create_completion
)
def get_type_name(annotation: type) -> str:
return annotation.__name__ if hasattr(annotation, "__name__") else str(annotation)
args = ""
for name, param in sig.parameters.items():
if name in ("self", "kwargs") or (name == "stream" and not cls.supports_stream):
continue
args += f"\n {name}"
args += f": {get_type_name(param.annotation)}" if param.annotation is not Parameter.empty else ""
args += f' = "{param.default}"' if param.default == "" else f" = {param.default}" if param.default is not Parameter.empty else ""
return f"g4f.Provider.{cls.__name__} supports: ({args}\n)"
class AsyncProvider(AbstractProvider):
"""
Provides asynchronous functionality for creating completions.
"""
@classmethod
def create_completion(
cls,
model: str,
messages: Messages,
stream: bool = False,
**kwargs
) -> CreateResult:
"""
Creates a completion result synchronously.
Args:
cls (type): The class on which this method is called.
model (str): The model to use for creation.
messages (Messages): The messages to process.
stream (bool): Indicates whether to stream the results. Defaults to False.
loop (AbstractEventLoop, optional): The event loop to use. Defaults to None.
**kwargs: Additional keyword arguments.
Returns:
CreateResult: The result of the completion creation.
"""
get_running_loop()
yield asyncio.run(cls.create_async(model, messages, **kwargs))
@staticmethod
@abstractmethod
async def create_async(
model: str,
messages: Messages,
**kwargs
) -> str:
"""
Abstract method for creating asynchronous results.
Args:
model (str): The model to use for creation.
messages (Messages): The messages to process.
**kwargs: Additional keyword arguments.
Raises:
NotImplementedError: If this method is not overridden in derived classes.
Returns:
str: The created result as a string.
"""
raise NotImplementedError()
class AsyncGeneratorProvider(AsyncProvider):
"""
Provides asynchronous generator functionality for streaming results.
"""
supports_stream = True
@classmethod
def create_completion(
cls,
model: str,
messages: Messages,
stream: bool = True,
**kwargs
) -> CreateResult:
"""
Creates a streaming completion result synchronously.
Args:
cls (type): The class on which this method is called.
model (str): The model to use for creation.
messages (Messages): The messages to process.
stream (bool): Indicates whether to stream the results. Defaults to True.
loop (AbstractEventLoop, optional): The event loop to use. Defaults to None.
**kwargs: Additional keyword arguments.
Returns:
CreateResult: The result of the streaming completion creation.
"""
loop = get_running_loop()
new_loop = False
if not loop:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
new_loop = True
generator = cls.create_async_generator(model, messages, stream=stream, **kwargs)
gen = generator.__aiter__()
# Fix for RuntimeError: async generator ignored GeneratorExit
async def await_callback(callback):
return await callback()
try:
while True:
yield loop.run_until_complete(await_callback(gen.__anext__))
except StopAsyncIteration:
...
# Fix for: ResourceWarning: unclosed event loop
finally:
if new_loop:
loop.close()
asyncio.set_event_loop(None)
@classmethod
async def create_async(
cls,
model: str,
messages: Messages,
**kwargs
) -> str:
"""
Asynchronously creates a result from a generator.
Args:
cls (type): The class on which this method is called.
model (str): The model to use for creation.
messages (Messages): The messages to process.
**kwargs: Additional keyword arguments.
Returns:
str: The created result as a string.
"""
return "".join([
chunk async for chunk in cls.create_async_generator(model, messages, stream=False, **kwargs)
if not isinstance(chunk, Exception)
])
@staticmethod
@abstractmethod
async def create_async_generator(
model: str,
messages: Messages,
stream: bool = True,
**kwargs
) -> AsyncResult:
"""
Abstract method for creating an asynchronous generator.
Args:
model (str): The model to use for creation.
messages (Messages): The messages to process.
stream (bool): Indicates whether to stream the results. Defaults to True.
**kwargs: Additional keyword arguments.
Raises:
NotImplementedError: If this method is not overridden in derived classes.
Returns:
AsyncResult: An asynchronous generator yielding results.
"""
raise NotImplementedError()
class ProviderModelMixin:
default_model: str
models: list[str] = []
model_aliases: dict[str, str] = {}
@classmethod
def get_models(cls) -> list[str]:
return cls.models
@classmethod
def get_model(cls, model: str) -> str:
if not model:
model = cls.default_model
elif model in cls.model_aliases:
model = cls.model_aliases[model]
elif model not in cls.get_models():
raise ModelNotSupportedError(f"Model is not supported: {model} in: {cls.__name__}")
debug.last_model = model
return model
from ..providers.base_provider import *
from .helper import get_cookies, format_prompt
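The two re-export lines above turn the old `g4f/Provider/base_provider.py` into a compatibility shim for the new `providers` package. A minimal sketch of what that implies, assuming only the re-exports shown in this diff:
```python
# Old and new import paths should resolve to the same class after this commit,
# since the legacy module only star-imports from g4f.providers.base_provider.
from g4f.Provider.base_provider import AbstractProvider as legacy_path
from g4f.providers.base_provider import AbstractProvider as new_path

assert legacy_path is new_path
```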

View File

@ -17,9 +17,9 @@ try:
except ImportError:
has_requirements = False
from ..create_images import CreateImagesProvider
from ...providers.create_images import CreateImagesProvider
from ..helper import get_connector
from ...base_provider import ProviderType
from ...providers.types import ProviderType
from ...errors import MissingRequirementsError
from ...webdriver import WebDriver, get_driver_cookies, get_browser

View File

@ -1,62 +1,2 @@
from __future__ import annotations
import random
import secrets
import string
from aiohttp import BaseConnector
from ..typing import Messages, Optional
from ..errors import MissingRequirementsError
from ..cookies import get_cookies
def format_prompt(messages: Messages, add_special_tokens=False) -> str:
"""
Format a series of messages into a single string, optionally adding special tokens.
Args:
messages (Messages): A list of message dictionaries, each containing 'role' and 'content'.
add_special_tokens (bool): Whether to add special formatting tokens.
Returns:
str: A formatted string containing all messages.
"""
if not add_special_tokens and len(messages) <= 1:
return messages[0]["content"]
formatted = "\n".join([
f'{message["role"].capitalize()}: {message["content"]}'
for message in messages
])
return f"{formatted}\nAssistant:"
def get_random_string(length: int = 10) -> str:
"""
Generate a random string of specified length, containing lowercase letters and digits.
Args:
length (int, optional): Length of the random string to generate. Defaults to 10.
Returns:
str: A random string of the specified length.
"""
return ''.join(
random.choice(string.ascii_lowercase + string.digits)
for _ in range(length)
)
def get_random_hex() -> str:
"""
Generate a random hexadecimal string of a fixed length.
Returns:
str: A random hexadecimal string of 32 characters (16 bytes).
"""
return secrets.token_hex(16).zfill(32)
def get_connector(connector: BaseConnector = None, proxy: str = None) -> Optional[BaseConnector]:
if proxy and not connector:
try:
from aiohttp_socks import ProxyConnector
connector = ProxyConnector.from_url(proxy)
except ImportError:
raise MissingRequirementsError('Install "aiohttp_socks" package for proxy support')
return connector
from ..providers.helper import *
from ..cookies import get_cookies

View File

@ -8,8 +8,8 @@ from .Provider import AsyncGeneratorProvider, ProviderUtils
from .typing import Messages, CreateResult, AsyncResult, Union
from .cookies import get_cookies, set_cookies
from . import debug, version
from .base_provider import BaseRetryProvider, ProviderType
from .Provider.base_provider import ProviderModelMixin
from .providers.types import BaseRetryProvider, ProviderType
from .providers.base_provider import ProviderModelMixin
def get_model_and_provider(model : Union[Model, str],
provider : Union[ProviderType, str, None],

View File

@ -1,12 +1,14 @@
from __future__ import annotations
import re
import os
from .stubs import ChatCompletion, ChatCompletionChunk, Image, ImagesResponse
from .typing import Union, Generator, Messages, ImageType
from .base_provider import BaseProvider, ProviderType
from .providers.types import BaseProvider, ProviderType
from .image import ImageResponse as ImageProviderResponse
from .Provider import BingCreateImages, Gemini, OpenaiChat
from .Provider.BingCreateImages import BingCreateImages
from .Provider.needs_auth import Gemini, OpenaiChat
from .errors import NoImageResponseError
from . import get_model_and_provider
@ -43,7 +45,7 @@ def iter_response(
yield ChatCompletionChunk(last_chunk, finish_reason)
content += str(chunk)
if max_tokens is not None and idx + 1 >= max_tokens:
finish_reason = "max_tokens"
finish_reason = "length"
first = -1
word = None
if stop is not None:
@ -69,7 +71,7 @@ def iter_response(
if not stream:
if response_format is not None and "type" in response_format:
if response_format["type"] == "json_object":
response = read_json(response)
content = read_json(content)
yield ChatCompletion(content, finish_reason)
class Client():
@ -89,13 +91,14 @@ class Client():
self.proxies: Proxies = proxies
def get_proxy(self) -> Union[str, None]:
if isinstance(self.proxies, str) or self.proxies is None:
if isinstance(self.proxies, str):
return self.proxies
elif self.proxies is None:
return os.environ.get("G4F_PROXY")
elif "all" in self.proxies:
return self.proxies["all"]
elif "https" in self.proxies:
return self.proxies["https"]
return None
class Completions():
def __init__(self, client: Client, provider: ProviderType = None):
@ -123,7 +126,7 @@ class Completions():
stream,
**kwargs
)
response = provider.create_completion(model, messages, stream=stream, **kwargs)
response = provider.create_completion(model, messages, stream=stream, proxy=self.client.get_proxy(), **kwargs)
stop = [stop] if isinstance(stop, str) else stop
response = iter_response(response, stream, response_format, max_tokens, stop)
return response if stream else next(response)
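A short hedged sketch of how the revised `get_proxy` resolution behaves, assuming the `Client` constructor accepts the `proxies` mapping stored above (all URLs here are illustrative placeholders):
```python
import os
from g4f.client import Client

# Illustrative values only.
os.environ["G4F_PROXY"] = "http://127.0.0.1:8080"

default_client = Client()                                          # proxies is None
socks_client = Client(proxies={"all": "socks5://127.0.0.1:9050"})  # explicit mapping

print(default_client.get_proxy())  # None now falls back to the G4F_PROXY environment variable
print(socks_client.get_proxy())    # the "all" entry is checked before "https"
```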

View File

@ -1,4 +1,4 @@
from .base_provider import ProviderType
from .providers.types import ProviderType
logging: bool = False
version_check: bool = True

View File

@ -97,7 +97,7 @@ class Backend_Api:
current_version = None
return {
"version": current_version,
"latest_version": version.get_latest_version(),
"latest_version": version.utils.latest_version,
}
def generate_title(self):

View File

@ -2,7 +2,7 @@ from __future__ import annotations
from aiohttp import ClientSession, ClientTimeout
try:
from duckduckgo_search import DDGS
from duckduckgo_search.duckduckgo_search_async import AsyncDDGS
from bs4 import BeautifulSoup
has_requirements = True
except ImportError:
@ -30,7 +30,10 @@ class SearchResults():
search += result.snippet
search += f"\n\nSource: [[{idx}]]({result.url})"
return search
def __len__(self) -> int:
return len(self.results)
class SearchResultEntry():
def __init__(self, title: str, url: str, snippet: str, text: str = None):
self.title = title
@ -96,21 +99,20 @@ async def fetch_and_scrape(session: ClientSession, url: str, max_words: int = No
async def search(query: str, n_results: int = 5, max_words: int = 2500, add_text: bool = True) -> SearchResults:
if not has_requirements:
raise MissingRequirementsError('Install "duckduckgo-search" and "beautifulsoup4" package')
with DDGS() as ddgs:
async with AsyncDDGS() as ddgs:
results = []
for result in ddgs.text(
async for result in ddgs.text(
query,
region="wt-wt",
safesearch="moderate",
timelimit="y",
max_results=n_results
):
results.append(SearchResultEntry(
result["title"],
result["href"],
result["body"]
))
if len(results) >= n_results:
break
if add_text:
requests = []
@ -136,7 +138,6 @@ async def search(query: str, n_results: int = 5, max_words: int = 2500, add_text
return SearchResults(formatted_results)
def get_search_message(prompt) -> str:
try:
search_results = asyncio.run(search(prompt))
@ -146,7 +147,6 @@ def get_search_message(prompt) -> str:
Instruction: Using the provided web search results, to write a comprehensive reply to the user request.
Make sure to add the sources of cites using [[Number]](Url) notation after the reference. Example: [[0]](http://google.com)
If the provided search results refer to multiple subjects with the same name, write separate answers for each subject.
User request:
{prompt}
@ -154,4 +154,4 @@ User request:
return message
except Exception as e:
print("Couldn't do web search:", e)
return prompt
return prompt
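Since `search` is now a coroutine built on `AsyncDDGS`, synchronous callers go through `asyncio.run`, as `get_search_message` does above. A hedged usage sketch, assuming the module path shown in this diff:
```python
import asyncio
from g4f.gui.server.internet import search

results = asyncio.run(search("gpt4free", n_results=5))
print(len(results))        # SearchResults now implements __len__
print(str(results)[:300])  # formatted snippets with "Source: [[idx]](url)" references
```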

View File

@ -11,7 +11,7 @@ try:
has_requirements = True
except ImportError:
has_requirements = False
from .errors import MissingRequirementsError
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif', 'webp', 'svg'}
@ -28,9 +28,11 @@ def to_image(image: ImageType, is_svg: bool = False) -> Image:
"""
if not has_requirements:
raise MissingRequirementsError('Install "pillow" package for images')
if isinstance(image, str):
is_data_uri_an_image(image)
image = extract_data_uri(image)
if is_svg:
try:
import cairosvg
@ -41,6 +43,7 @@ def to_image(image: ImageType, is_svg: bool = False) -> Image:
buffer = BytesIO()
cairosvg.svg2png(image, write_to=buffer)
return open_image(buffer)
if isinstance(image, bytes):
is_accepted_format(image)
return open_image(BytesIO(image))
@ -48,6 +51,7 @@ def to_image(image: ImageType, is_svg: bool = False) -> Image:
image = open_image(image)
image.load()
return image
return image
def is_allowed_extension(filename: str) -> bool:
@ -200,17 +204,16 @@ def format_images_markdown(images: Union[str, list], alt: str, preview: Union[st
str: The formatted markdown string.
"""
if isinstance(images, str):
images = f"[![{alt}]({preview.replace('{image}', images) if preview else images})]({images})"
result = f"[![{alt}]({preview.replace('{image}', images) if preview else images})]({images})"
else:
if not isinstance(preview, list):
preview = [preview.replace('{image}', image) if preview else image for image in images]
images = [
result = "\n".join(
f"[![#{idx+1} {alt}]({preview[idx]})]({image})" for idx, image in enumerate(images)
]
images = "\n".join(images)
)
start_flag = "<!-- generated images start -->\n"
end_flag = "<!-- generated images end -->\n"
return f"\n{start_flag}{images}\n{end_flag}\n"
return f"\n{start_flag}{result}\n{end_flag}\n"
def to_bytes(image: ImageType) -> bytes:
"""
@ -245,19 +248,19 @@ class ImageResponse:
self.images = images
self.alt = alt
self.options = options
def __str__(self) -> str:
return format_images_markdown(self.images, self.alt, self.get("preview"))
def get(self, key: str):
return self.options.get(key)
class ImageRequest:
def __init__(
self,
options: dict = {}
):
self.options = options
def get(self, key: str):
return self.options.get(key)
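For reference, a hedged illustration of the refactored `format_images_markdown` branches above (URLs are placeholders):
```python
from g4f.image import format_images_markdown

# Single image: wrapped as one markdown preview link between the start/end flags.
print(format_images_markdown("https://example.com/a.png", "alt text"))

# Multiple images: one "[![#N alt](preview)](image)" line per entry, joined with newlines.
print(format_images_markdown(
    ["https://example.com/a.png", "https://example.com/b.png"],
    "alt text",
    preview="https://example.com/thumb?src={image}",
))
```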

View File

@ -1,5 +1,7 @@
from __future__ import annotations
from dataclasses import dataclass
from .Provider import RetryProvider, ProviderType
from .Provider import (
Chatgpt4Online,

View File

@ -0,0 +1,280 @@
from __future__ import annotations
import sys
import asyncio
from asyncio import AbstractEventLoop
from concurrent.futures import ThreadPoolExecutor
from abc import abstractmethod
from inspect import signature, Parameter
from ..typing import CreateResult, AsyncResult, Messages, Union
from .types import BaseProvider
from ..errors import NestAsyncioError, ModelNotSupportedError
from .. import debug
if sys.version_info < (3, 10):
NoneType = type(None)
else:
from types import NoneType
# Set Windows event loop policy for better compatibility with asyncio and curl_cffi
if sys.platform == 'win32':
if isinstance(asyncio.get_event_loop_policy(), asyncio.WindowsProactorEventLoopPolicy):
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
def get_running_loop() -> Union[AbstractEventLoop, None]:
try:
loop = asyncio.get_running_loop()
if not hasattr(loop.__class__, "_nest_patched"):
raise NestAsyncioError(
'Use "create_async" instead of "create" function in a running event loop. Or use "nest_asyncio" package.'
)
return loop
except RuntimeError:
pass
class AbstractProvider(BaseProvider):
"""
Abstract class for providing asynchronous functionality to derived classes.
"""
@classmethod
async def create_async(
cls,
model: str,
messages: Messages,
*,
loop: AbstractEventLoop = None,
executor: ThreadPoolExecutor = None,
**kwargs
) -> str:
"""
Asynchronously creates a result based on the given model and messages.
Args:
cls (type): The class on which this method is called.
model (str): The model to use for creation.
messages (Messages): The messages to process.
loop (AbstractEventLoop, optional): The event loop to use. Defaults to None.
executor (ThreadPoolExecutor, optional): The executor for running async tasks. Defaults to None.
**kwargs: Additional keyword arguments.
Returns:
str: The created result as a string.
"""
loop = loop or asyncio.get_running_loop()
def create_func() -> str:
return "".join(cls.create_completion(model, messages, False, **kwargs))
return await asyncio.wait_for(
loop.run_in_executor(executor, create_func),
timeout=kwargs.get("timeout")
)
@classmethod
@property
def params(cls) -> str:
"""
Returns the parameters supported by the provider.
Args:
cls (type): The class on which this property is called.
Returns:
str: A string listing the supported parameters.
"""
sig = signature(
cls.create_async_generator if issubclass(cls, AsyncGeneratorProvider) else
cls.create_async if issubclass(cls, AsyncProvider) else
cls.create_completion
)
def get_type_name(annotation: type) -> str:
return annotation.__name__ if hasattr(annotation, "__name__") else str(annotation)
args = ""
for name, param in sig.parameters.items():
if name in ("self", "kwargs") or (name == "stream" and not cls.supports_stream):
continue
args += f"\n {name}"
args += f": {get_type_name(param.annotation)}" if param.annotation is not Parameter.empty else ""
args += f' = "{param.default}"' if param.default == "" else f" = {param.default}" if param.default is not Parameter.empty else ""
return f"g4f.Provider.{cls.__name__} supports: ({args}\n)"
class AsyncProvider(AbstractProvider):
"""
Provides asynchronous functionality for creating completions.
"""
@classmethod
def create_completion(
cls,
model: str,
messages: Messages,
stream: bool = False,
**kwargs
) -> CreateResult:
"""
Creates a completion result synchronously.
Args:
cls (type): The class on which this method is called.
model (str): The model to use for creation.
messages (Messages): The messages to process.
stream (bool): Indicates whether to stream the results. Defaults to False.
loop (AbstractEventLoop, optional): The event loop to use. Defaults to None.
**kwargs: Additional keyword arguments.
Returns:
CreateResult: The result of the completion creation.
"""
get_running_loop()
yield asyncio.run(cls.create_async(model, messages, **kwargs))
@staticmethod
@abstractmethod
async def create_async(
model: str,
messages: Messages,
**kwargs
) -> str:
"""
Abstract method for creating asynchronous results.
Args:
model (str): The model to use for creation.
messages (Messages): The messages to process.
**kwargs: Additional keyword arguments.
Raises:
NotImplementedError: If this method is not overridden in derived classes.
Returns:
str: The created result as a string.
"""
raise NotImplementedError()
class AsyncGeneratorProvider(AsyncProvider):
"""
Provides asynchronous generator functionality for streaming results.
"""
supports_stream = True
@classmethod
def create_completion(
cls,
model: str,
messages: Messages,
stream: bool = True,
**kwargs
) -> CreateResult:
"""
Creates a streaming completion result synchronously.
Args:
cls (type): The class on which this method is called.
model (str): The model to use for creation.
messages (Messages): The messages to process.
stream (bool): Indicates whether to stream the results. Defaults to True.
loop (AbstractEventLoop, optional): The event loop to use. Defaults to None.
**kwargs: Additional keyword arguments.
Returns:
CreateResult: The result of the streaming completion creation.
"""
loop = get_running_loop()
new_loop = False
if not loop:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
new_loop = True
generator = cls.create_async_generator(model, messages, stream=stream, **kwargs)
gen = generator.__aiter__()
# Fix for RuntimeError: async generator ignored GeneratorExit
async def await_callback(callback):
return await callback()
try:
while True:
yield loop.run_until_complete(await_callback(gen.__anext__))
except StopAsyncIteration:
...
# Fix for: ResourceWarning: unclosed event loop
finally:
if new_loop:
loop.close()
asyncio.set_event_loop(None)
@classmethod
async def create_async(
cls,
model: str,
messages: Messages,
**kwargs
) -> str:
"""
Asynchronously creates a result from a generator.
Args:
cls (type): The class on which this method is called.
model (str): The model to use for creation.
messages (Messages): The messages to process.
**kwargs: Additional keyword arguments.
Returns:
str: The created result as a string.
"""
return "".join([
chunk async for chunk in cls.create_async_generator(model, messages, stream=False, **kwargs)
if not isinstance(chunk, Exception)
])
@staticmethod
@abstractmethod
async def create_async_generator(
model: str,
messages: Messages,
stream: bool = True,
**kwargs
) -> AsyncResult:
"""
Abstract method for creating an asynchronous generator.
Args:
model (str): The model to use for creation.
messages (Messages): The messages to process.
stream (bool): Indicates whether to stream the results. Defaults to True.
**kwargs: Additional keyword arguments.
Raises:
NotImplementedError: If this method is not overridden in derived classes.
Returns:
AsyncResult: An asynchronous generator yielding results.
"""
raise NotImplementedError()
class ProviderModelMixin:
default_model: str
models: list[str] = []
model_aliases: dict[str, str] = {}
@classmethod
def get_models(cls) -> list[str]:
return cls.models
@classmethod
def get_model(cls, model: str) -> str:
if not model:
model = cls.default_model
elif model in cls.model_aliases:
model = cls.model_aliases[model]
elif model not in cls.get_models():
raise ModelNotSupportedError(f"Model is not supported: {model} in: {cls.__name__}")
debug.last_model = model
return model
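A small sketch of how `ProviderModelMixin.get_model` resolves names; `DemoProvider` and its model list are hypothetical:
```python
from g4f.providers.base_provider import ProviderModelMixin

class DemoProvider(ProviderModelMixin):  # hypothetical provider for illustration
    default_model = "gpt-3.5-turbo"
    models = ["gpt-3.5-turbo", "gpt-4"]
    model_aliases = {"gpt-4-turbo": "gpt-4"}

print(DemoProvider.get_model(""))             # empty name falls back to default_model
print(DemoProvider.get_model("gpt-4-turbo"))  # alias resolved to "gpt-4"
# DemoProvider.get_model("claude-3") would raise ModelNotSupportedError
```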

View File

@ -2,9 +2,10 @@ from __future__ import annotations
import re
import asyncio
from .. import debug
from ..typing import CreateResult, Messages
from ..base_provider import BaseProvider, ProviderType
from .types import BaseProvider, ProviderType
system_message = """
You can generate images, pictures, photos or img with the DALL-E 3 image generator.

g4f/providers/helper.py Normal file
View File

@ -0,0 +1,61 @@
from __future__ import annotations
import random
import secrets
import string
from aiohttp import BaseConnector
from ..typing import Messages, Optional
from ..errors import MissingRequirementsError
def format_prompt(messages: Messages, add_special_tokens=False) -> str:
"""
Format a series of messages into a single string, optionally adding special tokens.
Args:
messages (Messages): A list of message dictionaries, each containing 'role' and 'content'.
add_special_tokens (bool): Whether to add special formatting tokens.
Returns:
str: A formatted string containing all messages.
"""
if not add_special_tokens and len(messages) <= 1:
return messages[0]["content"]
formatted = "\n".join([
f'{message["role"].capitalize()}: {message["content"]}'
for message in messages
])
return f"{formatted}\nAssistant:"
def get_random_string(length: int = 10) -> str:
"""
Generate a random string of specified length, containing lowercase letters and digits.
Args:
length (int, optional): Length of the random string to generate. Defaults to 10.
Returns:
str: A random string of the specified length.
"""
return ''.join(
random.choice(string.ascii_lowercase + string.digits)
for _ in range(length)
)
def get_random_hex() -> str:
"""
Generate a random hexadecimal string of a fixed length.
Returns:
str: A random hexadecimal string of 32 characters (16 bytes).
"""
return secrets.token_hex(16).zfill(32)
def get_connector(connector: BaseConnector = None, proxy: str = None) -> Optional[BaseConnector]:
if proxy and not connector:
try:
from aiohttp_socks import ProxyConnector
connector = ProxyConnector.from_url(proxy)
except ImportError:
raise MissingRequirementsError('Install "aiohttp_socks" package for proxy support')
return connector
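A quick hedged example of `format_prompt` from this new module:
```python
from g4f.providers.helper import format_prompt

messages = [
    {"role": "system", "content": "Be brief."},
    {"role": "user", "content": "Hi"},
]
print(format_prompt(messages))
# System: Be brief.
# User: Hi
# Assistant:
```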

View File

@ -2,8 +2,9 @@ from __future__ import annotations
import asyncio
import random
from ..typing import CreateResult, Messages
from ..base_provider import BaseRetryProvider
from .types import BaseRetryProvider
from .. import debug
from ..errors import RetryProviderError, RetryNoProviderError

View File

@ -2,7 +2,7 @@ from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Union, List, Dict, Type
from .typing import Messages, CreateResult
from ..typing import Messages, CreateResult
class BaseProvider(ABC):
"""
@ -81,7 +81,7 @@ class BaseProvider(ABC):
Dict[str, str]: A dictionary with provider's details.
"""
return {'name': cls.__name__, 'url': cls.url}
class BaseRetryProvider(BaseProvider):
"""
Base class for a provider that implements retry logic.
@ -113,5 +113,5 @@ class BaseRetryProvider(BaseProvider):
self.working = True
self.exceptions: Dict[str, Exception] = {}
self.last_provider: Type[BaseProvider] = None
ProviderType = Union[Type[BaseProvider], BaseRetryProvider]

View File

@ -4,15 +4,15 @@ from urllib.parse import urlparse
try:
from curl_cffi.requests import Session
from .requests_curl_cffi import StreamResponse, StreamSession
from .curl_cffi import StreamResponse, StreamSession
has_curl_cffi = True
except ImportError:
from typing import Type as Session
from .requests_aiohttp import StreamResponse, StreamSession
from .aiohttp import StreamResponse, StreamSession
has_curl_cffi = False
from .webdriver import WebDriver, WebDriverSession, bypass_cloudflare, get_driver_cookies
from .errors import MissingRequirementsError
from ..webdriver import WebDriver, WebDriverSession, bypass_cloudflare, get_driver_cookies
from ..errors import MissingRequirementsError
from .defaults import DEFAULT_HEADERS
def get_args_from_browser(url: str, webdriver: WebDriver = None, proxy: str = None, timeout: int = 120) -> dict:

View File

@ -3,7 +3,7 @@ from __future__ import annotations
from aiohttp import ClientSession, ClientResponse, ClientTimeout
from typing import AsyncGenerator, Any
from .Provider.helper import get_connector
from ..providers.helper import get_connector
from .defaults import DEFAULT_HEADERS
class StreamResponse(ClientResponse):

View File

@ -44,7 +44,7 @@ class StreamResponse:
inner: Response = await self.inner
self.inner = inner
self.request = inner.request
self.status_code: int = inner.status_code
self.status: int = inner.status_code
self.reason: str = inner.reason
self.ok: bool = inner.ok
self.headers = inner.headers
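With the rename above, the curl_cffi-backed response exposes `status` (matching the aiohttp wrapper) instead of `status_code`. A hedged sketch of the usual streaming pattern, assuming `StreamSession` is used as an async context manager as elsewhere in g4f:
```python
import asyncio
from g4f.requests import StreamSession

async def fetch_status(url: str) -> int:
    async with StreamSession() as session:
        async with session.get(url) as response:
            return response.status  # was response.status_code before this commit

print(asyncio.run(fetch_status("https://example.com")))
```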

View File

@ -7,6 +7,9 @@ from importlib.metadata import version as get_package_version, PackageNotFoundEr
from subprocess import check_output, CalledProcessError, PIPE
from .errors import VersionNotFoundError
PACKAGE_NAME = "g4f"
GITHUB_REPOSITORY = "xtekky/gpt4free"
def get_pypi_version(package_name: str) -> str:
"""
Retrieves the latest version of a package from PyPI.
@ -45,25 +48,6 @@ def get_github_version(repo: str) -> str:
except requests.RequestException as e:
raise VersionNotFoundError(f"Failed to get GitHub release version: {e}")
def get_latest_version() -> str:
"""
Retrieves the latest release version of the 'g4f' package from PyPI or GitHub.
Returns:
str: The latest release version of 'g4f'.
Note:
The function first tries to fetch the version from PyPI. If the package is not found,
it retrieves the version from the GitHub repository.
"""
try:
# Is installed via package manager?
get_package_version("g4f")
return get_pypi_version("g4f")
except PackageNotFoundError:
# Else use Github version:
return get_github_version("xtekky/gpt4free")
class VersionUtils:
"""
Utility class for managing and comparing package versions of 'g4f'.
@ -82,7 +66,7 @@ class VersionUtils:
"""
# Read from package manager
try:
return get_package_version("g4f")
return get_package_version(PACKAGE_NAME)
except PackageNotFoundError:
pass
@ -108,7 +92,12 @@ class VersionUtils:
Returns:
str: The latest version of 'g4f'.
"""
return get_latest_version()
# Is installed via package manager?
try:
get_package_version(PACKAGE_NAME)
except PackageNotFoundError:
return get_github_version(GITHUB_REPOSITORY)
return get_pypi_version(PACKAGE_NAME)
def check_version(self) -> None:
"""

View File

@ -1,11 +1,11 @@
requests
pycryptodome
curl_cffi>=0.5.10
curl_cffi>=0.6.0b9
aiohttp
certifi
browser_cookie3
PyExecJS
duckduckgo-search
duckduckgo-search>=4.4.3
nest_asyncio
werkzeug
loguru

View File

@ -21,7 +21,7 @@ EXTRA_REQUIRE = {
"py-arkose-generator", # openai
"browser_cookie3", # get_cookies
"PyExecJS", # GptForLove
"duckduckgo-search", # internet.search
"duckduckgo-search>=4.4.3",# internet.search
"beautifulsoup4", # internet.search and bing.create_images
"brotli", # openai
"platformdirs", # webdriver
@ -56,7 +56,7 @@ EXTRA_REQUIRE = {
"gui": [
"werkzeug", "flask",
"beautifulsoup4", "pillow",
"duckduckgo-search",
"duckduckgo-search>=4.4.3",
"browser_cookie3"
]
}