gpt4free/g4f/providers/retry_provider.py


from __future__ import annotations
import asyncio
import random
from ..typing import Type, List, CreateResult, Messages, AsyncResult
from .types import BaseProvider, BaseRetryProvider, ProviderType
from .. import debug
from ..errors import RetryProviderError, RetryNoProviderError
DEFAULT_TIMEOUT = 60
class IterListProvider(BaseRetryProvider):
def __init__(
self,
providers: List[Type[BaseProvider]],
shuffle: bool = True
) -> None:
"""
Initialize the BaseRetryProvider.
Args:
providers (List[Type[BaseProvider]]): List of providers to use.
shuffle (bool): Whether to shuffle the providers list.
single_provider_retry (bool): Whether to retry a single provider if it fails.
max_retries (int): Maximum number of retries for a single provider.
"""
self.providers = providers
self.shuffle = shuffle
self.working = True
self.last_provider: Type[BaseProvider] = None
def create_completion(
self,
model: str,
messages: Messages,
stream: bool = False,
**kwargs,
) -> CreateResult:
"""
Create a completion using available providers, with an option to stream the response.
Args:
model (str): The model to be used for completion.
messages (Messages): The messages to be used for generating completion.
stream (bool, optional): Flag to indicate if the response should be streamed. Defaults to False.
Yields:
CreateResult: Tokens or results from the completion.
Raises:
Exception: Any exception encountered during the completion process.
"""
exceptions = {}
started: bool = False
for provider in self.get_providers(stream):
self.last_provider = provider
debug.log(f"Using {provider.__name__} provider")
try:
for chunk in provider.create_completion(model, messages, stream, **kwargs):
if chunk:
yield chunk
started = True
if started:
return
except Exception as e:
exceptions[provider.__name__] = e
if debug.logging:
print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
if started:
raise e
raise_exceptions(exceptions)
async def create_async(
self,
model: str,
messages: Messages,
**kwargs,
) -> str:
"""
Asynchronously create a completion using available providers.
Args:
model (str): The model to be used for completion.
messages (Messages): The messages to be used for generating completion.
Returns:
str: The result of the asynchronous completion.
Raises:
Exception: Any exception encountered during the asynchronous completion process.
"""
exceptions = {}
for provider in self.get_providers(False):
self.last_provider = provider
debug.log(f"Using {provider.__name__} provider")
try:
chunk = await asyncio.wait_for(
provider.create_async(model, messages, **kwargs),
timeout=kwargs.get("timeout", DEFAULT_TIMEOUT),
)
if chunk:
return chunk
except Exception as e:
exceptions[provider.__name__] = e
if debug.logging:
print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
raise_exceptions(exceptions)
def get_providers(self, stream: bool) -> list[ProviderType]:
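        """
        Return the providers to try for this request, restricted to providers with
        supports_stream when stream is True and shuffled when self.shuffle is set.
        """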
providers = [p for p in self.providers if p.supports_stream] if stream else self.providers
if self.shuffle:
random.shuffle(providers)
return providers
async def create_async_generator(
self,
model: str,
messages: Messages,
stream: bool = True,
**kwargs
) -> AsyncResult:
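        """
        Asynchronously stream a completion using available providers, falling back
        to the next provider if the current one fails before producing any output.
        Args:
            model (str): The model to be used for completion.
            messages (Messages): The messages to be used for generating completion.
            stream (bool, optional): Flag to indicate if the response should be streamed. Defaults to True.
        Yields:
            AsyncResult: Tokens or results from the completion.
        Raises:
            Exception: Any exception encountered during the completion process.
        """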
exceptions = {}
started: bool = False
for provider in self.get_providers(stream):
self.last_provider = provider
debug.log(f"Using {provider.__name__} provider")
try:
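                # When not streaming, await a single response; otherwise prefer the
                # provider's native async generator and fall back to the blocking
                # sync API as a last resort.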
if not stream:
chunk = await asyncio.wait_for(
provider.create_async(model, messages, **kwargs),
timeout=kwargs.get("timeout", DEFAULT_TIMEOUT),
)
if chunk:
yield chunk
started = True
elif hasattr(provider, "create_async_generator"):
async for chunk in provider.create_async_generator(model, messages, stream=stream, **kwargs):
if chunk:
yield chunk
started = True
else:
for token in provider.create_completion(model, messages, stream, **kwargs):
yield token
started = True
if started:
return
except Exception as e:
exceptions[provider.__name__] = e
debug.log(f"{provider.__name__}: {e.__class__.__name__}: {e}")
if started:
raise e
raise_exceptions(exceptions)
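
# --- Illustrative usage sketch (not part of the original module) ---------------
# A minimal, hedged example of how IterListProvider falls back from a failing
# provider to a working one. The two mock classes are hypothetical stand-ins that
# only implement what this module actually touches (__name__, supports_stream and
# create_completion); real callers pass provider classes from g4f.Provider, and
# the model name below is arbitrary because the mocks ignore it.
def _example_iter_list_usage() -> str:
    class _FailingMock:
        supports_stream = True

        @classmethod
        def create_completion(cls, model, messages, stream, **kwargs):
            # Simulate an outage so the retry logic moves on to the next provider.
            raise RuntimeError("simulated outage")

    class _WorkingMock:
        supports_stream = True

        @classmethod
        def create_completion(cls, model, messages, stream, **kwargs):
            # A healthy provider that streams its answer in two chunks.
            yield "Hello, "
            yield "world!"

    # shuffle=False keeps the fallback order deterministic for the example.
    provider = IterListProvider([_FailingMock, _WorkingMock], shuffle=False)
    chunks = provider.create_completion("gpt-3.5-turbo", [{"role": "user", "content": "Hi"}])
    return "".join(chunks)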
class RetryProvider(IterListProvider):
def __init__(
self,
providers: List[Type[BaseProvider]],
shuffle: bool = True,
single_provider_retry: bool = False,
max_retries: int = 3,
) -> None:
"""
        Initialize the RetryProvider.
Args:
providers (List[Type[BaseProvider]]): List of providers to use.
shuffle (bool): Whether to shuffle the providers list.
single_provider_retry (bool): Whether to retry a single provider if it fails.
max_retries (int): Maximum number of retries for a single provider.
"""
super().__init__(providers, shuffle)
self.single_provider_retry = single_provider_retry
self.max_retries = max_retries
def create_completion(
self,
model: str,
messages: Messages,
stream: bool = False,
**kwargs,
) -> CreateResult:
"""
Create a completion using available providers, with an option to stream the response.
Args:
model (str): The model to be used for completion.
messages (Messages): The messages to be used for generating completion.
stream (bool, optional): Flag to indicate if the response should be streamed. Defaults to False.
Yields:
CreateResult: Tokens or results from the completion.
Raises:
Exception: Any exception encountered during the completion process.
"""
if self.single_provider_retry:
exceptions = {}
started: bool = False
provider = self.providers[0]
self.last_provider = provider
for attempt in range(self.max_retries):
try:
if debug.logging:
print(f"Using {provider.__name__} provider (attempt {attempt + 1})")
for token in provider.create_completion(model, messages, stream, **kwargs):
yield token
started = True
if started:
return
except Exception as e:
exceptions[provider.__name__] = e
if debug.logging:
print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
if started:
raise e
raise_exceptions(exceptions)
else:
yield from super().create_completion(model, messages, stream, **kwargs)
async def create_async(
self,
model: str,
messages: Messages,
**kwargs,
) -> str:
"""
Asynchronously create a completion using available providers.
Args:
model (str): The model to be used for completion.
messages (Messages): The messages to be used for generating completion.
Returns:
str: The result of the asynchronous completion.
Raises:
Exception: Any exception encountered during the asynchronous completion process.
"""
exceptions = {}
if self.single_provider_retry:
provider = self.providers[0]
self.last_provider = provider
for attempt in range(self.max_retries):
try:
if debug.logging:
print(f"Using {provider.__name__} provider (attempt {attempt + 1})")
return await asyncio.wait_for(
provider.create_async(model, messages, **kwargs),
timeout=kwargs.get("timeout", 60),
)
except Exception as e:
exceptions[provider.__name__] = e
if debug.logging:
print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
raise_exceptions(exceptions)
else:
return await super().create_async(model, messages, **kwargs)
async def create_async_generator(
self,
model: str,
messages: Messages,
stream: bool = True,
**kwargs
) -> AsyncResult:
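        """
        Asynchronously stream a completion. With single_provider_retry, the first
        provider is retried up to max_retries times; otherwise providers are tried
        in turn as in IterListProvider.
        Yields:
            AsyncResult: Tokens or results from the completion.
        Raises:
            Exception: Any exception encountered during the completion process.
        """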
exceptions = {}
started = False
if self.single_provider_retry:
provider = self.providers[0]
self.last_provider = provider
for attempt in range(self.max_retries):
try:
debug.log(f"Using {provider.__name__} provider (attempt {attempt + 1})")
if not stream:
chunk = await asyncio.wait_for(
provider.create_async(model, messages, **kwargs),
timeout=kwargs.get("timeout", DEFAULT_TIMEOUT),
)
if chunk:
yield chunk
started = True
elif hasattr(provider, "create_async_generator"):
async for chunk in provider.create_async_generator(model, messages, stream=stream, **kwargs):
if chunk:
yield chunk
started = True
else:
for token in provider.create_completion(model, messages, stream, **kwargs):
yield token
started = True
if started:
return
except Exception as e:
exceptions[provider.__name__] = e
if debug.logging:
print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
raise_exceptions(exceptions)
else:
async for chunk in super().create_async_generator(model, messages, stream, **kwargs):
yield chunk
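
# --- Illustrative usage sketch (not part of the original module) ---------------
# A hedged example of the single_provider_retry path: one flaky provider is retried
# up to max_retries times instead of falling through to other providers. The mock
# class and model name are hypothetical; only the attributes used by create_async()
# are implemented.
def _example_single_provider_retry() -> str:
    class _FlakyMock:
        supports_stream = False
        _calls = 0

        @classmethod
        async def create_async(cls, model, messages, **kwargs) -> str:
            # Fail on the first attempt, succeed on the second.
            cls._calls += 1
            if cls._calls < 2:
                raise RuntimeError("temporary failure")
            return "Recovered on retry"

    retry = RetryProvider([_FlakyMock], shuffle=False, single_provider_retry=True, max_retries=3)
    return asyncio.run(retry.create_async("any-model", [{"role": "user", "content": "Hi"}]))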
def raise_exceptions(exceptions: dict) -> None:
"""
Raise a combined exception if any occurred during retries.
Raises:
RetryProviderError: If any provider encountered an exception.
RetryNoProviderError: If no provider is found.
"""
if exceptions:
raise RetryProviderError("RetryProvider failed:\n" + "\n".join([
f"{p}: {exception.__class__.__name__}: {exception}" for p, exception in exceptions.items()
]))
raise RetryNoProviderError("No provider found")
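
# Hedged demo entry point (not part of the original module): running the file as a
# module, e.g. `python -m g4f.providers.retry_provider`, exercises the sketches above
# without affecting normal imports.
if __name__ == "__main__":
    print(_example_iter_list_usage())         # expected: "Hello, world!" after one fallback
    print(_example_single_provider_retry())   # expected: "Recovered on retry" after one retry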