Mirror of https://github.com/xtekky/gpt4free.git (synced 2024-12-25 04:01:52 +03:00)
bb9132bcb4
* refactor(g4f/Provider/Airforce.py): improve model handling and filtering
  - Add hidden_models set to exclude specific models
  - Add evil alias for uncensored model handling
  - Extend filtering for model-specific response tokens
  - Add response buffering for streamed content
  - Update model fetching with error handling
* refactor(g4f/Provider/Blackbox.py): improve caching and model handling
  - Add caching system for validated values with file-based storage
  - Rename 'flux' model to 'ImageGeneration' and update references
  - Add temperature, top_p and max_tokens parameters to generator
  - Simplify HTTP headers and remove redundant options
  - Add model alias mapping for ImageGeneration
  - Add file system utilities for cache management
* feat(g4f/Provider/RobocodersAPI.py): add caching and error handling
  - Add file-based caching system for access tokens and sessions
  - Add robust error handling with specific error messages
  - Add automatic dialog continuation on resource limits
  - Add HTML parsing with BeautifulSoup for token extraction
  - Add debug logging for error tracking
  - Add timeout configuration for API requests
* refactor(g4f/Provider/DarkAI.py): update DarkAI default model and aliases
  - Change default model from llama-3-405b to llama-3-70b
  - Remove llama-3-405b from supported models list
  - Remove llama-3.1-405b from model aliases
* feat(g4f/Provider/Blackbox2.py): add image generation support
  - Add image model 'flux' with dedicated API endpoint
  - Refactor generator to support both text and image outputs
  - Extract headers into reusable static method
  - Add type hints for AsyncGenerator return type
  - Split generation logic into _generate_text and _generate_image methods
  - Add ImageResponse handling for image generation results
  BREAKING CHANGE: create_async_generator now returns AsyncGenerator instead of AsyncResult
* refactor(g4f/Provider/ChatGptEs.py): update ChatGptEs model configuration
  - Update models list to include gpt-3.5-turbo
  - Remove chatgpt-4o-latest from supported models
  - Remove model_aliases mapping for gpt-4o
* feat(g4f/Provider/DeepInfraChat.py): add Accept-Language header support
  - Add Accept-Language header for internationalization
  - Maintain existing header configuration
  - Improve request compatibility with language preferences
* refactor(g4f/Provider/needs_auth/Gemini.py): add ProviderModelMixin inheritance
  - Add ProviderModelMixin to class inheritance
  - Import ProviderModelMixin from base_provider
  - Move BaseConversation import to base_provider imports
* refactor(g4f/Provider/Liaobots.py): update model details and aliases
  - Add version suffix to o1 model IDs
  - Update model aliases for o1-preview and o1-mini
  - Standardize version format across model definitions
* refactor(g4f/Provider/PollinationsAI.py): enhance model support and generation
  - Split generation logic into dedicated image/text methods
  - Add additional text models including sur and claude
  - Add width/height parameters for image generation
  - Add model existence validation
  - Add hasattr checks for model lists initialization
* chore(gitignore): add provider cache directory
  - Add g4f/Provider/.cache to gitignore patterns
* refactor(g4f/Provider/ReplicateHome.py): update model configuration
  - Update default model to gemma-2b-it
  - Add default_image_model configuration
  - Remove llava-13b from supported models
  - Simplify request headers
* feat(g4f/models.py): expand provider and model support
  - Add new providers DarkAI and PollinationsAI
  - Add new models for Mistral, Flux and image generation
  - Update provider lists for existing models
  - Add P1 and Evil models with experimental providers
  BREAKING CHANGE: Remove llava-13b model support
* refactor(g4f/Provider/Airforce.py): update type hint for split_message return
  - Change return type of 'split_message' from 'list[str]' to 'List[str]' for consistency with the imported List type
  - Maintain overall functionality and structure of the 'Airforce' class
  - Ensure compatibility with type hinting standards in Python
* feat(g4f/Provider/RobocodersAPI.py): add support for optional BeautifulSoup dependency
  - Introduce a check for the BeautifulSoup library and handle its absence gracefully
  - Raise an error if BeautifulSoup is not installed, prompting the user to install it
  - Remove direct import of BeautifulSoup to avoid import errors when the library is missing
* fix: update provider documentation and apply small fixes in providers
* Disable the RobocodersAPI provider
* Fix conflicting file g4f/models.py
* Update g4f/models.py, g4f/Provider/Airforce.py
* Update docs/providers-and-models.md, g4f/models.py, g4f/Provider/Airforce.py, g4f/Provider/PollinationsAI.py
* Update docs/providers-and-models.md
* Update .gitignore
* Update g4f/models.py
* Update g4f/Provider/PollinationsAI.py
---------
Co-authored-by: kqlio67 <>
136 lines
6.4 KiB
Python
from __future__ import annotations

import json

from aiohttp import ClientSession, BaseConnector

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin, BaseConversation
from .helper import format_prompt
from ..requests.aiohttp import get_connector
from ..requests.raise_for_status import raise_for_status
from .. import debug

MODELS = [
    {"model":"gpt-4o","modelName":"GPT-4o","modelVariant":None,"modelStyleId":"gpt-4o-mini","createdBy":"OpenAI","moderationLevel":"HIGH","isAvailable":1,"inputCharLimit":16e3,"settingId":"4"},
    {"model":"gpt-4o-mini","modelName":"GPT-4o","modelVariant":"mini","modelStyleId":"gpt-4o-mini","createdBy":"OpenAI","moderationLevel":"HIGH","isAvailable":0,"inputCharLimit":16e3,"settingId":"3"},
    {"model":"claude-3-5-sonnet-20240620","modelName":"Claude 3.5","modelVariant":"Sonnet","modelStyleId":"claude-3-haiku","createdBy":"Anthropic","moderationLevel":"HIGH","isAvailable":1,"inputCharLimit":16e3,"settingId":"7"},
    {"model":"claude-3-opus-20240229","modelName":"Claude 3","modelVariant":"Opus","modelStyleId":"claude-3-haiku","createdBy":"Anthropic","moderationLevel":"HIGH","isAvailable":1,"inputCharLimit":16e3,"settingId":"2"},
    {"model":"claude-3-haiku-20240307","modelName":"Claude 3","modelVariant":"Haiku","modelStyleId":"claude-3-haiku","createdBy":"Anthropic","moderationLevel":"HIGH","isAvailable":0,"inputCharLimit":16e3,"settingId":"1"},
    {"model":"meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo","modelName":"Llama 3.1","modelVariant":"70B","modelStyleId":"llama-3","createdBy":"Meta","moderationLevel":"MEDIUM","isAvailable":0,"isOpenSource":0,"inputCharLimit":16e3,"settingId":"5"},
    {"model":"mistralai/Mixtral-8x7B-Instruct-v0.1","modelName":"Mixtral","modelVariant":"8x7B","modelStyleId":"mixtral","createdBy":"Mistral AI","moderationLevel":"LOW","isAvailable":0,"isOpenSource":0,"inputCharLimit":16e3,"settingId":"6"},
    {"model":"Qwen/Qwen2.5-Coder-32B-Instruct","modelName":"Qwen 2.5 Coder","modelVariant":"32B","modelStyleId":"qwen","createdBy":"Alibaba Cloud","moderationLevel":"LOW","isAvailable":0,"isOpenSource":1,"inputCharLimit":16e3,"settingId":"90"}
]

class Conversation(BaseConversation):
    vqd: str = None

    def __init__(self, model: str):
        self.model = model
        # Keep the history per instance; a mutable class-level default
        # would be shared across every Conversation object.
        self.message_history: Messages = []

class DDG(AsyncGeneratorProvider, ProviderModelMixin):
    label = "DuckDuckGo AI Chat"
    url = "https://duckduckgo.com/aichat"
    api_endpoint = "https://duckduckgo.com/duckchat/v1/chat"
    working = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = "gpt-4o-mini"
    models = [model.get("model") for model in MODELS]
    model_aliases = {
        "claude-3-haiku": "claude-3-haiku-20240307",
        "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
        "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
        "gpt-4": "gpt-4o-mini",
    }

    @classmethod
    async def get_vqd(cls, proxy: str, connector: BaseConnector = None):
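        # DuckDuckGo gates the chat API behind a VQD token: a GET to the
        # status endpoint with "x-vqd-accept: 1" makes the server return
        # the token in the "x-vqd-4" response header.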
        status_url = "https://duckduckgo.com/duckchat/v1/status"
        headers = {
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36',
            'Accept': 'text/event-stream',
            'x-vqd-accept': '1'
        }
        async with ClientSession(connector=get_connector(connector, proxy)) as session:
            async with session.get(status_url, headers=headers) as response:
                await raise_for_status(response)
                return response.headers.get("x-vqd-4")

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        conversation: Conversation = None,
        return_conversation: bool = False,
        proxy: str = None,
        connector: BaseConnector = None,
        **kwargs
    ) -> AsyncResult:
        model = cls.get_model(model)

        is_new_conversation = False
        if conversation is None:
            conversation = Conversation(model)
            is_new_conversation = True

        debug.last_model = model

        if conversation.vqd is None:
            conversation.vqd = await cls.get_vqd(proxy, connector)
            if not conversation.vqd:
                raise Exception("Failed to obtain VQD token")

        headers = {
            'accept': 'text/event-stream',
            'content-type': 'application/json',
            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36',
            'x-vqd-4': conversation.vqd,
        }

        async with ClientSession(headers=headers, connector=get_connector(connector, proxy)) as session:
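            # A new conversation flattens the full prompt into a single user
            # message; a continued one appends only the most recent exchange
            # to the stored history.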
            if is_new_conversation:
                conversation.message_history = [{"role": "user", "content": format_prompt(messages)}]
            elif len(messages) >= 2:
                conversation.message_history = [
                    *conversation.message_history,
                    messages[-2],
                    messages[-1]
                ]
            elif len(messages) == 1:
                conversation.message_history = [
                    *conversation.message_history,
                    messages[-1]
                ]

            if return_conversation:
                yield conversation

            data = {
                "model": conversation.model,
                "messages": conversation.message_history
            }

            async with session.post(cls.api_endpoint, json=data) as response:
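                # Every response rotates the token: capture the new x-vqd-4
                # value so the conversation object stays valid for the next
                # request.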
                conversation.vqd = response.headers.get("x-vqd-4")
                await raise_for_status(response)

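                # The reply is a server-sent event stream: each payload line
                # starts with "data: " and a literal "[DONE]" payload marks
                # the end of the stream.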
                async for line in response.content:
                    if line:
                        decoded_line = line.decode('utf-8')
                        if decoded_line.startswith('data: '):
                            json_str = decoded_line[6:]
                            if json_str == '[DONE]':
                                break
                            try:
                                json_data = json.loads(json_str)
                                if 'message' in json_data:
                                    yield json_data['message']
                            except json.JSONDecodeError:
                                pass
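
A minimal usage sketch, not part of the file above: assuming the repository is installed as the g4f package and this provider class is exported from g4f.Provider (both assumptions about the surrounding project layout), the async generator can be consumed like this; the prompt text is a placeholder.

import asyncio

from g4f.Provider import DDG  # assumed export path

async def main():
    # Stream a reply from the default model; chunks arrive as plain strings.
    async for chunk in DDG.create_async_generator(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "Say hello in one sentence."}],
    ):
        print(chunk, end="", flush=True)

asyncio.run(main())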