mirror of
https://github.com/xtekky/gpt4free.git
synced 2024-12-24 03:23:49 +03:00
ca2b609e82
* refactor(g4f/Provider/Airforce.py): improve model handling and filtering
  - Add hidden_models set to exclude specific models
  - Add evil alias for uncensored model handling
  - Extend filtering for model-specific response tokens
  - Add response buffering for streamed content
  - Update model fetching with error handling

* refactor(g4f/Provider/Blackbox.py): improve caching and model handling
  - Add caching system for validated values with file-based storage
  - Rename 'flux' model to 'ImageGeneration' and update references
  - Add temperature, top_p and max_tokens parameters to generator
  - Simplify HTTP headers and remove redundant options
  - Add model alias mapping for ImageGeneration
  - Add file system utilities for cache management

* feat(g4f/Provider/RobocodersAPI.py): add caching and error handling
  - Add file-based caching system for access tokens and sessions
  - Add robust error handling with specific error messages
  - Add automatic dialog continuation on resource limits
  - Add HTML parsing with BeautifulSoup for token extraction
  - Add debug logging for error tracking
  - Add timeout configuration for API requests

* refactor(g4f/Provider/DarkAI.py): update DarkAI default model and aliases
  - Change default model from llama-3-405b to llama-3-70b
  - Remove llama-3-405b from supported models list
  - Remove llama-3.1-405b from model aliases

* feat(g4f/Provider/Blackbox2.py): add image generation support
  - Add image model 'flux' with dedicated API endpoint
  - Refactor generator to support both text and image outputs
  - Extract headers into reusable static method
  - Add type hints for AsyncGenerator return type
  - Split generation logic into _generate_text and _generate_image methods
  - Add ImageResponse handling for image generation results
  BREAKING CHANGE: create_async_generator now returns AsyncGenerator instead of AsyncResult

* refactor(g4f/Provider/ChatGptEs.py): update ChatGptEs model configuration
  - Update models list to include gpt-3.5-turbo
  - Remove chatgpt-4o-latest from supported models
  - Remove model_aliases mapping for gpt-4o

* feat(g4f/Provider/DeepInfraChat.py): add Accept-Language header support
  - Add Accept-Language header for internationalization
  - Maintain existing header configuration
  - Improve request compatibility with language preferences

* refactor(g4f/Provider/needs_auth/Gemini.py): add ProviderModelMixin inheritance
  - Add ProviderModelMixin to class inheritance
  - Import ProviderModelMixin from base_provider
  - Move BaseConversation import to base_provider imports

* refactor(g4f/Provider/Liaobots.py): update model details and aliases
  - Add version suffix to o1 model IDs
  - Update model aliases for o1-preview and o1-mini
  - Standardize version format across model definitions

* refactor(g4f/Provider/PollinationsAI.py): enhance model support and generation
  - Split generation logic into dedicated image/text methods
  - Add additional text models including sur and claude
  - Add width/height parameters for image generation
  - Add model existence validation
  - Add hasattr checks for model lists initialization

* chore(gitignore): add provider cache directory
  - Add g4f/Provider/.cache to gitignore patterns

* refactor(g4f/Provider/ReplicateHome.py): update model configuration
  - Update default model to gemma-2b-it
  - Add default_image_model configuration
  - Remove llava-13b from supported models
  - Simplify request headers

* feat(g4f/models.py): expand provider and model support
  - Add new providers DarkAI and PollinationsAI
  - Add new models for Mistral, Flux and image generation
  - Update provider lists for existing models
  - Add P1 and Evil models with experimental providers
  BREAKING CHANGE: Remove llava-13b model support

* refactor(Airforce): Update type hint for split_message return
  - Change return type of 'split_message' for consistency with the import.
  - Maintain overall functionality and structure of the class.
  - Ensure compatibility with type hinting standards in Python.

* refactor(g4f/Provider/Airforce.py): Update type hint for split_message return
  - Change return type of 'split_message' from 'list[str]' to 'List[str]' for consistency with import.
  - Maintain overall functionality and structure of the 'Airforce' class.
  - Ensure compatibility with type hinting standards in Python.

* feat(g4f/Provider/RobocodersAPI.py): Add support for optional BeautifulSoup dependency
  - Introduce a check for the BeautifulSoup library and handle its absence gracefully.
  - Raise an error if BeautifulSoup is not installed, prompting the user to install it.
  - Remove direct import of BeautifulSoup to avoid import errors when the library is missing.

* fix: Updating provider documentation and small fixes in providers
* Disabled the provider (RobocodersAPI)
* Fix: Conflicting file g4f/models.py
* Update g4f/models.py g4f/Provider/Airforce.py
* Update docs/providers-and-models.md g4f/models.py g4f/Provider/Airforce.py g4f/Provider/PollinationsAI.py
* Update docs/providers-and-models.md
* Update .gitignore
* Update g4f/models.py
* Update g4f/Provider/PollinationsAI.py

* feat(g4f/Provider/Blackbox.py): add support for additional AI models and agents
  - Introduce new agent modes for Meta-Llama, Mistral, DeepSeek, DBRX, Qwen, and Nous-Hermes
  - Update model aliases to include newly supported models

* Update (g4f/Provider/Blackbox.py)
* Update (g4f/Provider/Blackbox.py)

* feat(g4f/Provider/Blackbox2.py): add license key caching and validation
  - Add cache file management for license key persistence
  - Implement async license key extraction from JavaScript files
  - Add license key validation to text generation requests
  - Update type hints for async generators
  - Add error handling for cache file operations
  Breaking changes:
  - Text generation now requires license key validation

---------

Co-authored-by: kqlio67 <>
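For orientation, a minimal usage sketch of the Blackbox provider touched by this commit, assuming the usual g4f import path and the create_async_generator interface defined in the file below (the model name and prompt are illustrative, not part of the commit):

import asyncio
from g4f.Provider import Blackbox  # import path assumed from the repository layout

async def main():
    messages = [{"role": "user", "content": "Explain asyncio in one paragraph."}]
    # create_async_generator is an async generator: text models yield strings,
    # image models yield ImageResponse objects
    async for chunk in Blackbox.create_async_generator(model="blackboxai", messages=messages):
        print(chunk)

asyncio.run(main())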
303 lines
13 KiB
Python
from __future__ import annotations

from aiohttp import ClientSession
import random
import string
import json
import re
import aiohttp

from pathlib import Path

from ..typing import AsyncResult, Messages, ImagesType
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..image import ImageResponse, to_data_uri
from ..cookies import get_cookies_dir
from .helper import format_prompt

class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
    label = "Blackbox AI"
    url = "https://www.blackbox.ai"
    api_endpoint = "https://www.blackbox.ai/api/chat"

    working = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = 'blackboxai'
    default_vision_model = default_model
    default_image_model = 'flux'
    image_models = ['ImageGeneration', 'repomap']
    vision_models = [default_model, 'gpt-4o', 'gemini-pro', 'gemini-1.5-flash', 'llama-3.1-8b', 'llama-3.1-70b', 'llama-3.1-405b']

    userSelectedModel = ['gpt-4o', 'gemini-pro', 'claude-sonnet-3.5', 'blackboxai-pro']

    agentMode = {
        'ImageGeneration': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
        'meta-llama/Llama-3.3-70B-Instruct-Turbo': {'mode': True, 'id': "meta-llama/Llama-3.3-70B-Instruct-Turbo", 'name': "Meta-Llama-3.3-70B-Instruct-Turbo"},
        'mistralai/Mistral-7B-Instruct-v0.2': {'mode': True, 'id': "mistralai/Mistral-7B-Instruct-v0.2", 'name': "Mistral-(7B)-Instruct-v0.2"},
        'deepseek-ai/deepseek-llm-67b-chat': {'mode': True, 'id': "deepseek-ai/deepseek-llm-67b-chat", 'name': "DeepSeek-LLM-Chat-(67B)"},
        'databricks/dbrx-instruct': {'mode': True, 'id': "databricks/dbrx-instruct", 'name': "DBRX-Instruct"},
        'meta-llama/Meta-Llama-3.1-405B-Instruct-Lite-Pro': {'mode': True, 'id': "meta-llama/Meta-Llama-3.1-405B-Instruct-Lite-Pro", 'name': "Meta-Llama-3.1-405B-Instruct-Turbo"},
        'Qwen/QwQ-32B-Preview': {'mode': True, 'id': "Qwen/QwQ-32B-Preview", 'name': "Qwen-QwQ-32B-Preview"},
        'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO': {'mode': True, 'id': "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", 'name': "Nous-Hermes-2-Mixtral-8x7B-DPO"}
    }

    trendingAgentMode = {
        "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
        "llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
        'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
        'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405"},
        # Developer and framework agents
        'Python Agent': {'mode': True, 'id': "Python Agent"},
        'Java Agent': {'mode': True, 'id': "Java Agent"},
        'JavaScript Agent': {'mode': True, 'id': "JavaScript Agent"},
        'HTML Agent': {'mode': True, 'id': "HTML Agent"},
        'Google Cloud Agent': {'mode': True, 'id': "Google Cloud Agent"},
        'Android Developer': {'mode': True, 'id': "Android Developer"},
        'Swift Developer': {'mode': True, 'id': "Swift Developer"},
        'Next.js Agent': {'mode': True, 'id': "Next.js Agent"},
        'MongoDB Agent': {'mode': True, 'id': "MongoDB Agent"},
        'PyTorch Agent': {'mode': True, 'id': "PyTorch Agent"},
        'React Agent': {'mode': True, 'id': "React Agent"},
        'Xcode Agent': {'mode': True, 'id': "Xcode Agent"},
        'AngularJS Agent': {'mode': True, 'id': "AngularJS Agent"},
        # Pro mode
        'blackboxai-pro': {'mode': True, 'id': "BLACKBOXAI-PRO"},
        # Repository mapping
        'repomap': {'mode': True, 'id': "repomap"},
        # Additional platform and tooling agents
        'Heroku Agent': {'mode': True, 'id': "Heroku Agent"},
        'Godot Agent': {'mode': True, 'id': "Godot Agent"},
        'Go Agent': {'mode': True, 'id': "Go Agent"},
        'Gitlab Agent': {'mode': True, 'id': "Gitlab Agent"},
        'Git Agent': {'mode': True, 'id': "Git Agent"},
        'Flask Agent': {'mode': True, 'id': "Flask Agent"},
        'Firebase Agent': {'mode': True, 'id': "Firebase Agent"},
        'FastAPI Agent': {'mode': True, 'id': "FastAPI Agent"},
        'Erlang Agent': {'mode': True, 'id': "Erlang Agent"},
        'Electron Agent': {'mode': True, 'id': "Electron Agent"},
        'Docker Agent': {'mode': True, 'id': "Docker Agent"},
        'DigitalOcean Agent': {'mode': True, 'id': "DigitalOcean Agent"},
        'Bitbucket Agent': {'mode': True, 'id': "Bitbucket Agent"},
        'Azure Agent': {'mode': True, 'id': "Azure Agent"},
        'Flutter Agent': {'mode': True, 'id': "Flutter Agent"},
        'Youtube Agent': {'mode': True, 'id': "Youtube Agent"},
        'builder Agent': {'mode': True, 'id': "builder Agent"},
    }

    additional_prefixes = {
        'gpt-4o': '@GPT-4o',
        'gemini-pro': '@Gemini-PRO',
        'claude-sonnet-3.5': '@Claude-Sonnet-3.5'
    }

    model_prefixes = {
        **{
            mode: f"@{value['id']}" for mode, value in trendingAgentMode.items()
            if mode not in ["gemini-1.5-flash", "llama-3.1-8b", "llama-3.1-70b", "llama-3.1-405b", "repomap"]
        },
        **additional_prefixes
    }

    models = list(dict.fromkeys([default_model, *userSelectedModel, *list(agentMode.keys()), *list(trendingAgentMode.keys())]))

    model_aliases = {
        ### chat ###
        "gpt-4": "gpt-4o",
        "gemini-flash": "gemini-1.5-flash",
        "claude-3.5-sonnet": "claude-sonnet-3.5",
        "llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct-Turbo",
        "mixtral-7b": "mistralai/Mistral-7B-Instruct-v0.2",
        "deepseek-chat": "deepseek-ai/deepseek-llm-67b-chat",
        "dbrx-instruct": "databricks/dbrx-instruct",
        "llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct-Lite-Pro",
        "qwq-32b": "Qwen/QwQ-32B-Preview",
        "hermes-2-dpo": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",

        ### image ###
        "flux": "ImageGeneration",
    }

    @classmethod
    def _get_cache_file(cls) -> Path:
        """Return the path of the JSON file used to cache the validated value."""
        cache_dir = Path(get_cookies_dir())
        cache_dir.mkdir(exist_ok=True)
        return cache_dir / 'blackbox.json'

    @classmethod
    def _load_cached_value(cls) -> str | None:
        """Read the cached validated value, returning None if it is missing or unreadable."""
        cache_file = cls._get_cache_file()
        if cache_file.exists():
            try:
                with open(cache_file, 'r') as f:
                    data = json.load(f)
                    return data.get('validated_value')
            except Exception as e:
                print(f"Error reading cache file: {e}")
        return None

    @classmethod
    def _save_cached_value(cls, value: str):
        """Write the validated value to the cache file."""
        cache_file = cls._get_cache_file()
        try:
            with open(cache_file, 'w') as f:
                json.dump({'validated_value': value}, f)
        except Exception as e:
            print(f"Error writing to cache file: {e}")

    @classmethod
    async def fetch_validated(cls):
        """Return the 'validated' key required by the API, extracting it from the site's JS chunks when not cached."""
        # Try the cached value first
        cached_value = cls._load_cached_value()
        if cached_value:
            return cached_value

        async with aiohttp.ClientSession() as session:
            try:
                async with session.get(cls.url) as response:
                    if response.status != 200:
                        print("Failed to load the page.")
                        return cached_value

                    page_content = await response.text()
                    js_files = re.findall(r'static/chunks/\d{4}-[a-fA-F0-9]+\.js', page_content)

                    key_pattern = re.compile(r'w="([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})"')

                    for js_file in js_files:
                        js_url = f"{cls.url}/_next/{js_file}"
                        async with session.get(js_url) as js_response:
                            if js_response.status == 200:
                                js_content = await js_response.text()
                                match = key_pattern.search(js_content)
                                if match:
                                    validated_value = match.group(1)
                                    # Save the newly extracted value to the cache file
                                    cls._save_cached_value(validated_value)
                                    return validated_value
            except Exception as e:
                print(f"Error fetching validated value: {e}")

        return cached_value

    @staticmethod
    def generate_id(length=7):
        """Generate a random alphanumeric message id."""
        characters = string.ascii_letters + string.digits
        return ''.join(random.choice(characters) for _ in range(length))

    @classmethod
    def add_prefix_to_messages(cls, messages: Messages, model: str) -> Messages:
        """Prepend the model-specific @-prefix to user messages, if one is defined."""
        prefix = cls.model_prefixes.get(model, "")
        if not prefix:
            return messages

        new_messages = []
        for message in messages:
            new_message = message.copy()
            if message['role'] == 'user':
                new_message['content'] = (prefix + " " + message['content']).strip()
            new_messages.append(new_message)

        return new_messages

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        prompt: str = None,
        proxy: str = None,
        web_search: bool = False,
        images: ImagesType = None,
        top_p: float = None,
        temperature: float = None,
        max_tokens: int = None,
        **kwargs
    ) -> AsyncResult:
        message_id = cls.generate_id()
        messages = cls.add_prefix_to_messages(messages, model)
        validated_value = await cls.fetch_validated()
        formatted_message = format_prompt(messages)
        model = cls.get_model(model)

        messages = [{"id": message_id, "content": formatted_message, "role": "user"}]

        if images is not None:
            messages[-1]['data'] = {
                "imagesData": [
                    {
                        "filePath": f"MultipleFiles/{image_name}",
                        "contents": to_data_uri(image)
                    }
                    for image, image_name in images
                ],
                "fileText": "",
                "title": ""
            }

        headers = {
            'accept': '*/*',
            'content-type': 'application/json',
            'origin': cls.url,
            'referer': f'{cls.url}/',
            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
        }

        # Request payload expected by the Blackbox chat endpoint
        data = {
            "agentMode": cls.agentMode.get(model, {}),
            "clickedAnswer2": False,
            "clickedAnswer3": False,
            "clickedForceWebSearch": False,
            "codeModelMode": True,
            "deepSearchMode": False,
            "githubToken": None,
            "id": message_id,
            "imageGenerationMode": False,
            "isChromeExt": False,
            "isMicMode": False,
            "maxTokens": max_tokens,
            "messages": messages,
            "mobileClient": False,
            "playgroundTemperature": temperature,
            "playgroundTopP": top_p,
            "previewToken": None,
            "trendingAgentMode": cls.trendingAgentMode.get(model, {}),
            "userId": None,
            "userSelectedModel": model if model in cls.userSelectedModel else None,
            "userSystemPrompt": None,
            "validated": validated_value,
            "visitFromDelta": False,
            "webSearchModePrompt": False,
            "webSearchMode": web_search
        }

        async with ClientSession(headers=headers) as session:
            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                response.raise_for_status()
                response_text = await response.text()

                # Image models return a Markdown image link; extract the URL and yield an ImageResponse
                if model in cls.image_models:
                    image_matches = re.findall(r'!\[.*?\]\((https?://[^\)]+)\)', response_text)
                    if image_matches:
                        image_url = image_matches[0]
                        yield ImageResponse(image_url, prompt)
                        return

                # Strip the promotional footer appended by the service
                response_text = re.sub(r'Generated by BLACKBOX\.AI, try unlimited chat https://www\.blackbox\.ai', '', response_text, flags=re.DOTALL)

                # Web-search responses embed a JSON array of results between $~~~$ markers,
                # followed by the answer text
                json_match = re.search(r'\$~~~\$(.*?)\$~~~\$', response_text, re.DOTALL)
                if json_match:
                    search_results = json.loads(json_match.group(1))
                    answer = response_text.split('$~~~$')[-1].strip()

                    formatted_response = f"{answer}\n\n**Source:**"
                    for i, result in enumerate(search_results, 1):
                        formatted_response += f"\n{i}. {result['title']}: {result['link']}"

                    yield formatted_response
                else:
                    yield response_text.strip()
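The tail of create_async_generator reformats web-search responses: the endpoint is expected to return a JSON array of results between $~~~$ markers, followed by the answer text. A standalone sketch of that parsing, using hypothetical response text:

import json
import re

# Hypothetical response text in the format the parser above expects
response_text = (
    '$~~~$[{"title": "Python docs", "link": "https://docs.python.org"}]$~~~$'
    'Asyncio is a library to write concurrent code using async/await.'
)

json_match = re.search(r'\$~~~\$(.*?)\$~~~\$', response_text, re.DOTALL)
if json_match:
    search_results = json.loads(json_match.group(1))
    answer = response_text.split('$~~~$')[-1].strip()
    formatted = f"{answer}\n\n**Source:**"
    for i, result in enumerate(search_results, 1):
        formatted += f"\n{i}. {result['title']}: {result['link']}"
    print(formatted)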