Mirror of https://github.com/xtekky/gpt4free.git (synced 2024-12-23 19:11:48 +03:00)

Commit bb9132bcb4
* refactor(g4f/Provider/Airforce.py): improve model handling and filtering
  - Add hidden_models set to exclude specific models
  - Add evil alias for uncensored model handling
  - Extend filtering for model-specific response tokens
  - Add response buffering for streamed content
  - Update model fetching with error handling

* refactor(g4f/Provider/Blackbox.py): improve caching and model handling
  - Add caching system for validated values with file-based storage
  - Rename 'flux' model to 'ImageGeneration' and update references
  - Add temperature, top_p and max_tokens parameters to generator
  - Simplify HTTP headers and remove redundant options
  - Add model alias mapping for ImageGeneration
  - Add file system utilities for cache management

* feat(g4f/Provider/RobocodersAPI.py): add caching and error handling
  - Add file-based caching system for access tokens and sessions
  - Add robust error handling with specific error messages
  - Add automatic dialog continuation on resource limits
  - Add HTML parsing with BeautifulSoup for token extraction
  - Add debug logging for error tracking
  - Add timeout configuration for API requests

* refactor(g4f/Provider/DarkAI.py): update DarkAI default model and aliases
  - Change default model from llama-3-405b to llama-3-70b
  - Remove llama-3-405b from supported models list
  - Remove llama-3.1-405b from model aliases

* feat(g4f/Provider/Blackbox2.py): add image generation support
  - Add image model 'flux' with dedicated API endpoint
  - Refactor generator to support both text and image outputs
  - Extract headers into reusable static method
  - Add type hints for AsyncGenerator return type
  - Split generation logic into _generate_text and _generate_image methods
  - Add ImageResponse handling for image generation results
  BREAKING CHANGE: create_async_generator now returns AsyncGenerator instead of AsyncResult

* refactor(g4f/Provider/ChatGptEs.py): update ChatGptEs model configuration
  - Update models list to include gpt-3.5-turbo
  - Remove chatgpt-4o-latest from supported models
  - Remove model_aliases mapping for gpt-4o

* feat(g4f/Provider/DeepInfraChat.py): add Accept-Language header support
  - Add Accept-Language header for internationalization
  - Maintain existing header configuration
  - Improve request compatibility with language preferences

* refactor(g4f/Provider/needs_auth/Gemini.py): add ProviderModelMixin inheritance
  - Add ProviderModelMixin to class inheritance
  - Import ProviderModelMixin from base_provider
  - Move BaseConversation import to base_provider imports

* refactor(g4f/Provider/Liaobots.py): update model details and aliases
  - Add version suffix to o1 model IDs
  - Update model aliases for o1-preview and o1-mini
  - Standardize version format across model definitions

* refactor(g4f/Provider/PollinationsAI.py): enhance model support and generation
  - Split generation logic into dedicated image/text methods
  - Add additional text models including sur and claude
  - Add width/height parameters for image generation
  - Add model existence validation
  - Add hasattr checks for model lists initialization

* chore(gitignore): add provider cache directory
  - Add g4f/Provider/.cache to gitignore patterns

* refactor(g4f/Provider/ReplicateHome.py): update model configuration
  - Update default model to gemma-2b-it
  - Add default_image_model configuration
  - Remove llava-13b from supported models
  - Simplify request headers

* feat(g4f/models.py): expand provider and model support
  - Add new providers DarkAI and PollinationsAI
  - Add new models for Mistral, Flux and image generation
  - Update provider lists for existing models
  - Add P1 and Evil models with experimental providers
  BREAKING CHANGE: Remove llava-13b model support

* refactor(g4f/Provider/Airforce.py): update type hint for split_message return
  - Change return type of 'split_message' from 'list[str]' to 'List[str]' for consistency with the import
  - Maintain overall functionality and structure of the 'Airforce' class
  - Ensure compatibility with type hinting standards in Python

* feat(g4f/Provider/RobocodersAPI.py): add support for optional BeautifulSoup dependency
  - Introduce a check for the BeautifulSoup library and handle its absence gracefully
  - Raise an error if BeautifulSoup is not installed, prompting the user to install it
  - Remove the direct import of BeautifulSoup to avoid import errors when the library is missing

* fix: update provider documentation and small fixes in providers
* Disable the RobocodersAPI provider
* Fix conflicting file g4f/models.py
* Update g4f/models.py, g4f/Provider/Airforce.py
* Update docs/providers-and-models.md, g4f/models.py, g4f/Provider/Airforce.py, g4f/Provider/PollinationsAI.py
* Update docs/providers-and-models.md
* Update .gitignore
* Update g4f/models.py
* Update g4f/Provider/PollinationsAI.py

---------

Co-authored-by: kqlio67 <>
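The optional-dependency handling described for RobocodersAPI above is a standard pattern: attempt the import once, record the result, and fail with an actionable message only when the feature is actually used. The sketch below is illustrative, not the provider's actual code; the function name, the searched element, and the error messages are hypothetical.

try:
    from bs4 import BeautifulSoup
    HAS_BEAUTIFULSOUP = True
except ImportError:
    BeautifulSoup = None
    HAS_BEAUTIFULSOUP = False

def extract_token(html: str) -> str:
    # Fail here with install instructions rather than with an ImportError
    # at module load, so the rest of the package stays importable.
    if not HAS_BEAUTIFULSOUP:
        raise RuntimeError(
            "BeautifulSoup is required for token extraction: "
            "pip install beautifulsoup4"
        )
    soup = BeautifulSoup(html, "html.parser")
    element = soup.find("pre")  # hypothetical: wherever the token lives in the page
    if element is None:
        raise RuntimeError("Token element not found in response HTML")
    return element.get_text(strip=True)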
298 lines
9.9 KiB
Python
from __future__ import annotations

import uuid

from aiohttp import ClientSession, BaseConnector

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import get_connector
from ..requests import raise_for_status

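# Registry of upstream models exposed by Liaobots. Each entry is sent to the
# API verbatim: "id"/"name" identify the model, "model"/"provider" describe
# its family and vendor, and "context" is the advertised context window.
# (Inferred, not documented upstream: "maxLength" appears to be a per-request
# character budget and "tokenLimit" the corresponding token budget.)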
models = {
    "gpt-4o-mini-free": {
        "id": "gpt-4o-mini-free",
        "name": "GPT-4o-Mini-Free",
        "model": "ChatGPT",
        "provider": "OpenAI",
        "maxLength": 31200,
        "tokenLimit": 7800,
        "context": "8K",
    },
    "gpt-4o-2024-08-06": {
        "id": "gpt-4o-2024-08-06",
        "name": "GPT-4o",
        "model": "ChatGPT",
        "provider": "OpenAI",
        "maxLength": 260000,
        "tokenLimit": 126000,
        "context": "128K",
    },
    "gpt-4o-mini-2024-07-18": {
        "id": "gpt-4o-mini-2024-07-18",
        "name": "GPT-4o-Mini",
        "model": "ChatGPT",
        "provider": "OpenAI",
        "maxLength": 260000,
        "tokenLimit": 126000,
        "context": "128K",
    },
    "o1-preview-2024-09-12": {
        "id": "o1-preview-2024-09-12",
        "name": "o1-preview",
        "model": "o1",
        "provider": "OpenAI",
        "maxLength": 400000,
        "tokenLimit": 100000,
        "context": "128K",
    },
    "o1-mini-2024-09-12": {
        "id": "o1-mini-2024-09-12",
        "name": "o1-mini",
        "model": "o1",
        "provider": "OpenAI",
        "maxLength": 400000,
        "tokenLimit": 100000,
        "context": "128K",
    },
    "grok-beta": {
        "id": "grok-beta",
        "name": "Grok-Beta",
        "model": "Grok",
        "provider": "x.ai",
        "maxLength": 400000,
        "tokenLimit": 100000,
        "context": "100K",
    },
    "claude-3-opus-20240229": {
        "id": "claude-3-opus-20240229",
        "name": "Claude-3-Opus",
        "model": "Claude",
        "provider": "Anthropic",
        "maxLength": 800000,
        "tokenLimit": 200000,
        "context": "200K",
    },
    "claude-3-5-sonnet-20240620": {
        "id": "claude-3-5-sonnet-20240620",
        "name": "Claude-3.5-Sonnet",
        "model": "Claude",
        "provider": "Anthropic",
        "maxLength": 800000,
        "tokenLimit": 200000,
        "context": "200K",
    },
    "claude-3-5-sonnet-20241022": {
        "id": "claude-3-5-sonnet-20241022",
        "name": "Claude-3.5-Sonnet-V2",
        "model": "Claude",
        "provider": "Anthropic",
        "maxLength": 800000,
        "tokenLimit": 200000,
        "context": "200K",
    },
    "claude-3-sonnet-20240229": {
        "id": "claude-3-sonnet-20240229",
        "name": "Claude-3-Sonnet",
        "model": "Claude",
        "provider": "Anthropic",
        "maxLength": 800000,
        "tokenLimit": 200000,
        "context": "200K",
    },
    "claude-3-opus-20240229-t": {
        "id": "claude-3-opus-20240229-t",
        "name": "Claude-3-Opus-T",
        "model": "Claude",
        "provider": "Anthropic",
        "maxLength": 800000,
        "tokenLimit": 200000,
        "context": "200K",
    },
    "claude-3-5-sonnet-20241022-t": {
        "id": "claude-3-5-sonnet-20241022-t",
        "name": "Claude-3.5-Sonnet-V2-T",
        "model": "Claude",
        "provider": "Anthropic",
        "maxLength": 800000,
        "tokenLimit": 200000,
        "context": "200K",
    },
    "gemini-1.5-flash-002": {
        "id": "gemini-1.5-flash-002",
        "name": "Gemini-1.5-Flash-1M",
        "model": "Gemini",
        "provider": "Google",
        "maxLength": 4000000,
        "tokenLimit": 1000000,
        "context": "1024K",
    },
    "gemini-1.5-pro-002": {
        "id": "gemini-1.5-pro-002",
        "name": "Gemini-1.5-Pro-1M",
        "model": "Gemini",
        "provider": "Google",
        "maxLength": 4000000,
        "tokenLimit": 1000000,
        "context": "1024K",
    }
}


class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://liaobots.site"
    working = True
    supports_message_history = True
    supports_system_message = True

    default_model = "gpt-4o-2024-08-06"
    models = list(models.keys())
    # One entry per alias. The original listing repeated the "gpt-4o-mini",
    # "claude-3-opus" and "claude-3.5-sonnet" keys; Python silently keeps only
    # the last value for a duplicated dict key, so the mapping below preserves
    # those effective (last-wins) values.
    model_aliases = {
        "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
        "gpt-4o": "gpt-4o-2024-08-06",
        "gpt-4": "gpt-4o-2024-08-06",

        "o1-preview": "o1-preview-2024-09-12",
        "o1-mini": "o1-mini-2024-09-12",

        "claude-3-opus": "claude-3-opus-20240229-t",
        "claude-3.5-sonnet": "claude-3-5-sonnet-20241022-t",
        "claude-3-sonnet": "claude-3-sonnet-20240229",

        "gemini-flash": "gemini-1.5-flash-002",
        "gemini-pro": "gemini-1.5-pro-002"
    }

    _auth_code = ""
    _cookie_jar = None

    @classmethod
    def get_model(cls, model: str) -> str:
        """
        Retrieve the internal model identifier based on the provided model name or alias.
        """
        if model in cls.model_aliases:
            model = cls.model_aliases[model]
        if model not in models:
            raise ValueError(f"Model '{model}' is not supported.")
        return model

    @classmethod
    def is_supported(cls, model: str) -> bool:
        """
        Check if the given model is supported.
        """
        return model in models or model in cls.model_aliases

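    # Illustrative resolution examples (not executed; the values follow from
    # the models and model_aliases tables above):
    #   Liaobots.get_model("gpt-4o")        -> "gpt-4o-2024-08-06"
    #   Liaobots.get_model("gemini-flash")  -> "gemini-1.5-flash-002"
    #   Liaobots.is_supported("grok-beta")  -> True
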
    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        auth: str = None,
        proxy: str = None,
        connector: BaseConnector = None,
        **kwargs
    ) -> AsyncResult:
        model = cls.get_model(model)

        headers = {
            "authority": "liaobots.com",
            "content-type": "application/json",
            "origin": cls.url,
            "referer": f"{cls.url}/",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
        }
        async with ClientSession(
            headers=headers,
            cookie_jar=cls._cookie_jar,
            connector=get_connector(connector, proxy, True)
        ) as session:
            data = {
                "conversationId": str(uuid.uuid4()),
                "model": models[model],
                "messages": messages,
                "key": "",
                "prompt": kwargs.get("system_message", "You are a helpful assistant."),
            }
            if not cls._auth_code:
                # Prime the session cookies via the login endpoint before
                # requesting an auth code.
                async with session.post(
                    "https://liaobots.work/recaptcha/api/login",
                    data={"token": "abcdefghijklmnopqrst"},
                    verify_ssl=False
                ) as response:
                    await raise_for_status(response)
            try:
                async with session.post(
                    "https://liaobots.work/api/user",
                    json={"authcode": cls._auth_code},
                    verify_ssl=False
                ) as response:
                    await raise_for_status(response)
                    cls._auth_code = (await response.json(content_type=None))["authCode"]
                    if not cls._auth_code:
                        raise RuntimeError("Empty auth code")
                    cls._cookie_jar = session.cookie_jar
                async with session.post(
                    "https://liaobots.work/api/chat",
                    json=data,
                    headers={"x-auth-code": cls._auth_code},
                    verify_ssl=False
                ) as response:
                    await raise_for_status(response)
                    async for chunk in response.content.iter_any():
                        if b"<html coupert-item=" in chunk:
                            raise RuntimeError("Invalid session")
                        if chunk:
                            yield chunk.decode(errors="ignore")
            except Exception:
                # Retry once with the fallback auth code if the first attempt
                # fails (e.g. the cached code has expired).
                async with session.post(
                    "https://liaobots.work/api/user",
                    json={"authcode": "pTIQr4FTnVRfr"},
                    verify_ssl=False
                ) as response:
                    await raise_for_status(response)
                    cls._auth_code = (await response.json(content_type=None))["authCode"]
                    if not cls._auth_code:
                        raise RuntimeError("Empty auth code")
                    cls._cookie_jar = session.cookie_jar
                async with session.post(
                    "https://liaobots.work/api/chat",
                    json=data,
                    headers={"x-auth-code": cls._auth_code},
                    verify_ssl=False
                ) as response:
                    await raise_for_status(response)
                    async for chunk in response.content.iter_any():
                        if b"<html coupert-item=" in chunk:
                            raise RuntimeError("Invalid session")
                        if chunk:
                            yield chunk.decode(errors="ignore")

    @classmethod
    async def initialize_auth_code(cls, session: ClientSession) -> None:
        """
        Initialize the auth code by making the necessary login requests.
        """
        async with session.post(
            "https://liaobots.work/api/user",
            json={"authcode": "pTIQr4FTnVRfr"},
            verify_ssl=False
        ) as response:
            await raise_for_status(response)
            cls._auth_code = (await response.json(content_type=None))["authCode"]
            if not cls._auth_code:
                raise RuntimeError("Empty auth code")
            cls._cookie_jar = session.cookie_jar

    @classmethod
    async def ensure_auth_code(cls, session: ClientSession) -> None:
        """
        Ensure the auth code is initialized, and if not, perform the initialization.
        """
        if not cls._auth_code:
            await cls.initialize_auth_code(session)
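
A minimal sketch of driving this provider directly, for orientation: create_async_generator is an async generator classmethod, so it can be iterated without instantiating the class. The prompt text is arbitrary; the import path matches this file's location in the package, and network access is assumed.

import asyncio

from g4f.Provider.Liaobots import Liaobots

async def main() -> None:
    messages = [{"role": "user", "content": "Say hello."}]
    # Chunks are yielded as decoded text while the response streams in.
    async for chunk in Liaobots.create_async_generator("gpt-4o", messages):
        print(chunk, end="", flush=True)

asyncio.run(main())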