mirror of
https://github.com/xtekky/gpt4free.git
synced 2024-12-25 04:01:52 +03:00
b198d900aa
* refactor(g4f/Provider/Airforce.py): Enhance Airforce provider with dynamic model fetching
* refactor(g4f/Provider/Blackbox.py): Enhance Blackbox AI provider configuration and streamline code
* feat(g4f/Provider/RobocodersAPI.py): Add RobocodersAPI new async chat provider
* refactor(g4f/client/__init__.py): Improve provider handling in async_generate method
* refactor(g4f/models.py): Update provider configurations for multiple models
* refactor(g4f/Provider/Blackbox.py): Streamline model configuration and improve response handling
* feat(g4f/Provider/DDG.py): Enhance model support and improve conversation handling
* refactor(g4f/Provider/Copilot.py): Enhance Copilot provider with model support
* refactor(g4f/Provider/AmigoChat.py): update models and improve code structure
* chore(g4f/Provider/not_working/AIUncensored.): move AIUncensored to not_working directory
* chore(g4f/Provider/not_working/Allyfy.py): remove Allyfy provider
* Update (g4f/Provider/not_working/AIUncensored.py g4f/Provider/not_working/__init__.py)
* refactor(g4f/Provider/ChatGptEs.py): Implement format_prompt for message handling
* refactor(g4f/Provider/Blackbox.py): Update message formatting and improve code structure
* refactor(g4f/Provider/LLMPlayground.py): Enhance text generation and error handling
* refactor(g4f/Provider/needs_auth/PollinationsAI.py): move PollinationsAI to needs_auth directory
* refactor(g4f/Provider/Liaobots.py): Update Liaobots provider models and aliases
* feat(g4f/Provider/DeepInfraChat.py): Add new DeepInfra models and aliases
* Update (g4f/Provider/__init__.py)
* Update (g4f/models.py)
* g4f/models.py
* Update g4f/models.py
* Update g4f/Provider/LLMPlayground.py
* Update (g4f/models.py g4f/Provider/Airforce.py g4f/Provider/__init__.py g4f/Provider/LLMPlayground.py)
* Update g4f/Provider/__init__.py
* refactor(g4f/Provider/Airforce.py): Enhance text generation with retry and timeout
* Update g4f/Provider/AmigoChat.py g4f/Provider/__init__.py
* refactor(g4f/Provider/Blackbox.py): update model prefixes and image handling. Fixes #2445
  - Update model prefixes for gpt-4o, gemini-pro, and claude-sonnet-3.5
  - Add 'gpt-3.5-turbo' alias for 'blackboxai' model
  - Modify image handling in create_async_generator method
  - Add 'imageGenerationMode' and 'webSearchModePrompt' flags to API request
  - Remove redundant 'imageBase64' field from image data structure
* New provider (g4f/Provider/Blackbox2.py): support for llama-3.1-70b text generation
* docs(docs/async_client.md): update AsyncClient API guide with minor improvements
  - Improve formatting and readability of code examples
  - Add line breaks for better visual separation of sections
  - Fix minor typos and inconsistencies in text
  - Enhance clarity of explanations in various sections
  - Remove unnecessary whitespace
* feat(docs/client.md): add response_format parameter
  - Add 'response_format' parameter to image generation examples
  - Specify 'url' format for standard image generation
  - Include 'b64_json' format for base64 encoded image response
  - Update documentation to reflect new parameter usage
  - Improve code examples for clarity and consistency
* docs(README.md): update usage examples and add image generation
  - Update text generation example to use new Client API
  - Add image generation example with Client API
  - Update configuration section with new cookie setting instructions
  - Add response_format parameter to image generation example
  - Remove outdated information and reorganize sections
  - Update contributors list
* refactor(g4f/client/__init__.py): optimize image processing and response handling
  - Modify _process_image_response to handle 'url' format without local saving
  - Update ImagesResponse construction to include 'created' timestamp
  - Simplify image processing logic for different response formats
  - Improve error handling and logging for image generation
  - Enhance type hints and docstrings for better code clarity
* feat(g4f/models.py): update model providers and add new models
  - Add Blackbox2 to Provider imports
  - Update gpt-3.5-turbo best provider to Blackbox
  - Add Blackbox2 to llama-3.1-70b best providers
  - Rename dalle_3 to dall_e_3 and update its best providers
  - Add new models: solar_mini, openhermes_2_5, lfm_40b, zephyr_7b, neural_7b, mythomax_13b
  - Update ModelUtils.convert with new models and changes
  - Remove duplicate 'dalle-3' entry in ModelUtils.convert
* refactor(Airforce): improve API handling and add authentication
  - Implement API key authentication with check_api_key method
  - Refactor image generation to use new imagine2 endpoint
  - Improve text generation with better error handling and streaming
  - Update model aliases and add new image models
  - Enhance content filtering for various model outputs
  - Replace StreamSession with aiohttp's ClientSession for async operations
  - Simplify model fetching logic and remove redundant code
  - Add is_image_model method for better model type checking
  - Update class attributes for better organization and clarity
* feat(g4f/Provider/HuggingChat.py): update HuggingChat model list and aliases (request by @TheFirstNoob)
  - Add 'Qwen/Qwen2.5-72B-Instruct' as the first model in the list
  - Update model aliases to include 'qwen-2.5-72b'
  - Reorder existing models in the list for consistency
  - Remove duplicate entry for 'Qwen/Qwen2.5-72B-Instruct' in models list
* refactor(g4f/Provider/ReplicateHome.py): remove unused text models (request by @TheFirstNoob)
  - Remove the 'meta/meta-llama-3-70b-instruct' and 'mistralai/mixtral-8x7b-instruct-v0.1' text models from the list
  - Update the list to only include the remaining text and image models
  - This change simplifies the model configuration, focusing on the core text and image models provided by Replicate
* refactor(g4f/Provider/HuggingChat.py): Move HuggingChat to needs_auth directory (request by @TheFirstNoob)
* Update (g4f/Provider/needs_auth/HuggingChat.py)
* Update g4f/models.py
* Update g4f/Provider/Airforce.py
* Update g4f/models.py g4f/Provider/needs_auth/HuggingChat.py
* Added 'Airforce' provider to the 'o1-mini' model (g4f/models.py)
* Update (g4f/Provider/Airforce.py g4f/Provider/AmigoChat.py)
* Update g4f/models.py g4f/Provider/DeepInfraChat.py g4f/Provider/Airforce.py
* Update g4f/Provider/DeepInfraChat.py
* Update (g4f/Provider/DeepInfraChat.py)
* Update g4f/Provider/Blackbox.py
* Update (docs/client.md docs/async_client.md g4f/client/__init__.py)
* Update (docs/async_client.md docs/client.md)
* Update (g4f/client/__init__.py)

---------

Co-authored-by: kqlio67 <kqlio67@users.noreply.github.com>
Co-authored-by: kqlio67 <>
Co-authored-by: H Lohaus <hlohaus@users.noreply.github.com>
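The docs(client.md) and README items above add a response_format parameter to the Client API image generation examples. A minimal sketch of that usage with the g4f Client API is shown below; the model names and prompt are illustrative assumptions, not values taken from this commit.

    from g4f.client import Client

    client = Client()

    # Text generation via the Client API (model name is an assumption for illustration)
    chat = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "Say hello in one sentence."}],
    )
    print(chat.choices[0].message.content)

    # Image generation: per the docs change, response_format="url" returns a hosted URL,
    # while response_format="b64_json" returns the image as base64-encoded JSON.
    image = client.images.generate(
        model="flux",
        prompt="a white siamese cat",
        response_format="url",
    )
    print(image.data[0].url)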
80 lines
3.1 KiB
Python
from __future__ import annotations

import json

from aiohttp import ClientSession, ClientResponseError

from ..typing import AsyncResult, Messages, ImageType
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin


class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://deepinfra.com/chat"
    api_endpoint = "https://api.deepinfra.com/v1/openai/chat/completions"

    working = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo'
    models = [
        'meta-llama/Meta-Llama-3.1-8B-Instruct',
        default_model,
        'Qwen/QwQ-32B-Preview',
        'microsoft/WizardLM-2-8x22B',
        'Qwen/Qwen2.5-72B-Instruct',
        'Qwen/Qwen2.5-Coder-32B-Instruct',
        'nvidia/Llama-3.1-Nemotron-70B-Instruct',
    ]
    model_aliases = {
        "llama-3.1-8b": "meta-llama/Meta-Llama-3.1-8B-Instruct",
        "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
        "qwq-32b": "Qwen/QwQ-32B-Preview",
        "wizardlm-2-8x22b": "microsoft/WizardLM-2-8x22B",
        "qwen-2-72b": "Qwen/Qwen2.5-72B-Instruct",
        "qwen-2.5-coder-32b": "Qwen/Qwen2.5-Coder-32B-Instruct",
        "nemotron-70b": "nvidia/Llama-3.1-Nemotron-70B-Instruct",
    }

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        # Resolve short aliases (e.g. "llama-3.1-70b") to full DeepInfra model ids.
        model = cls.get_model(model)

        headers = {
            'Content-Type': 'application/json',
            'Origin': 'https://deepinfra.com',
            'Referer': 'https://deepinfra.com/',
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
            'X-Deepinfra-Source': 'web-page',
            'accept': 'text/event-stream',
        }

        data = {
            'model': model,
            'messages': messages,
            'stream': True
        }

        async with ClientSession(headers=headers) as session:
            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                response.raise_for_status()
                # The endpoint streams OpenAI-style server-sent events: "data: {...}" lines
                # terminated by "data: [DONE]".
                async for line in response.content:
                    if line:
                        decoded_line = line.decode('utf-8').strip()
                        if decoded_line.startswith('data:'):
                            json_part = decoded_line[5:].strip()
                            if json_part == '[DONE]':
                                break
                            try:
                                chunk = json.loads(json_part)
                                choices = chunk.get('choices', [])
                                if choices:
                                    delta = choices[0].get('delta', {})
                                    content = delta.get('content', '')
                                    if content:
                                        yield content
                            except json.JSONDecodeError:
                                print(f"JSON decode error: {json_part}")
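For reference, a minimal sketch of driving this provider directly; the prompt is an illustrative assumption. In normal use the provider is selected through g4f's Client or ChatCompletion interfaces, which handle model and provider resolution for you.

    import asyncio

    from g4f.Provider import DeepInfraChat

    async def main():
        # "llama-3.1-70b" is mapped to the full DeepInfra model id via model_aliases.
        async for chunk in DeepInfraChat.create_async_generator(
            model="llama-3.1-70b",
            messages=[{"role": "user", "content": "Explain SSE streaming in one sentence."}],
        ):
            print(chunk, end="", flush=True)

    asyncio.run(main())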