Update (g4f/Provider/Airforce.py)

kqlio67 2024-11-01 18:01:29 +02:00
parent b467e5d758
commit 11bec81dc4
6 changed files with 653 additions and 334 deletions

docs/providers-and-models.md

@@ -16,12 +16,12 @@ This document provides an overview of various AI providers and models, including
 | Website | Provider | Text Models | Image Models | Vision Models | Stream | Status | Auth |
 |---------|----------|-------------|--------------|---------------|--------|--------|------|
 |[ai4chat.co](https://www.ai4chat.co)|`g4f.Provider.Ai4Chat`|`gpt-4`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
-|[chat.ai365vip.com](https://chat.ai365vip.com)|`g4f.Provider.AI365VIP`|`gpt-3.5-turbo, gpt-4o`|❌|❌|?|![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
+|[chat.ai365vip.com](https://chat.ai365vip.com)|`g4f.Provider.AI365VIP`||❌|❌|?|![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
 |[aichatfree.info](https://aichatfree.info)|`g4f.Provider.AIChatFree`|`gemini-pro`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
 |[aichatonline.org](https://aichatonline.org)|`g4f.Provider.AiChatOnline`|`gpt-4o-mini`|❌|❌|?|![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
 |[ai-chats.org](https://ai-chats.org)|`g4f.Provider.AiChats`|`gpt-4`|`dalle`|❌|?|![Captcha](https://img.shields.io/badge/Captcha-f48d37)|❌|
 |[api.airforce](https://api.airforce)|`g4f.Provider.AiMathGPT`|`llama-3.1-70b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
-|[api.airforce](https://api.airforce)|`g4f.Provider.Airforce`|`gpt-4, gpt-4-turbo, gpt-4o-mini, gpt-3.5-turbo, gpt-4o, claude-3-haiku, claude-3-sonnet, claude-3-5-sonnet, claude-3-opus, llama-3-70b, llama-3-8b, llama-2-13b, llama-3.1-405b, llama-3.1-70b, llama-3.1-8b, llamaguard-2-8b, llamaguard-7b, llama-3.2-90b, mixtral-8x7b, mixtral-8x22b, mistral-7b, qwen-1.5-7b, qwen-1.5-14b, qwen-1.5-72b, qwen-1.5-110b, qwen-2-72b, gemma-2b, gemma-2-9b, gemma-2-27b, gemini-flash, gemini-pro, deepseek, mixtral-8x7b-dpo, yi-34b, wizardlm-2-8x22b, solar-10.7b, mythomax-l2-13b, cosmosrp`|`flux, flux-realism, flux-anime, flux-3d, flux-disney, flux-pixel, flux-4o, any-dark, dalle-3`|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
+|[api.airforce](https://api.airforce)|`g4f.Provider.Airforce`|`claude-3-haiku, claude-3-sonnet, claude-3-opus, gpt-4, gpt-4-turbo, gpt-4o-mini, gpt-3.5-turbo, llama-3-70b, llama-3-8b, llama-2-13b, llama-3.1-405b, llama-3.1-70b, llama-3.1-8b, llamaguard-2-8b, llamaguard-7b, llama-3.2-90b, llamaguard-3-8b, llama-3.2-11b, llamaguard-3-11b, llama-3.2-3b, llama-3.2-1b, llama-2-7b, mixtral-8x7b, mixtral-8x22b, mythomax-13b, openchat-3.5, qwen-2-72b, qwen-2-5-7b, qwen-2-5-72b, gemma-2b, gemma-2-9b, gemma-2b-27b, gemini-flash, gemini-pro, dbrx-instruct, deepseek-coder, hermes-2-dpo, hermes-2, openhermes-2.5, wizardlm-2-8x22b, phi-2, solar-10-7b, cosmosrp, lfm-40b, german-7b, zephyr-7b`|`flux, flux-realism, flux-anime, flux-3d, flux-disney, flux-pixel, flux-4o, any-dark, dalle-3`|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
 |[aiuncensored.info](https://www.aiuncensored.info)|`g4f.Provider.AIUncensored`|✔|✔|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
 |[allyfy.chat](https://allyfy.chat/)|`g4f.Provider.Allyfy`|`gpt-3.5-turbo`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
 |[amigochat.io/chat](https://amigochat.io/chat/)|`g4f.Provider.AmigoChat`|`gpt-4o, gpt-4o-mini, o1, o1-mini, claude-3.5-sonnet, llama-3.2-90b, llama-3.1-405b, gemini-pro`|`flux-pro, flux-realism, dalle-3`|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
@@ -131,6 +131,8 @@ This document provides an overview of various AI providers and models, including
 |mistral-nemo|Mistral AI|2+ Providers|[huggingface.co](https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407)|
 |mistral-large|Mistral AI|2+ Providers|[mistral.ai](https://mistral.ai/news/mistral-large-2407/)|
 |mixtral-8x7b-dpo|NousResearch|1+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO)|
+|hermes-2-dpo|NousResearch|1+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO)|
+|hermes-2|NousResearch|1+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Hermes-2-Pro-Mistral-7B)|
 |yi-34b|NousResearch|1+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Nous-Hermes-2-Yi-34B)|
 |hermes-3|NousResearch|2+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Hermes-3-Llama-3.1-8B)|
 |gemini|Google DeepMind|1+ Providers|[deepmind.google](http://deepmind.google/technologies/gemini/)|
@@ -170,7 +172,7 @@ This document provides an overview of various AI providers and models, including
 |solar-10-7b|Upstage|1+ Providers|[huggingface.co](https://huggingface.co/upstage/SOLAR-10.7B-Instruct-v1.0)|
 |solar-pro|Upstage|1+ Providers|[huggingface.co](https://huggingface.co/upstage/solar-pro-preview-instruct)|
 |pi|Inflection|1+ Providers|[inflection.ai](https://inflection.ai/blog/inflection-2-5)|
-|deepseek|DeepSeek|1+ Providers|[deepseek.com](https://www.deepseek.com/)|
+|deepseek-coder|DeepSeek|1+ Providers|[huggingface.co](https://huggingface.co/deepseek-ai/DeepSeek-Coder-V2-Instruct)|
 |wizardlm-2-7b|WizardLM|1+ Providers|[huggingface.co](https://huggingface.co/dreamgen/WizardLM-2-7B)|
 |wizardlm-2-8x22b|WizardLM|2+ Providers|[huggingface.co](https://huggingface.co/alpindale/WizardLM-2-8x22B)|
 |sh-n-7b|Together|1+ Providers|[huggingface.co](https://huggingface.co/togethercomputer/StripedHyena-Nous-7B)|

@@ -190,6 +192,10 @@ This document provides an overview of various AI providers and models, including
 |german-7b|TheBloke|1+ Providers|[huggingface.co](https://huggingface.co/TheBloke/DiscoLM_German_7b_v1-GGUF)|
 |tinyllama-1.1b|TinyLlama|1+ Providers|[huggingface.co](https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v1.0)|
 |cybertron-7b|TheBloke|1+ Providers|[huggingface.co](https://huggingface.co/fblgit/una-cybertron-7b-v2-bf16)|
+|openhermes-2.5|Teknium|1+ Providers|[huggingface.co](https://huggingface.co/datasets/teknium/OpenHermes-2.5)|
+|lfm-40b|Liquid|1+ Providers|[liquid.ai](https://www.liquid.ai/liquid-foundation-models)|
+|zephyr-7b|HuggingFaceH4|1+ Providers|[huggingface.co](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta)|
 
 ### Image Models
 | Model | Base Provider | Providers | Website |

g4f/Provider/Airforce.py

@@ -1,105 +1,30 @@
 from __future__ import annotations
-import random
-import json
-import re
+from typing import Any, Dict
+import inspect
 from aiohttp import ClientSession
 from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..image import ImageResponse
-
-def split_long_message(message: str, max_length: int = 4000) -> list[str]:
-    return [message[i:i+max_length] for i in range(0, len(message), max_length)]
+from .helper import format_prompt
+from .airforce.AirforceChat import AirforceChat
+from .airforce.AirforceImage import AirforceImage
 
 class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://api.airforce"
-    image_api_endpoint = "https://api.airforce/imagine2"
-    text_api_endpoint = "https://api.airforce/chat/completions"
+    api_endpoint_completions = AirforceChat.api_endpoint_completions
+    api_endpoint_imagine2 = AirforceImage.api_endpoint_imagine2
     working = True
+    supports_stream = AirforceChat.supports_stream
+    supports_system_message = AirforceChat.supports_system_message
+    supports_message_history = AirforceChat.supports_message_history
 
-    default_model = 'llama-3-70b-chat'
-    supports_stream = True
-    supports_system_message = True
-    supports_message_history = True
-
-    text_models = [
-        'claude-3-haiku-20240307',
-        'claude-3-sonnet-20240229',
-        'claude-3-5-sonnet-20240620',
-        'claude-3-opus-20240229',
-        'chatgpt-4o-latest',
-        'gpt-4',
-        'gpt-4-turbo',
-        'gpt-4o-mini-2024-07-18',
-        'gpt-4o-mini',
-        'gpt-3.5-turbo',
-        'gpt-3.5-turbo-0125',
-        'gpt-3.5-turbo-1106',
-        default_model,
-        'llama-3-70b-chat-turbo',
-        'llama-3-8b-chat',
-        'llama-3-8b-chat-turbo',
-        'llama-3-70b-chat-lite',
-        'llama-3-8b-chat-lite',
-        'llama-2-13b-chat',
-        'llama-3.1-405b-turbo',
-        'llama-3.1-70b-turbo',
-        'llama-3.1-8b-turbo',
-        'LlamaGuard-2-8b',
-        'Llama-Guard-7b',
-        'Llama-3.2-90B-Vision-Instruct-Turbo',
-        'Mixtral-8x7B-Instruct-v0.1',
-        'Mixtral-8x22B-Instruct-v0.1',
-        'Mistral-7B-Instruct-v0.1',
-        'Mistral-7B-Instruct-v0.2',
-        'Mistral-7B-Instruct-v0.3',
-        'Qwen1.5-7B-Chat',
-        'Qwen1.5-14B-Chat',
-        'Qwen1.5-72B-Chat',
-        'Qwen1.5-110B-Chat',
-        'Qwen2-72B-Instruct',
-        'gemma-2b-it',
-        'gemma-2-9b-it',
-        'gemma-2-27b-it',
-        'gemini-1.5-flash',
-        'gemini-1.5-pro',
-        'deepseek-llm-67b-chat',
-        'Nous-Hermes-2-Mixtral-8x7B-DPO',
-        'Nous-Hermes-2-Yi-34B',
-        'WizardLM-2-8x22B',
-        'SOLAR-10.7B-Instruct-v1.0',
-        'MythoMax-L2-13b',
-        'cosmosrp',
-    ]
-
-    image_models = [
-        'flux',
-        'flux-realism',
-        'flux-anime',
-        'flux-3d',
-        'flux-disney',
-        'flux-pixel',
-        'flux-4o',
-        'any-dark',
-    ]
-
-    models = [
-        *text_models,
-        *image_models,
-    ]
+    default_model = AirforceChat.default_model
+    models = [*AirforceChat.text_models, *AirforceImage.image_models]
 
     model_aliases = {
-        "claude-3-haiku": "claude-3-haiku-20240307",
-        "claude-3-sonnet": "claude-3-sonnet-20240229",
-        "gpt-4o": "chatgpt-4o-latest",
-        "llama-3-70b": "llama-3-70b-chat",
-        "llama-3-8b": "llama-3-8b-chat",
-        "mixtral-8x7b": "Mixtral-8x7B-Instruct-v0.1",
-        "qwen-1.5-7b": "Qwen1.5-7B-Chat",
-        "gemma-2b": "gemma-2b-it",
-        "gemini-flash": "gemini-1.5-flash",
-        "mythomax-l2-13b": "MythoMax-L2-13b",
-        "solar-10.7b": "SOLAR-10.7B-Instruct-v1.0",
+        **AirforceChat.model_aliases,
+        **AirforceImage.model_aliases
     }
 
     @classmethod
@@ -107,139 +32,28 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
         if model in cls.models:
             return model
         elif model in cls.model_aliases:
-            return cls.model_aliases.get(model, cls.default_model)
+            return cls.model_aliases[model]
         else:
             return cls.default_model
 
     @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        seed: int = None,
-        size: str = "1:1",
-        stream: bool = False,
-        **kwargs
-    ) -> AsyncResult:
+    async def create_async_generator(cls, model: str, messages: Messages, **kwargs) -> AsyncResult:
         model = cls.get_model(model)
+        provider = AirforceChat if model in AirforceChat.text_models else AirforceImage
 
-        if model in cls.image_models:
-            async for result in cls._generate_image(model, messages, proxy, seed, size):
-                yield result
-        elif model in cls.text_models:
-            async for result in cls._generate_text(model, messages, proxy, stream):
-                yield result
+        if model not in provider.models:
+            raise ValueError(f"Unsupported model: {model}")
+
+        # Get the signature of the provider's create_async_generator method
+        sig = inspect.signature(provider.create_async_generator)
+
+        # Filter kwargs to only include parameters that the provider's method accepts
+        filtered_kwargs = {k: v for k, v in kwargs.items() if k in sig.parameters}
+
+        # Add model and messages to filtered_kwargs
+        filtered_kwargs['model'] = model
+        filtered_kwargs['messages'] = messages
+
+        async for result in provider.create_async_generator(**filtered_kwargs):
+            yield result
-
-    @classmethod
-    async def _generate_image(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        seed: int = None,
-        size: str = "1:1",
-        **kwargs
-    ) -> AsyncResult:
-        headers = {
-            "accept": "*/*",
-            "accept-language": "en-US,en;q=0.9",
-            "cache-control": "no-cache",
-            "origin": "https://llmplayground.net",
-            "user-agent": "Mozilla/5.0"
-        }
-
-        if seed is None:
-            seed = random.randint(0, 100000)
-
-        prompt = messages[-1]['content']
-
-        async with ClientSession(headers=headers) as session:
-            params = {
-                "model": model,
-                "prompt": prompt,
-                "size": size,
-                "seed": seed
-            }
-            async with session.get(f"{cls.image_api_endpoint}", params=params, proxy=proxy) as response:
-                response.raise_for_status()
-                content_type = response.headers.get('Content-Type', '').lower()
-
-                if 'application/json' in content_type:
-                    async for chunk in response.content.iter_chunked(1024):
-                        if chunk:
-                            yield chunk.decode('utf-8')
-                elif 'image' in content_type:
-                    image_data = b""
-                    async for chunk in response.content.iter_chunked(1024):
-                        if chunk:
-                            image_data += chunk
-                    image_url = f"{cls.image_api_endpoint}?model={model}&prompt={prompt}&size={size}&seed={seed}"
-                    alt_text = f"Generated image for prompt: {prompt}"
-                    yield ImageResponse(images=image_url, alt=alt_text)
-
-    @classmethod
-    async def _generate_text(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        stream: bool = False,
-        **kwargs
-    ) -> AsyncResult:
-        headers = {
-            "accept": "*/*",
-            "accept-language": "en-US,en;q=0.9",
-            "authorization": "Bearer missing api key",
-            "content-type": "application/json",
-            "user-agent": "Mozilla/5.0"
-        }
-
-        async with ClientSession(headers=headers) as session:
-            formatted_prompt = cls._format_messages(messages)
-            prompt_parts = split_long_message(formatted_prompt)
-            full_response = ""
-
-            for part in prompt_parts:
-                data = {
-                    "messages": [{"role": "user", "content": part}],
-                    "model": model,
-                    "max_tokens": 4096,
-                    "temperature": 1,
-                    "top_p": 1,
-                    "stream": stream
-                }
-                async with session.post(cls.text_api_endpoint, json=data, proxy=proxy) as response:
-                    response.raise_for_status()
-                    part_response = ""
-                    if stream:
-                        async for line in response.content:
-                            if line:
-                                line = line.decode('utf-8').strip()
-                                if line.startswith("data: ") and line != "data: [DONE]":
-                                    json_data = json.loads(line[6:])
-                                    content = json_data['choices'][0]['delta'].get('content', '')
-                                    part_response += content
-                    else:
-                        json_data = await response.json()
-                        content = json_data['choices'][0]['message']['content']
-                        part_response = content
-
-                    part_response = re.sub(
-                        r"One message exceeds the \d+chars per message limit\..+https:\/\/discord\.com\/invite\/\S+",
-                        '',
-                        part_response
-                    )
-                    part_response = re.sub(
-                        r"Rate limit \(\d+\/minute\) exceeded\. Join our discord for more: .+https:\/\/discord\.com\/invite\/\S+",
-                        '',
-                        part_response
-                    )
-                    full_response += part_response
-            yield full_response
-
-    @classmethod
-    def _format_messages(cls, messages: Messages) -> str:
-        return " ".join([msg['content'] for msg in messages])
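The refactored create_async_generator above dispatches to AirforceChat or AirforceImage and forwards only the keyword arguments that the target's signature declares. A minimal, self-contained sketch of that inspect.signature filtering pattern (the names target and filter_kwargs are illustrative, not from the codebase):

import asyncio
import inspect

async def target(model: str, messages: list, proxy: str = None) -> str:
    # Stand-in for a provider coroutine with a fixed signature.
    return f"model={model}, messages={len(messages)}, proxy={proxy}"

def filter_kwargs(func, kwargs: dict) -> dict:
    # Keep only the keyword arguments the callee actually declares.
    sig = inspect.signature(func)
    return {k: v for k, v in kwargs.items() if k in sig.parameters}

async def main() -> None:
    raw = {"proxy": "http://localhost:8080", "seed": 42, "size": "1:1"}
    accepted = filter_kwargs(target, raw)  # 'seed' and 'size' are dropped
    print(await target(model="demo", messages=[{"role": "user", "content": "hi"}], **accepted))

asyncio.run(main())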

g4f/Provider/airforce/AirforceChat.py

@@ -0,0 +1,375 @@
from __future__ import annotations
import re
from aiohttp import ClientSession
import json
from typing import List
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt

def clean_response(text: str) -> str:
    """Clean response from unwanted patterns."""
    patterns = [
        r"One message exceeds the \d+chars per message limit\..+https:\/\/discord\.com\/invite\/\S+",
        r"Rate limit \(\d+\/minute\) exceeded\. Join our discord for more: .+https:\/\/discord\.com\/invite\/\S+",
        r"Rate limit \(\d+\/hour\) exceeded\. Join our discord for more: https:\/\/discord\.com\/invite\/\S+",
        r"</s>",  # zephyr-7b-beta
    ]
    for pattern in patterns:
        text = re.sub(pattern, '', text)
    return text.strip()

def split_message(message: dict, chunk_size: int = 995) -> List[dict]:
    """Split a message into chunks of specified size."""
    content = message.get('content', '')
    if len(content) <= chunk_size:
        return [message]
    chunks = []
    while content:
        chunk = content[:chunk_size]
        content = content[chunk_size:]
        chunks.append({
            'role': message['role'],
            'content': chunk
        })
    return chunks

def split_messages(messages: Messages, chunk_size: int = 995) -> Messages:
    """Split all messages that exceed chunk_size into smaller messages."""
    result = []
    for message in messages:
        result.extend(split_message(message, chunk_size))
    return result
class AirforceChat(AsyncGeneratorProvider, ProviderModelMixin):
    label = "AirForce Chat"
    api_endpoint_completions = "https://api.airforce/chat/completions"  # Replace with the real endpoint
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = 'llama-3-70b-chat'
    text_models = [
        # anthropic
        'claude-3-haiku-20240307',
        'claude-3-sonnet-20240229',
        'claude-3-5-sonnet-20240620',
        'claude-3-5-sonnet-20241022',
        'claude-3-opus-20240229',

        # openai
        'chatgpt-4o-latest',
        'gpt-4',
        'gpt-4-turbo',
        'gpt-4o-2024-05-13',
        'gpt-4o-mini-2024-07-18',
        'gpt-4o-mini',
        'gpt-4o-2024-08-06',
        'gpt-3.5-turbo',
        'gpt-3.5-turbo-0125',
        'gpt-3.5-turbo-1106',
        'gpt-4o',
        'gpt-4-turbo-2024-04-09',
        'gpt-4-0125-preview',
        'gpt-4-1106-preview',

        # meta-llama
        default_model,
        'llama-3-70b-chat-turbo',
        'llama-3-8b-chat',
        'llama-3-8b-chat-turbo',
        'llama-3-70b-chat-lite',
        'llama-3-8b-chat-lite',
        'llama-2-13b-chat',
        'llama-3.1-405b-turbo',
        'llama-3.1-70b-turbo',
        'llama-3.1-8b-turbo',
        'LlamaGuard-2-8b',
        'llamaguard-7b',
        'Llama-Vision-Free',
        'Llama-Guard-7b',
        'Llama-3.2-90B-Vision-Instruct-Turbo',
        'Meta-Llama-Guard-3-8B',
        'Llama-3.2-11B-Vision-Instruct-Turbo',
        'Llama-Guard-3-11B-Vision-Turbo',
        'Llama-3.2-3B-Instruct-Turbo',
        'Llama-3.2-1B-Instruct-Turbo',
        'llama-2-7b-chat-int8',
        'llama-2-7b-chat-fp16',
        'Llama 3.1 405B Instruct',
        'Llama 3.1 70B Instruct',
        'Llama 3.1 8B Instruct',

        # mistral-ai
        'Mixtral-8x7B-Instruct-v0.1',
        'Mixtral-8x22B-Instruct-v0.1',
        'Mistral-7B-Instruct-v0.1',
        'Mistral-7B-Instruct-v0.2',
        'Mistral-7B-Instruct-v0.3',

        # Gryphe
        'MythoMax-L2-13b-Lite',
        'MythoMax-L2-13b',

        # openchat
        'openchat-3.5-0106',

        # qwen
        #'Qwen1.5-72B-Chat', # Empty response
        #'Qwen1.5-110B-Chat', # Empty response
        'Qwen2-72B-Instruct',
        'Qwen2.5-7B-Instruct-Turbo',
        'Qwen2.5-72B-Instruct-Turbo',

        # google
        'gemma-2b-it',
        'gemma-2-9b-it',
        'gemma-2-27b-it',

        # gemini
        'gemini-1.5-flash',
        'gemini-1.5-pro',

        # databricks
        'dbrx-instruct',

        # deepseek-ai
        'deepseek-coder-6.7b-base',
        'deepseek-coder-6.7b-instruct',
        'deepseek-math-7b-instruct',

        # NousResearch
        'Nous-Hermes-2-Mixtral-8x7B-DPO',
        'hermes-2-pro-mistral-7b',

        # teknium
        'openhermes-2.5-mistral-7b',

        # microsoft
        'WizardLM-2-8x22B',
        'phi-2',

        # upstage
        'SOLAR-10.7B-Instruct-v1.0',

        # pawan
        'cosmosrp',

        # liquid
        'lfm-40b-moe',

        # DiscoResearch
        'discolm-german-7b-v1',

        # tiiuae
        'falcon-7b-instruct',

        # defog
        'sqlcoder-7b-2',

        # tinyllama
        'tinyllama-1.1b-chat',

        # HuggingFaceH4
        'zephyr-7b-beta',
    ]
    models = [*text_models]
    model_aliases = {
        # NOTE: duplicate keys in this dict literal resolve to the *last*
        # occurrence; Python silently discards the earlier entries.
        # anthropic
        "claude-3-haiku": "claude-3-haiku-20240307",
        "claude-3-sonnet": "claude-3-sonnet-20240229",
        "claude-3.5-sonnet": "claude-3-5-sonnet-20240620",
        "claude-3.5-sonnet": "claude-3-5-sonnet-20241022",
        "claude-3-opus": "claude-3-opus-20240229",

        # openai
        "gpt-4o": "chatgpt-4o-latest",
        #"gpt-4": "gpt-4",
        #"gpt-4-turbo": "gpt-4-turbo",
        "gpt-4o": "gpt-4o-2024-05-13",
        "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
        #"gpt-4o-mini": "gpt-4o-mini",
        "gpt-4o": "gpt-4o-2024-08-06",
        "gpt-3.5-turbo": "gpt-3.5-turbo",
        "gpt-3.5-turbo": "gpt-3.5-turbo-0125",
        "gpt-3.5-turbo": "gpt-3.5-turbo-1106",
        #"gpt-4o": "gpt-4o",
        "gpt-4-turbo": "gpt-4-turbo-2024-04-09",
        "gpt-4": "gpt-4-0125-preview",
        "gpt-4": "gpt-4-1106-preview",

        # meta-llama
        "llama-3-70b": "llama-3-70b-chat",
        "llama-3-8b": "llama-3-8b-chat",
        "llama-3-8b": "llama-3-8b-chat-turbo",
        "llama-3-70b": "llama-3-70b-chat-lite",
        "llama-3-8b": "llama-3-8b-chat-lite",
        "llama-2-13b": "llama-2-13b-chat",
        "llama-3.1-405b": "llama-3.1-405b-turbo",
        "llama-3.1-70b": "llama-3.1-70b-turbo",
        "llama-3.1-8b": "llama-3.1-8b-turbo",
        "llamaguard-2-8b": "LlamaGuard-2-8b",
        "llamaguard-7b": "llamaguard-7b",
        #"llama_vision_free": "Llama-Vision-Free", # Unknown
        "llamaguard-7b": "Llama-Guard-7b",
        "llama-3.2-90b": "Llama-3.2-90B-Vision-Instruct-Turbo",
        "llamaguard-3-8b": "Meta-Llama-Guard-3-8B",
        "llama-3.2-11b": "Llama-3.2-11B-Vision-Instruct-Turbo",
        "llamaguard-3-11b": "Llama-Guard-3-11B-Vision-Turbo",
        "llama-3.2-3b": "Llama-3.2-3B-Instruct-Turbo",
        "llama-3.2-1b": "Llama-3.2-1B-Instruct-Turbo",
        "llama-2-7b": "llama-2-7b-chat-int8",
        "llama-2-7b": "llama-2-7b-chat-fp16",
        "llama-3.1-405b": "Llama 3.1 405B Instruct",
        "llama-3.1-70b": "Llama 3.1 70B Instruct",
        "llama-3.1-8b": "Llama 3.1 8B Instruct",

        # mistral-ai
        "mixtral-8x7b": "Mixtral-8x7B-Instruct-v0.1",
        "mixtral-8x22b": "Mixtral-8x22B-Instruct-v0.1",
        "mixtral-8x7b": "Mistral-7B-Instruct-v0.1",
        "mixtral-8x7b": "Mistral-7B-Instruct-v0.2",
        "mixtral-8x7b": "Mistral-7B-Instruct-v0.3",

        # Gryphe
        "mythomax-13b": "MythoMax-L2-13b-Lite",
        "mythomax-13b": "MythoMax-L2-13b",

        # openchat
        "openchat-3.5": "openchat-3.5-0106",

        # qwen
        #"qwen-1.5-72b": "Qwen1.5-72B-Chat", # Empty answer
        #"qwen-1.5-110b": "Qwen1.5-110B-Chat", # Empty answer
        "qwen-2-72b": "Qwen2-72B-Instruct",
        "qwen-2-5-7b": "Qwen2.5-7B-Instruct-Turbo",
        "qwen-2-5-72b": "Qwen2.5-72B-Instruct-Turbo",

        # google
        "gemma-2b": "gemma-2b-it",
        "gemma-2-9b": "gemma-2-9b-it",
        "gemma-2b-27b": "gemma-2-27b-it",

        # gemini
        "gemini-flash": "gemini-1.5-flash",
        "gemini-pro": "gemini-1.5-pro",

        # databricks
        "dbrx-instruct": "dbrx-instruct",

        # deepseek-ai
        #"deepseek-coder": "deepseek-coder-6.7b-base",
        "deepseek-coder": "deepseek-coder-6.7b-instruct",
        #"deepseek-math": "deepseek-math-7b-instruct",

        # NousResearch
        "hermes-2-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO",
        "hermes-2": "hermes-2-pro-mistral-7b",

        # teknium
        "openhermes-2.5": "openhermes-2.5-mistral-7b",

        # microsoft
        "wizardlm-2-8x22b": "WizardLM-2-8x22B",
        #"phi-2": "phi-2",

        # upstage
        "solar-10-7b": "SOLAR-10.7B-Instruct-v1.0",

        # pawan
        #"cosmosrp": "cosmosrp",

        # liquid
        "lfm-40b": "lfm-40b-moe",

        # DiscoResearch
        "german-7b": "discolm-german-7b-v1",

        # tiiuae
        #"falcon-7b": "falcon-7b-instruct",

        # defog
        #"sqlcoder-7b": "sqlcoder-7b-2",

        # tinyllama
        #"tinyllama-1b": "tinyllama-1.1b-chat",

        # HuggingFaceH4
        "zephyr-7b": "zephyr-7b-beta",
    }
    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        stream: bool = False,
        proxy: str = None,
        max_tokens: int = 4096,
        temperature: float = 1,
        top_p: float = 1,
        **kwargs
    ) -> AsyncResult:
        model = cls.get_model(model)

        chunked_messages = split_messages(messages)

        headers = {
            'accept': '*/*',
            'accept-language': 'en-US,en;q=0.9',
            'authorization': 'Bearer missing api key',
            'cache-control': 'no-cache',
            'content-type': 'application/json',
            'origin': 'https://llmplayground.net',
            'pragma': 'no-cache',
            'priority': 'u=1, i',
            'referer': 'https://llmplayground.net/',
            'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Linux"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'cross-site',
            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36'
        }

        data = {
            "messages": chunked_messages,
            "model": model,
            "max_tokens": max_tokens,
            "temperature": temperature,
            "top_p": top_p,
            "stream": stream
        }

        async with ClientSession(headers=headers) as session:
            async with session.post(cls.api_endpoint_completions, json=data, proxy=proxy) as response:
                response.raise_for_status()
                text = ""
                if stream:
                    async for line in response.content:
                        line = line.decode('utf-8')
                        if line.startswith('data: '):
                            json_str = line[6:].strip()
                            if json_str == "[DONE]":  # the sentinel arrives as "data: [DONE]"
                                break
                            try:
                                chunk = json.loads(json_str)
                                if 'choices' in chunk and chunk['choices']:
                                    content = chunk['choices'][0].get('delta', {}).get('content', '')
                                    text += content  # accumulate the streamed deltas
                            except json.JSONDecodeError as e:
                                print(f"Error decoding JSON: {json_str}, Error: {e}")
                    yield clean_response(text)
                else:
                    response_json = await response.json()
                    text = response_json["choices"][0]["message"]["content"]
                    yield clean_response(text)
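The 995-character chunking above works around the per-message length limit whose error banner clean_response strips from replies. A quick standalone check of the splitting behaviour (the helper is copied from the file so the snippet runs on its own):

from typing import List

def split_message(message: dict, chunk_size: int = 995) -> List[dict]:
    # Same logic as the helper defined in AirforceChat.py above.
    content = message.get('content', '')
    if len(content) <= chunk_size:
        return [message]
    chunks = []
    while content:
        chunks.append({'role': message['role'], 'content': content[:chunk_size]})
        content = content[chunk_size:]
    return chunks

parts = split_message({'role': 'user', 'content': 'x' * 2000})
assert [len(p['content']) for p in parts] == [995, 995, 10]
print(f"{len(parts)} chunks")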

g4f/Provider/airforce/AirforceImage.py

@@ -0,0 +1,97 @@
from __future__ import annotations

from aiohttp import ClientSession
import random

from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ...image import ImageResponse

class AirforceImage(AsyncGeneratorProvider, ProviderModelMixin):
    label = "Airforce Image"
    #url = "https://api.airforce"
    api_endpoint_imagine2 = "https://api.airforce/imagine2"
    #working = True

    default_model = 'flux'
    image_models = [
        'flux',
        'flux-realism',
        'flux-anime',
        'flux-3d',
        'flux-disney',
        'flux-pixel',
        'flux-4o',
        'any-dark',
        'stable-diffusion-xl-base',
        'stable-diffusion-xl-lightning',
    ]
    models = [*image_models]

    model_aliases = {
        # NOTE: the duplicate "sdxl" key resolves to the last entry, so the
        # "stable-diffusion-xl-base" mapping is silently discarded.
        "sdxl": "stable-diffusion-xl-base",
        "sdxl": "stable-diffusion-xl-lightning",
    }

    @classmethod
    def get_model(cls, model: str) -> str:
        if model in cls.models:
            return model
        elif model in cls.model_aliases:
            return cls.model_aliases[model]
        else:
            return cls.default_model

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        size: str = '1:1',
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        model = cls.get_model(model)

        headers = {
            'accept': '*/*',
            'accept-language': 'en-US,en;q=0.9',
            'authorization': 'Bearer missing api key',
            'cache-control': 'no-cache',
            'origin': 'https://llmplayground.net',
            'pragma': 'no-cache',
            'priority': 'u=1, i',
            'referer': 'https://llmplayground.net/',
            'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Linux"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'cross-site',
            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36'
        }

        async with ClientSession(headers=headers) as session:
            prompt = messages[-1]['content']
            seed = random.randint(0, 4294967295)
            params = {
                'model': model,
                'prompt': prompt,
                'size': size,
                'seed': str(seed)
            }
            async with session.get(cls.api_endpoint_imagine2, params=params, proxy=proxy) as response:
                # raise_for_status() already rejects non-2xx responses, so the
                # final else branch below is effectively unreachable.
                response.raise_for_status()
                if response.status == 200:
                    content_type = response.headers.get('Content-Type', '')
                    if 'image' in content_type:
                        image_url = str(response.url)
                        yield ImageResponse(image_url, alt="Airforce generated image")
                    else:
                        content = await response.text()
                        yield f"Unexpected content type: {content_type}\nResponse content: {content}"
                else:
                    error_content = await response.text()
                    yield f"Error: {error_content}"
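Assuming the package layout introduced by this commit, plus network access to the upstream endpoint, the image provider can be driven directly with asyncio; a hedged usage sketch, not an official entry point:

import asyncio
from g4f.Provider.airforce.AirforceImage import AirforceImage

async def main() -> None:
    messages = [{"role": "user", "content": "a red fox in watercolor"}]
    async for chunk in AirforceImage.create_async_generator(
        model="flux", messages=messages, size="1:1"
    ):
        # On success this is an ImageResponse wrapping the image URL;
        # otherwise the provider yields an error string.
        print(chunk)

asyncio.run(main())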

g4f/Provider/airforce/__init__.py

@@ -0,0 +1,2 @@
from .AirforceChat import AirforceChat
from .AirforceImage import AirforceImage

g4f/models.py

@@ -130,7 +130,7 @@ gpt_3 = Model(
 gpt_35_turbo = Model(
     name = 'gpt-3.5-turbo',
     base_provider = 'OpenAI',
-    best_provider = IterListProvider([Allyfy, NexraChatGPT, Airforce, DarkAI, Liaobots])
+    best_provider = IterListProvider([Allyfy, NexraChatGPT, DarkAI, Airforce, Liaobots])
 )
 
 # gpt-4
@@ -191,7 +191,7 @@ meta = Model(
 llama_2_7b = Model(
     name = "llama-2-7b",
     base_provider = "Meta Llama",
-    best_provider = Cloudflare
+    best_provider = IterListProvider([Cloudflare, Airforce])
 )
 
 llama_2_13b = Model(
@@ -217,13 +217,13 @@ llama_3_70b = Model(
 llama_3_1_8b = Model(
     name = "llama-3.1-8b",
     base_provider = "Meta Llama",
-    best_provider = IterListProvider([Blackbox, DeepInfraChat, ChatHub, Cloudflare, Airforce, GizAI, PerplexityLabs])
+    best_provider = IterListProvider([Blackbox, DeepInfraChat, ChatHub, Cloudflare, GizAI, Airforce, PerplexityLabs])
 )
 
 llama_3_1_70b = Model(
     name = "llama-3.1-70b",
     base_provider = "Meta Llama",
-    best_provider = IterListProvider([DDG, HuggingChat, Blackbox, FreeGpt, TeachAnything, Free2GPT, DeepInfraChat, DarkAI, Airforce, AiMathGPT, RubiksAI, GizAI, HuggingFace, PerplexityLabs])
+    best_provider = IterListProvider([DDG, HuggingChat, Blackbox, FreeGpt, TeachAnything, Free2GPT, DeepInfraChat, DarkAI, AiMathGPT, RubiksAI, GizAI, Airforce, HuggingFace, PerplexityLabs])
 )
 
 llama_3_1_405b = Model(
@@ -236,19 +236,19 @@ llama_3_1_405b = Model(
 llama_3_2_1b = Model(
     name = "llama-3.2-1b",
     base_provider = "Meta Llama",
-    best_provider = Cloudflare
+    best_provider = IterListProvider([Cloudflare, Airforce])
 )
 
 llama_3_2_3b = Model(
     name = "llama-3.2-3b",
     base_provider = "Meta Llama",
-    best_provider = Cloudflare
+    best_provider = IterListProvider([Cloudflare, Airforce])
 )
 
 llama_3_2_11b = Model(
     name = "llama-3.2-11b",
     base_provider = "Meta Llama",
-    best_provider = IterListProvider([Cloudflare, HuggingChat, HuggingFace])
+    best_provider = IterListProvider([Cloudflare, HuggingChat, Airforce, HuggingFace])
 )
 
 llama_3_2_90b = Model(
@@ -271,6 +271,18 @@ llamaguard_2_8b = Model(
     best_provider = Airforce
 )
 
+llamaguard_3_8b = Model(
+    name = "llamaguard-3-8b",
+    base_provider = "Meta Llama",
+    best_provider = Airforce
+)
+
+llamaguard_3_11b = Model(
+    name = "llamaguard-3-11b",
+    base_provider = "Meta Llama",
+    best_provider = Airforce
+)
+
 ### Mistral ###
 mistral_7b = Model(
@@ -305,14 +317,14 @@ mistral_large = Model(
 ### NousResearch ###
-mixtral_8x7b_dpo = Model(
-    name = "mixtral-8x7b-dpo",
+hermes_2 = Model(
+    name = "hermes-2",
     base_provider = "NousResearch",
     best_provider = Airforce
 )
 
-yi_34b = Model(
-    name = "yi-34b",
+hermes_2_dpo = Model(
+    name = "hermes-2-dpo",
     base_provider = "NousResearch",
     best_provider = Airforce
 )
@@ -328,7 +340,7 @@ hermes_3 = Model(
 phi_2 = Model(
     name = "phi-2",
     base_provider = "Microsoft",
-    best_provider = Cloudflare
+    best_provider = IterListProvider([Cloudflare, Airforce])
 )
 
 phi_3_medium_4k = Model(
@@ -364,10 +376,10 @@ gemini = Model(
 )
 
 # gemma
-gemma_2b_9b = Model(
-    name = 'gemma-2b-9b',
+gemma_2b = Model(
+    name = 'gemma-2b',
     base_provider = 'Google',
-    best_provider = Airforce
+    best_provider = IterListProvider([ReplicateHome, Airforce])
 )
 
 gemma_2b_27b = Model(
@@ -376,12 +388,6 @@ gemma_2b_27b = Model(
     best_provider = IterListProvider([DeepInfraChat, Airforce])
 )
 
-gemma_2b = Model(
-    name = 'gemma-2b',
-    base_provider = 'Google',
-    best_provider = IterListProvider([ReplicateHome, Airforce])
-)
-
 gemma_7b = Model(
     name = 'gemma-7b',
     base_provider = 'Google',
@@ -389,18 +395,18 @@ gemma_7b = Model(
 )
 
 # gemma 2
-gemma_2_27b = Model(
-    name = 'gemma-2-27b',
-    base_provider = 'Google',
-    best_provider = Airforce
-)
-
 gemma_2 = Model(
     name = 'gemma-2',
     base_provider = 'Google',
     best_provider = ChatHub
 )
 
+gemma_2_9b = Model(
+    name = 'gemma-2-9b',
+    base_provider = 'Google',
+    best_provider = Airforce
+)
+
 ### Anthropic ###
 claude_2_1 = Model(
@@ -413,26 +419,26 @@ claude_2_1 = Model(
 claude_3_opus = Model(
     name = 'claude-3-opus',
     base_provider = 'Anthropic',
-    best_provider = IterListProvider([Airforce, Liaobots])
+    best_provider = IterListProvider([Liaobots])
 )
 
 claude_3_sonnet = Model(
     name = 'claude-3-sonnet',
     base_provider = 'Anthropic',
-    best_provider = IterListProvider([Airforce, Liaobots])
+    best_provider = IterListProvider([Liaobots])
 )
 
 claude_3_haiku = Model(
     name = 'claude-3-haiku',
     base_provider = 'Anthropic',
-    best_provider = IterListProvider([DDG, Airforce, GizAI, Liaobots])
+    best_provider = IterListProvider([DDG, GizAI, Liaobots])
 )
 
 # claude 3.5
 claude_3_5_sonnet = Model(
     name = 'claude-3.5-sonnet',
     base_provider = 'Anthropic',
-    best_provider = IterListProvider([Blackbox, Editee, AmigoChat, Airforce, GizAI, Liaobots])
+    best_provider = IterListProvider([Blackbox, Editee, AmigoChat, GizAI, Liaobots])
 )
@@ -493,31 +499,13 @@ qwen_1_5_0_5b = Model(
 qwen_1_5_7b = Model(
     name = 'qwen-1.5-7b',
     base_provider = 'Qwen',
-    best_provider = IterListProvider([Cloudflare, Airforce])
+    best_provider = IterListProvider([Cloudflare])
 )
 
 qwen_1_5_14b = Model(
     name = 'qwen-1.5-14b',
     base_provider = 'Qwen',
-    best_provider = IterListProvider([FreeChatgpt, Cloudflare, Airforce])
+    best_provider = IterListProvider([FreeChatgpt, Cloudflare])
 )
-
-qwen_1_5_72b = Model(
-    name = 'qwen-1.5-72b',
-    base_provider = 'Qwen',
-    best_provider = Airforce
-)
-
-qwen_1_5_110b = Model(
-    name = 'qwen-1.5-110b',
-    base_provider = 'Qwen',
-    best_provider = Airforce
-)
-
-qwen_1_5_1_8b = Model(
-    name = 'qwen-1.5-1.8b',
-    base_provider = 'Qwen',
-    best_provider = Airforce
-)
 
 # qwen 2
@@ -527,6 +515,18 @@ qwen_2_72b = Model(
     best_provider = IterListProvider([DeepInfraChat, HuggingChat, Airforce, HuggingFace])
 )
 
+qwen_2_5_7b = Model(
+    name = 'qwen-2-5-7b',
+    base_provider = 'Qwen',
+    best_provider = Airforce
+)
+
+qwen_2_5_72b = Model(
+    name = 'qwen-2-5-72b',
+    base_provider = 'Qwen',
+    best_provider = Airforce
+)
+
 qwen = Model(
     name = 'qwen',
     base_provider = 'Qwen',
@@ -556,18 +556,18 @@ yi_1_5_9b = Model(
 )
 
 ### Upstage ###
-solar_1_mini = Model(
-    name = 'solar-1-mini',
-    base_provider = 'Upstage',
-    best_provider = Upstage
-)
-
 solar_10_7b = Model(
     name = 'solar-10-7b',
     base_provider = 'Upstage',
     best_provider = Airforce
 )
 
+solar_mini = Model(
+    name = 'solar-mini',
+    base_provider = 'Upstage',
+    best_provider = Upstage
+)
+
 solar_pro = Model(
     name = 'solar-pro',
     base_provider = 'Upstage',
@@ -583,8 +583,8 @@ pi = Model(
 )
 
 ### DeepSeek ###
-deepseek = Model(
-    name = 'deepseek',
+deepseek_coder = Model(
+    name = 'deepseek-coder',
     base_provider = 'DeepSeek',
     best_provider = Airforce
 )
@@ -630,7 +630,7 @@ lzlv_70b = Model(
 openchat_3_5 = Model(
     name = 'openchat-3.5',
     base_provider = 'OpenChat',
-    best_provider = Cloudflare
+    best_provider = IterListProvider([Cloudflare])
 )
 
 openchat_3_6_8b = Model(
@@ -683,11 +683,34 @@ sonar_chat = Model(
     best_provider = PerplexityLabs
 )
 
+### TheBloke ###
+german_7b = Model(
+    name = 'german-7b',
+    base_provider = 'TheBloke',
+    best_provider = Cloudflare
+)
+
-### Gryphe ###
-mythomax_l2_13b = Model(
-    name = 'mythomax-l2-13b',
-    base_provider = 'Gryphe',
+### Fblgit ###
+cybertron_7b = Model(
+    name = 'cybertron-7b',
+    base_provider = 'Fblgit',
+    best_provider = Cloudflare
+)
+
+### Nvidia ###
+nemotron_70b = Model(
+    name = 'nemotron-70b',
+    base_provider = 'Nvidia',
+    best_provider = IterListProvider([HuggingChat, HuggingFace])
+)
+
+### Teknium ###
+openhermes_2_5 = Model(
+    name = 'openhermes-2.5',
+    base_provider = 'Teknium',
     best_provider = Airforce
 )
@@ -700,34 +723,27 @@ cosmosrp = Model(
 )
 
-### TheBloke ###
+### Liquid ###
+lfm_40b = Model(
+    name = 'lfm-40b',
+    base_provider = 'Liquid',
+    best_provider = Airforce
+)
+
+### DiscoResearch ###
 german_7b = Model(
     name = 'german-7b',
-    base_provider = 'TheBloke',
-    best_provider = Cloudflare
+    base_provider = 'DiscoResearch',
+    best_provider = Airforce
 )
 
-### Tinyllama ###
-tinyllama_1_1b = Model(
-    name = 'tinyllama-1.1b',
-    base_provider = 'Tinyllama',
-    best_provider = Cloudflare
-)
-
-### Fblgit ###
-cybertron_7b = Model(
-    name = 'cybertron-7b',
-    base_provider = 'Fblgit',
-    best_provider = Cloudflare
-)
-
-### Nvidia ###
-nemotron_70b = Model(
-    name = 'nemotron-70b',
-    base_provider = 'Nvidia',
-    best_provider = IterListProvider([HuggingChat, HuggingFace])
+### HuggingFaceH4 ###
+zephyr_7b = Model(
+    name = 'zephyr-7b',
+    base_provider = 'HuggingFaceH4',
+    best_provider = Airforce
 )
@@ -754,7 +770,7 @@ sdxl_lora = Model(
 sdxl = Model(
     name = 'sdxl',
     base_provider = 'Stability AI',
-    best_provider = IterListProvider([ReplicateHome])
+    best_provider = IterListProvider([ReplicateHome, Airforce])
 )
@@ -947,6 +963,8 @@ class ModelUtils:
         # llamaguard
         'llamaguard-7b': llamaguard_7b,
         'llamaguard-2-8b': llamaguard_2_8b,
+        'llamaguard-3-8b': llamaguard_3_8b,
+        'llamaguard-3-11b': llamaguard_3_11b,
 
         ### Mistral ###
@@ -958,17 +976,17 @@ class ModelUtils:
         ### NousResearch ###
-        'mixtral-8x7b-dpo': mixtral_8x7b_dpo,
+        'hermes-2': hermes_2,
+        'hermes-2-dpo': hermes_2_dpo,
         'hermes-3': hermes_3,
-        'yi-34b': yi_34b,
 
         ### Microsoft ###
         'phi-2': phi_2,
         'phi_3_medium-4k': phi_3_medium_4k,
         'phi-3.5-mini': phi_3_5_mini,
 
         ### Google ###
         # gemini
         'gemini': gemini,
@@ -977,13 +995,12 @@ class ModelUtils:
         # gemma
         'gemma-2b': gemma_2b,
-        'gemma-2b-9b': gemma_2b_9b,
         'gemma-2b-27b': gemma_2b_27b,
         'gemma-7b': gemma_7b,
 
         # gemma-2
         'gemma-2': gemma_2,
-        'gemma-2-27b': gemma_2_27b,
+        'gemma-2-9b': gemma_2_9b,
 
         ### Anthropic ###
@@ -1028,10 +1045,9 @@ class ModelUtils:
         'qwen-1.5-0.5b': qwen_1_5_0_5b,
         'qwen-1.5-7b': qwen_1_5_7b,
         'qwen-1.5-14b': qwen_1_5_14b,
-        'qwen-1.5-72b': qwen_1_5_72b,
-        'qwen-1.5-110b': qwen_1_5_110b,
-        'qwen-1.5-1.8b': qwen_1_5_1_8b,
         'qwen-2-72b': qwen_2_72b,
+        'qwen-2-5-7b': qwen_2_5_7b,
+        'qwen-2-5-72b': qwen_2_5_72b,
 
         ### Zhipu AI ###
@@ -1044,16 +1060,17 @@ class ModelUtils:
         ### Upstage ###
-        'solar-mini': solar_1_mini,
         'solar-10-7b': solar_10_7b,
+        'solar-mini': solar_mini,
         'solar-pro': solar_pro,
 
         ### Inflection ###
         'pi': pi,
 
         ### DeepSeek ###
-        'deepseek': deepseek,
+        'deepseek-coder': deepseek_coder,
 
         ### Yorickvp ###
@@ -1096,22 +1113,10 @@ class ModelUtils:
         'sonar-chat': sonar_chat,
 
-        ### Gryphe ###
-        'mythomax-l2-13b': sonar_chat,
-
-        ### Pawan ###
-        'cosmosrp': cosmosrp,
-
         ### TheBloke ###
         'german-7b': german_7b,
 
-        ### Tinyllama ###
-        'tinyllama-1.1b': tinyllama_1_1b,
-
         ### Fblgit ###
         'cybertron-7b': cybertron_7b,
@@ -1120,6 +1125,26 @@ class ModelUtils:
         'nemotron-70b': nemotron_70b,
 
+        ### Teknium ###
+        'openhermes-2.5': openhermes_2_5,
+
+        ### Pawan ###
+        'cosmosrp': cosmosrp,
+
+        ### Liquid ###
+        'lfm-40b': lfm_40b,
+
+        ### DiscoResearch ###
+        'german-7b': german_7b,
+
+        ### HuggingFaceH4 ###
+        'zephyr-7b': zephyr_7b,
+
         #############
         ### Image ###
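Throughout g4f/models.py, IterListProvider wraps an ordered list of providers and falls through until one of them answers. A generic standalone sketch of that fallback pattern (illustrative only, not g4f's actual implementation):

from typing import Callable, Iterable

def first_working(providers: Iterable[Callable[[str], str]], prompt: str) -> str:
    # Try each provider in order; the first success wins.
    last_error: Exception | None = None
    for provider in providers:
        try:
            return provider(prompt)
        except Exception as err:  # demo only: catch-all to keep falling through
            last_error = err
    raise RuntimeError("all providers failed") from last_error

def flaky(prompt: str) -> str:
    raise ConnectionError("provider down")

def stable(prompt: str) -> str:
    return f"echo: {prompt}"

print(first_working([flaky, stable], "hello"))  # -> echo: hello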