Update (g4f/models.py g4f/Provider/ docs/providers-and-models.md)

kqlio67 2024-11-06 17:25:09 +02:00
parent 087a4d684c
commit e98793d0a7
38 changed files with 173 additions and 493 deletions

View File

@@ -18,16 +18,11 @@ This document provides an overview of various AI providers and models, including
| Provider | Text Models | Image Models | Vision Models | Stream | Status | Auth |
|----------|-------------|--------------|---------------|--------|--------|------|
|[ai4chat.co](https://www.ai4chat.co)|`g4f.Provider.Ai4Chat`|`gpt-4`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[chat.ai365vip.com](https://chat.ai365vip.com)|`g4f.Provider.AI365VIP`|✔|❌|❌|?|![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
|[aichatfree.info](https://aichatfree.info)|`g4f.Provider.AIChatFree`|`gemini-pro`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[aichatonline.org](https://aichatonline.org)|`g4f.Provider.AiChatOnline`|`gpt-4o-mini`|❌|❌|?|![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
|[ai-chats.org](https://ai-chats.org)|`g4f.Provider.AiChats`|`gpt-4`|`dalle`|❌|?|![Captcha](https://img.shields.io/badge/Captcha-f48d37)|❌|
|[aimathgpt.forit.ai](https://aimathgpt.forit.ai)|`g4f.Provider.AiMathGPT`|`llama-3.1-70b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[api.airforce](https://api.airforce)|`g4f.Provider.Airforce`|`claude-3-haiku, claude-3-sonnet, claude-3-opus, gpt-4, gpt-4-turbo, gpt-4o-mini, gpt-3.5-turbo, llama-3-70b, llama-3-8b, llama-2-13b, llama-3.1-405b, llama-3.1-70b, llama-3.1-8b, llamaguard-2-8b, llamaguard-7b, llama-3.2-90b, llamaguard-3-8b, llama-3.2-11b, llamaguard-3-11b, llama-3.2-3b, llama-3.2-1b, llama-2-7b, mixtral-8x7b, mixtral-8x22b, mythomax-13b, openchat-3.5, qwen-2-72b, qwen-2-5-7b, qwen-2-5-72b, gemma-2b, gemma-2-9b, gemma-2b-27b, gemini-flash, gemini-pro, dbrx-instruct, deepseek-coder, hermes-2-dpo, hermes-2, openhermes-2.5, wizardlm-2-8x22b, phi-2, solar-10-7b, cosmosrp, lfm-40b, german-7b, zephyr-7b`|`flux, flux-realism, flux-anime, flux-3d, flux-disney, flux-pixel, flux-4o, any-dark, sdxl`|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[aiuncensored.info](https://www.aiuncensored.info)|`g4f.Provider.AIUncensored`|✔|✔|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[allyfy.chat](https://allyfy.chat/)|`g4f.Provider.Allyfy`|`gpt-3.5-turbo`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[amigochat.io/chat](https://amigochat.io/chat/)|`g4f.Provider.AmigoChat`|✔|✔|❌|✔|![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
|[openchat.team](https://openchat.team/)|`g4f.Provider.Aura`|✔|❌|❌|?|![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)![Disabled](https://img.shields.io/badge/Disabled-red)|❌|
|[bing.com](https://bing.com/chat)|`g4f.Provider.Bing`|`gpt-4`|✔|`gpt-4-vision`|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌+✔|
|[bing.com/images](https://www.bing.com/images/create)|`g4f.Provider.BingCreateImages`|❌|✔|❌|❌|![Active](https://img.shields.io/badge/Active-brightgreen)|✔|
|[blackbox.ai](https://www.blackbox.ai)|`g4f.Provider.Blackbox`|`blackboxai, blackboxai-pro, gemini-flash, llama-3.1-8b, llama-3.1-70b, llama-3.1-405b, gpt-4o, gemini-pro, claude-3.5-sonnet`|`flux`|`blackboxai, gemini-flash, llama-3.1-8b, llama-3.1-70b, llama-3.1-405b, gpt-4o, gemini-pro`|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
@@ -35,34 +30,25 @@ This document provides an overview of various AI providers and models, including
|[chatgpt.com](https://chatgpt.com)|`g4f.Provider.ChatGpt`|`?`|`?`|`?`|?|![Unknown](https://img.shields.io/badge/Unknown-grey)|❌|
|[chatgpt.es](https://chatgpt.es)|`g4f.Provider.ChatGptEs`|`gpt-4o, gpt-4o-mini`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[chatgpt4online.org](https://chatgpt4online.org)|`g4f.Provider.Chatgpt4Online`|`gpt-4`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[chatgpt4o.one](https://chatgpt4o.one)|`g4f.Provider.Chatgpt4o`|✔|❌|❌|❌|![Disabled](https://img.shields.io/badge/Disabled-red)![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
|[chatgptfree.ai](https://chatgptfree.ai)|`g4f.Provider.ChatgptFree`|`gpt-4o-mini`|❌|❌|?|![Disabled](https://img.shields.io/badge/Disabled-red)![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
|[chatify-ai.vercel.app](https://chatify-ai.vercel.app)|`g4f.Provider.ChatifyAI`|`llama-3.1-8b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[playground.ai.cloudflare.com](https://playground.ai.cloudflare.com)|`g4f.Provider.Cloudflare`|`gemma-7b, llama-2-7b, llama-3-8b, llama-3.1-8b, llama-3.2-1b, phi-2, qwen-1.5-0-5b, qwen-1.5-8b, qwen-1.5-14b, qwen-1.5-7b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[aiuncensored.info](https://www.aiuncensored.info)|`g4f.Provider.DarkAI`|`gpt-4o, gpt-3.5-turbo, llama-3-70b, llama-3-405b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[darkai.foundation/chat](https://darkai.foundation/chat)|`g4f.Provider.DarkAI`|`gpt-4o, gpt-3.5-turbo, llama-3-70b, llama-3-405b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[duckduckgo.com](https://duckduckgo.com/duckchat/v1/chat)|`g4f.Provider.DDG`|`gpt-4o-mini, claude-3-haiku, llama-3.1-70b, mixtral-8x7b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[deepinfra.com](https://deepinfra.com)|`g4f.Provider.DeepInfra`|✔|❌|❌|✔|![Unknown](https://img.shields.io/badge/Unknown-grey)|✔|
|[deepinfra.com/chat](https://deepinfra.com/chat)|`g4f.Provider.DeepInfraChat`|`llama-3.1-405b, llama-3.1-70b, llama-3.1-8B, mixtral-8x22b, mixtral-8x7b, wizardlm-2-8x22b, wizardlm-2-7b, qwen-2-72b, phi-3-medium-4k, gemma-2b-27b, minicpm-llama-3-v2.5, mistral-7b, lzlv_70b, openchat-3.6-8b, phind-codellama-34b-v2, dolphin-2.9.1-llama-3-70b`|❌|`minicpm-llama-3-v2.5`|❌|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[deepinfra.com/chat](https://deepinfra.com/chat)|`g4f.Provider.DeepInfraChat`|`llama-3.1-8b, llama-3.1-70b, wizardlm-2-8x22b, qwen-2-72b`|❌|❌|❌|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[deepinfra.com](https://deepinfra.com)|`g4f.Provider.DeepInfraImage`|❌|✔|❌|❌|![Unknown](https://img.shields.io/badge/Unknown-grey)|✔|
|[editee.com](https://editee.com)|`g4f.Provider.Editee`|`claude-3.5-sonnet, gpt-4o, gemini-pro, mistral-large`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[flowgpt.com](https://flowgpt.com/chat)|`g4f.Provider.FlowGpt`|✔|❌|❌|✔|![Disabled](https://img.shields.io/badge/Disabled-red)|❌|
|[chat10.free2gpt.xyz](https://chat10.free2gpt.xyz)|`g4f.Provider.Free2GPT`|`llama-3.1-70b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[chat.chatgpt.org.uk](https://chat.chatgpt.org.uk)|`g4f.Provider.FreeChatgpt`|`qwen-1.5-14b, sparkdesk-v1.1, qwen-2-7b, glm-4-9b, glm-3-6b, yi-1.5-9b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[freegptsnav.aifree.site](https://freegptsnav.aifree.site)|`g4f.Provider.FreeGpt`|`llama-3.1-70b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[free.netfly.top](https://free.netfly.top)|`g4f.Provider.FreeNetfly`|✔|❌|❌|?|![Disabled](https://img.shields.io/badge/Disabled-red)![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
|[chat10.free2gpt.xyz](https://chat10.free2gpt.xyz)|`g4f.Provider.Free2GPT`|`mistral-7b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[freegptsnav.aifree.site](https://freegptsnav.aifree.site)|`g4f.Provider.FreeGpt`|`gemini-pro`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[gemini.google.com](https://gemini.google.com)|`g4f.Provider.Gemini`|✔|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|✔|
|[ai.google.dev](https://ai.google.dev)|`g4f.Provider.GeminiPro`|✔|❌|✔|?|![Active](https://img.shields.io/badge/Active-brightgreen)|✔|
|[app.giz.ai](https://app.giz.ai/assistant/)|`g4f.Provider.GizAI`|`gemini-flash`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[developers.sber.ru](https://developers.sber.ru/gigachat)|`g4f.Provider.GigaChat`|✔|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|✔|
|[gprochat.com](https://gprochat.com)|`g4f.Provider.GPROChat`|`gemini-pro`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[console.groq.com/playground](https://console.groq.com/playground)|`g4f.Provider.Groq`|✔|❌|❌|?|![Active](https://img.shields.io/badge/Active-brightgreen)|✔|
|[huggingface.co/chat](https://huggingface.co/chat)|`g4f.Provider.HuggingChat`|`llama-3.1-70b, command-r-plus, qwen-2-72b, llama-3.2-11b, hermes-3, mistral-nemo, phi-3.5-mini`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[huggingface.co](https://huggingface.co/chat)|`g4f.Provider.HuggingFace`|✔|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[koala.sh/chat](https://koala.sh/chat)|`g4f.Provider.Koala`|`gpt-4o-mini`|❌|❌|?|![Disabled](https://img.shields.io/badge/Disabled-red)![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
|[liaobots.work](https://liaobots.work)|`g4f.Provider.Liaobots`|`gpt-3.5-turbo, gpt-4o-mini, gpt-4o, gpt-4-turbo, grok-2, grok-2-mini, claude-3-opus, claude-3-sonnet, claude-3-5-sonnet, claude-3-haiku, claude-2.1, gemini-flash, gemini-pro`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[magickpen.com](https://magickpen.com)|`g4f.Provider.MagickPen`|`gpt-4o-mini`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[meta.ai](https://www.meta.ai)|`g4f.Provider.MetaAI`|✔|✔|?|?|![Active](https://img.shields.io/badge/Active-brightgreen)|✔|
|[app.myshell.ai/chat](https://app.myshell.ai/chat)|`g4f.Provider.MyShell`|✔|❌|?|?|![Disabled](https://img.shields.io/badge/Disabled-red)|❌|
|[nexra.aryahcr.cc/bing](https://nexra.aryahcr.cc/documentation/bing/en)|`g4f.Provider.NexraBing`|✔|❌|❌|✔|![Disabled](https://img.shields.io/badge/Disabled-red)|❌|
|[nexra.aryahcr.cc/blackbox](https://nexra.aryahcr.cc/documentation/blackbox/en)|`g4f.Provider.NexraBlackbox`|`blackboxai`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[nexra.aryahcr.cc/chatgpt](https://nexra.aryahcr.cc/documentation/chatgpt/en)|`g4f.Provider.NexraChatGPT`|`gpt-4, gpt-3.5-turbo, gpt-3, gpt-4o` |❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
@@ -77,12 +63,11 @@ This document provides an overview of various AI providers and models, including
|[nexra.aryahcr.cc/stable-diffusion](https://nexra.aryahcr.cc/documentation/stable-diffusion/en)|`g4f.Provider.NexraSD15`|❌|`sd-1.5`|❌|❌|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[nexra.aryahcr.cc/stable-diffusion](https://nexra.aryahcr.cc/documentation/stable-diffusion/en)|`g4f.Provider.NexraSDLora`|❌|`sdxl-lora`|❌|❌|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[nexra.aryahcr.cc/stable-diffusion](https://nexra.aryahcr.cc/documentation/stable-diffusion/en)|`g4f.Provider.NexraSDTurbo`|❌|`sdxl-turbo`|❌|❌|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[openrouter.ai](https://openrouter.ai)|`g4f.Provider.OpenRouter`|✔|❌|?|?|![Disabled](https://img.shields.io/badge/Disabled-red)|❌|
|[platform.openai.com](https://platform.openai.com/)|`g4f.Provider.Openai`|✔|❌|✔|?|![Unknown](https://img.shields.io/badge/Unknown-grey)|✔|
|[chatgpt.com](https://chatgpt.com/)|`g4f.Provider.OpenaiChat`|`gpt-4o, gpt-4o-mini, gpt-4`|❌|✔|?|![Unknown](https://img.shields.io/badge/Unknown-grey)|✔|
|[perplexity.ai](https://www.perplexity.ai)|`g4f.Provider.PerplexityAi`|✔|❌|❌|?|![Disabled](https://img.shields.io/badge/Disabled-red)|❌|
|[perplexity.ai](https://www.perplexity.ai)|`g4f.Provider.PerplexityApi`|✔|❌|❌|?|![Unknown](https://img.shields.io/badge/Unknown-grey)|✔|
|[labs.perplexity.ai](https://labs.perplexity.ai)|`g4f.Provider.PerplexityLabs`|`sonar-online, sonar-chat, llama-3.1-8b, llama-3.1-70b`|❌|❌|?|![Disabled](https://img.shields.io/badge/Disabled-red)![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
|[labs.perplexity.ai](https://labs.perplexity.ai)|`g4f.Provider.PerplexityLabs`|`sonar-online, sonar-chat, llama-3.1-8b, llama-3.1-70b`|❌|❌|?|![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
|[pi.ai/talk](https://pi.ai/talk)|`g4f.Provider.Pi`|`pi`|❌|❌|?|![Unknown](https://img.shields.io/badge/Unknown-grey)|❌|
|[pizzagpt.it](https://www.pizzagpt.it)|`g4f.Provider.Pizzagpt`|`gpt-4o-mini`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[poe.com](https://poe.com)|`g4f.Provider.Poe`|✔|❌|❌|?|![Unknown](https://img.shields.io/badge/Unknown-grey)|✔|
@@ -177,7 +162,6 @@ This document provides an overview of various AI providers and models, including
|wizardlm-2-8x22b|WizardLM|2+ Providers|[huggingface.co](https://huggingface.co/alpindale/WizardLM-2-8x22B)|
|sh-n-7b|Together|1+ Providers|[huggingface.co](https://huggingface.co/togethercomputer/StripedHyena-Nous-7B)|
|llava-13b|Yorickvp|1+ Providers|[huggingface.co](https://huggingface.co/liuhaotian/llava-v1.5-13b)|
|minicpm-llama-3-v2.5|OpenBMB|1+ Providers|[huggingface.co](https://huggingface.co/openbmb/MiniCPM-Llama3-V-2_5)|
|lzlv-70b|Lzlv|1+ Providers|[huggingface.co](https://huggingface.co/lizpreciatior/lzlv_70b_fp16_hf)|
|openchat-3.5|OpenChat|1+ Providers|[huggingface.co](https://huggingface.co/openchat/openchat_3.5)|
|openchat-3.6-8b|OpenChat|1+ Providers|[huggingface.co](https://huggingface.co/openchat/openchat-3.6-8b-20240522)|
@@ -227,7 +211,6 @@ This document provides an overview of various AI providers and models, including
|gpt-4-vision|OpenAI|1+ Providers|[openai.com](https://openai.com/research/gpt-4v-system-card)|
|gemini-pro-vision|Google DeepMind|1+ Providers | [deepmind.google](https://deepmind.google/technologies/gemini/)|
|blackboxai|Blackbox AI|1+ Providers|[docs.blackbox.chat](https://docs.blackbox.chat/blackbox-ai-1)|
|minicpm-llama-3-v2.5|OpenBMB|1+ Providers | [huggingface.co](https://huggingface.co/openbmb/MiniCPM-Llama3-V-2_5)|
### Providers and vision models
| Provider | Base Provider | Vision Models | Status | Auth |
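For orientation, any row in the tables above can be exercised by pinning its provider explicitly instead of letting g4f auto-select. A minimal sketch, assuming the `g4f.ChatCompletion` interface this repository exposed at the time of the commit; the model and provider names come from the table:

```python
import g4f

# Pin a provider from the table rather than letting g4f choose one.
response = g4f.ChatCompletion.create(
    model="gpt-4o-mini",                 # listed for DDG above
    provider=g4f.Provider.DDG,
    messages=[{"role": "user", "content": "Say hello"}],
)
print(response)
```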

View File

@@ -9,19 +9,19 @@ from .helper import format_prompt
class DarkAI(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://www.aiuncensored.info"
url = "https://darkai.foundation/chat"
api_endpoint = "https://darkai.foundation/chat"
working = True
supports_stream = True
supports_system_message = True
supports_message_history = True
default_model = 'gpt-4o'
default_model = 'llama-3-405b'
models = [
default_model, # Uncensored
'gpt-4o', # Uncensored
'gpt-3.5-turbo', # Uncensored
'llama-3-70b', # Uncensored
'llama-3-405b',
default_model,
]
model_aliases = {
@@ -51,8 +51,6 @@ class DarkAI(AsyncGeneratorProvider, ProviderModelMixin):
headers = {
"accept": "text/event-stream",
"content-type": "application/json",
"origin": "https://www.aiuncensored.info",
"referer": "https://www.aiuncensored.info/",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
}
async with ClientSession(headers=headers) as session:
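As a sanity check for the rebranded endpoint, a hedged sketch of calling the provider through g4f's async entry point; the model name is the new default from this diff:

```python
import asyncio
import g4f

async def main() -> None:
    # DarkAI now lives at darkai.foundation and defaults to llama-3-405b.
    response = await g4f.ChatCompletion.create_async(
        model="llama-3-405b",
        provider=g4f.Provider.DarkAI,
        messages=[{"role": "user", "content": "In one line, what is SSE?"}],
    )
    print(response)

asyncio.run(main())
```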

View File

@@ -6,7 +6,6 @@ import json
from ..typing import AsyncResult, Messages, ImageType
from ..image import to_data_uri
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin):
@@ -17,42 +16,18 @@ class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin):
supports_system_message = True
supports_message_history = True
default_model = 'meta-llama/Meta-Llama-3.1-70B-Instruct'
default_model = 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo'
models = [
'meta-llama/Meta-Llama-3.1-405B-Instruct',
'meta-llama/Meta-Llama-3.1-70B-Instruct',
'meta-llama/Meta-Llama-3.1-8B-Instruct',
'mistralai/Mixtral-8x22B-Instruct-v0.1',
'mistralai/Mixtral-8x7B-Instruct-v0.1',
default_model,
'microsoft/WizardLM-2-8x22B',
'microsoft/WizardLM-2-7B',
'Qwen/Qwen2-72B-Instruct',
'microsoft/Phi-3-medium-4k-instruct',
'google/gemma-2-27b-it',
'openbmb/MiniCPM-Llama3-V-2_5', # Image upload is available
'mistralai/Mistral-7B-Instruct-v0.3',
'lizpreciatior/lzlv_70b_fp16_hf',
'openchat/openchat-3.6-8b',
'Phind/Phind-CodeLlama-34B-v2',
'cognitivecomputations/dolphin-2.9.1-llama-3-70b',
'Qwen/Qwen2.5-72B-Instruct',
]
model_aliases = {
"llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct",
"llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct",
"llama-3.1-8B": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"mixtral-8x22b": "mistralai/Mixtral-8x22B-Instruct-v0.1",
"mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
"llama-3.1-8b": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
"wizardlm-2-8x22b": "microsoft/WizardLM-2-8x22B",
"wizardlm-2-7b": "microsoft/WizardLM-2-7B",
"qwen-2-72b": "Qwen/Qwen2-72B-Instruct",
"phi-3-medium-4k": "microsoft/Phi-3-medium-4k-instruct",
"gemma-2b-27b": "google/gemma-2-27b-it",
"minicpm-llama-3-v2.5": "openbmb/MiniCPM-Llama3-V-2_5", # Image upload is available
"mistral-7b": "mistralai/Mistral-7B-Instruct-v0.3",
"lzlv-70b": "lizpreciatior/lzlv_70b_fp16_hf",
"openchat-3.6-8b": "openchat/openchat-3.6-8b",
"phind-codellama-34b-v2": "Phind/Phind-CodeLlama-34B-v2",
"dolphin-2.9.1-llama-3-70b": "cognitivecomputations/dolphin-2.9.1-llama-3-70b",
"qwen-2-72b": "Qwen/Qwen2.5-72B-Instruct",
}
@@ -97,30 +72,12 @@ class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin):
}
async with ClientSession(headers=headers) as session:
prompt = format_prompt(messages)
data = {
'model': model,
'messages': [
{'role': 'system', 'content': 'Be a helpful assistant'},
{'role': 'user', 'content': prompt}
],
'messages': messages,
'stream': True
}
if model == 'openbmb/MiniCPM-Llama3-V-2_5' and image is not None:
data['messages'][-1]['content'] = [
{
'type': 'image_url',
'image_url': {
'url': to_data_uri(image)
}
},
{
'type': 'text',
'text': messages[-1]['content']
}
]
async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
response.raise_for_status()
async for line in response.content:
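The net effect of this hunk: the chat history is now forwarded as-is instead of being flattened through `format_prompt` into a single user turn, and the MiniCPM image-upload branch is gone. A sketch of the resulting request body, with field names taken from the diff above (the message content is illustrative):

```python
# Sketch of the simplified payload sent to the DeepInfra chat endpoint.
data = {
    "model": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
    "messages": [
        {"role": "system", "content": "You are concise."},
        {"role": "user", "content": "Hello"},
    ],
    "stream": True,  # the endpoint replies with a server-sent event stream
}
```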

View File

@@ -1,77 +0,0 @@
from __future__ import annotations
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
class Editee(AsyncGeneratorProvider, ProviderModelMixin):
label = "Editee"
url = "https://editee.com"
api_endpoint = "https://editee.com/submit/chatgptfree"
working = True
supports_stream = True
supports_system_message = True
supports_message_history = True
default_model = 'claude'
models = ['claude', 'gpt4', 'gemini', 'mistrallarge']
model_aliases = {
"claude-3.5-sonnet": "claude",
"gpt-4o": "gpt4",
"gemini-pro": "gemini",
"mistral-large": "mistrallarge",
}
@classmethod
def get_model(cls, model: str) -> str:
if model in cls.models:
return model
elif model in cls.model_aliases:
return cls.model_aliases[model]
else:
return cls.default_model
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
headers = {
"Accept": "application/json, text/plain, */*",
"Accept-Language": "en-US,en;q=0.9",
"Cache-Control": "no-cache",
"Content-Type": "application/json",
"Origin": cls.url,
"Pragma": "no-cache",
"Priority": "u=1, i",
"Referer": f"{cls.url}/chat-gpt",
"Sec-CH-UA": '"Chromium";v="129", "Not=A?Brand";v="8"',
"Sec-CH-UA-Mobile": '?0',
"Sec-CH-UA-Platform": '"Linux"',
"Sec-Fetch-Dest": 'empty',
"Sec-Fetch-Mode": 'cors',
"Sec-Fetch-Site": 'same-origin',
"User-Agent": 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36',
"X-Requested-With": 'XMLHttpRequest',
}
async with ClientSession(headers=headers) as session:
prompt = format_prompt(messages)
data = {
"user_input": prompt,
"context": " ",
"template_id": "",
"selected_model": model
}
async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
response.raise_for_status()
response_data = await response.json()
yield response_data['text']

View File

@@ -16,7 +16,7 @@ class Free2GPT(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://chat10.free2gpt.xyz"
working = True
supports_message_history = True
default_model = 'llama-3.1-70b'
default_model = 'mistral-7b'
@classmethod
async def create_async_generator(
@@ -49,12 +49,8 @@ class Free2GPT(AsyncGeneratorProvider, ProviderModelMixin):
connector=get_connector(connector, proxy), headers=headers
) as session:
timestamp = int(time.time() * 1e3)
system_message = {
"role": "system",
"content": ""
}
data = {
"messages": [system_message] + messages,
"messages": messages,
"time": timestamp,
"pass": None,
"sign": generate_signature(timestamp, messages[-1]["content"]),

View File

@@ -1,96 +0,0 @@
from __future__ import annotations
import json
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://chat.chatgpt.org.uk"
api_endpoint = "/api/openai/v1/chat/completions"
working = True
default_model = '@cf/qwen/qwen1.5-14b-chat-awq'
models = [
'@cf/qwen/qwen1.5-14b-chat-awq',
'SparkDesk-v1.1',
'Qwen2-7B-Instruct',
'glm4-9B-chat',
'chatglm3-6B',
'Yi-1.5-9B-Chat',
]
model_aliases = {
"qwen-1.5-14b": "@cf/qwen/qwen1.5-14b-chat-awq",
"sparkdesk-v1.1": "SparkDesk-v1.1",
"qwen-2-7b": "Qwen2-7B-Instruct",
"glm-4-9b": "glm4-9B-chat",
"glm-3-6b": "chatglm3-6B",
"yi-1.5-9b": "Yi-1.5-9B-Chat",
}
@classmethod
def get_model(cls, model: str) -> str:
if model in cls.models:
return model
elif model.lower() in cls.model_aliases:
return cls.model_aliases[model.lower()]
else:
return cls.default_model
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
headers = {
"accept": "application/json, text/event-stream",
"accept-language": "en-US,en;q=0.9",
"content-type": "application/json",
"dnt": "1",
"origin": cls.url,
"referer": f"{cls.url}/",
"sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Linux"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
}
async with ClientSession(headers=headers) as session:
prompt = format_prompt(messages)
data = {
"messages": [
{"role": "system", "content": "\nYou are ChatGPT, a large language model trained by OpenAI.\nKnowledge cutoff: 2021-09\nCurrent model: gpt-3.5-turbo\nCurrent time: Thu Jul 04 2024 21:35:59 GMT+0300 (Eastern European Summer Time)\nLatex inline: \\(x^2\\) \nLatex block: $$e=mc^2$$\n\n"},
{"role": "user", "content": prompt}
],
"stream": True,
"model": model,
"temperature": 0.5,
"presence_penalty": 0,
"frequency_penalty": 0,
"top_p": 1
}
async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
response.raise_for_status()
accumulated_text = ""
async for line in response.content:
if line:
line_str = line.decode().strip()
if line_str == "data: [DONE]":
yield accumulated_text
break
elif line_str.startswith("data: "):
try:
chunk = json.loads(line_str[6:])
delta_content = chunk.get("choices", [{}])[0].get("delta", {}).get("content", "")
accumulated_text += delta_content
yield delta_content # Yield each chunk of content
except json.JSONDecodeError:
pass

View File

@@ -24,7 +24,7 @@ class FreeGpt(AsyncGeneratorProvider, ProviderModelMixin):
working = True
supports_message_history = True
supports_system_message = True
default_model = 'llama-3.1-70b'
default_model = 'gemini-pro'
@classmethod
async def create_async_generator(

View File

@@ -63,6 +63,15 @@ models = {
"tokenLimit": 126000,
"context": "128K",
},
"grok-beta": {
"id": "grok-beta",
"name": "Grok-Beta",
"model": "Grok",
"provider": "x.ai",
"maxLength": 400000,
"tokenLimit": 100000,
"context": "100K",
},
"grok-2": {
"id": "grok-2",
"name": "Grok-2",
@@ -99,18 +108,18 @@ models = {
"tokenLimit": 200000,
"context": "200K",
},
"claude-3-opus-20240229-gcp": {
"id": "claude-3-opus-20240229-gcp",
"name": "Claude-3-Opus-Gcp",
"claude-3-5-sonnet-20240620": {
"id": "claude-3-5-sonnet-20240620",
"name": "Claude-3.5-Sonnet",
"model": "Claude",
"provider": "Anthropic",
"maxLength": 800000,
"tokenLimit": 200000,
"context": "200K",
},
"claude-3-5-sonnet-20240620": {
"id": "claude-3-5-sonnet-20240620",
"name": "Claude-3.5-Sonnet",
"claude-3-5-sonnet-20241022": {
"id": "claude-3-5-sonnet-20241022",
"name": "Claude-3.5-Sonnet-V2",
"model": "Claude",
"provider": "Anthropic",
"maxLength": 800000,
@@ -183,9 +192,9 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
"claude-3-opus": "claude-3-opus-20240229",
"claude-3-opus": "claude-3-opus-20240229-aws",
"claude-3-opus": "claude-3-opus-20240229-gcp",
"claude-3-sonnet": "claude-3-sonnet-20240229",
"claude-3.5-sonnet": "claude-3-5-sonnet-20240620",
"claude-3.5-sonnet": "claude-3-5-sonnet-20241022",
"claude-3-haiku": "claude-3-haiku-20240307",
"claude-2.1": "claude-2.1",

View File

@@ -8,59 +8,40 @@ from ..providers.create_images import CreateImagesProvider
from .deprecated import *
from .selenium import *
from .needs_auth import *
from .not_working import *
from .local import *
from .gigachat import *
from .nexra import *
from .Ai4Chat import Ai4Chat
from .AI365VIP import AI365VIP
from .AIChatFree import AIChatFree
from .AIUncensored import AIUncensored
from .Allyfy import Allyfy
from .AmigoChat import AmigoChat
from .AiChatOnline import AiChatOnline
from .AiChats import AiChats
from .AiMathGPT import AiMathGPT
from .Airforce import Airforce
from .Aura import Aura
from .Bing import Bing
from .BingCreateImages import BingCreateImages
from .Blackbox import Blackbox
from .ChatGpt import ChatGpt
from .Chatgpt4Online import Chatgpt4Online
from .Chatgpt4o import Chatgpt4o
from .ChatGptEs import ChatGptEs
from .ChatgptFree import ChatgptFree
from .ChatifyAI import ChatifyAI
from .Cloudflare import Cloudflare
from .DarkAI import DarkAI
from .DDG import DDG
from .DeepInfraChat import DeepInfraChat
from .DeepInfraImage import DeepInfraImage
from .Editee import Editee
from .FlowGpt import FlowGpt
from .Free2GPT import Free2GPT
from .FreeChatgpt import FreeChatgpt
from .FreeGpt import FreeGpt
from .FreeNetfly import FreeNetfly
from .GeminiPro import GeminiPro
from .GizAI import GizAI
from .GPROChat import GPROChat
from .HuggingChat import HuggingChat
from .HuggingFace import HuggingFace
from .Koala import Koala
from .Liaobots import Liaobots
from .Local import Local
from .MagickPen import MagickPen
from .MetaAI import MetaAI
#from .MetaAIAccount import MetaAIAccount
from .Ollama import Ollama
from .PerplexityLabs import PerplexityLabs
from .Pi import Pi
from .Pizzagpt import Pizzagpt
from .Prodia import Prodia
from .Reka import Reka
from .Replicate import Replicate
from .ReplicateHome import ReplicateHome
from .RubiksAI import RubiksAI
from .TeachAnything import TeachAnything

View File

@@ -25,11 +25,10 @@ from .Aichat import Aichat
from .Berlin import Berlin
from .Phind import Phind
from .AiAsk import AiAsk
from ..AiChatOnline import AiChatOnline
from .ChatAnywhere import ChatAnywhere
from .FakeGpt import FakeGpt
from .GeekGpt import GeekGpt
from .GPTalk import GPTalk
from .Hashnode import Hashnode
from .Ylokh import Ylokh
from .OpenAssistant import OpenAssistant
from .OpenAssistant import OpenAssistant

View File

@@ -1,15 +1,15 @@
from __future__ import annotations
from ..locals.models import get_models
from ...locals.models import get_models
try:
from ..locals.provider import LocalProvider
from ...locals.provider import LocalProvider
has_requirements = True
except ImportError:
has_requirements = False
from ..typing import Messages, CreateResult
from ..providers.base_provider import AbstractProvider, ProviderModelMixin
from ..errors import MissingRequirementsError
from ...typing import Messages, CreateResult
from ...providers.base_provider import AbstractProvider, ProviderModelMixin
from ...errors import MissingRequirementsError
class Local(AbstractProvider, ProviderModelMixin):
label = "GPT4All"
@@ -40,4 +40,4 @@ class Local(AbstractProvider, ProviderModelMixin):
messages,
stream,
**kwargs
)
)

View File

@@ -3,8 +3,8 @@ from __future__ import annotations
import requests
import os
from .needs_auth.Openai import Openai
from ..typing import AsyncResult, Messages
from ..needs_auth.Openai import Openai
from ...typing import AsyncResult, Messages
class Ollama(Openai):
label = "Ollama"
@@ -37,4 +37,4 @@ class Ollama(Openai):
api_base: str = f"http://{host}:{port}/v1"
return super().create_async_generator(
model, messages, api_base=api_base, **kwargs
)
)

View File

@@ -0,0 +1,2 @@
from .Local import Local
from .Ollama import Ollama
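Together with the `from .local import *` added to `g4f/Provider/__init__.py` earlier in this diff, this new package `__init__.py` should keep the old public import path working. A quick check, assuming nothing else shadows the names:

```python
# Both spellings should resolve to the same class after this commit.
from g4f.Provider import Ollama                        # via the star re-export
from g4f.Provider.local import Ollama as LocalOllama   # new canonical location

assert Ollama is LocalOllama
```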

View File

@@ -2,10 +2,10 @@ from __future__ import annotations
import requests
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..typing import AsyncResult, Messages
from ..requests import StreamSession, raise_for_status
from ..image import ImageResponse
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ...typing import AsyncResult, Messages
from ...requests import StreamSession, raise_for_status
from ...image import ImageResponse
class DeepInfraImage(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://deepinfra.com"

View File

@@ -3,13 +3,13 @@ from __future__ import annotations
import json
from aiohttp import ClientSession, BaseConnector
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import get_connector
from ..errors import RateLimitError, ModelNotFoundError
from ..requests.raise_for_status import raise_for_status
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import get_connector
from ...errors import RateLimitError, ModelNotFoundError
from ...requests.raise_for_status import raise_for_status
from .HuggingChat import HuggingChat
from ..HuggingChat import HuggingChat
class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://huggingface.co/chat"

View File

@@ -8,12 +8,12 @@ from typing import Dict, List
from aiohttp import ClientSession, BaseConnector
from ..typing import AsyncResult, Messages, Cookies
from ..requests import raise_for_status, DEFAULT_HEADERS
from ..image import ImageResponse, ImagePreview
from ..errors import ResponseError
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt, get_connector, format_cookies
from ...typing import AsyncResult, Messages, Cookies
from ...requests import raise_for_status, DEFAULT_HEADERS
from ...image import ImageResponse, ImagePreview
from ...errors import ResponseError
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt, get_connector, format_cookies
class Sources():
def __init__(self, link_list: List[Dict[str, str]]) -> None:

View File

@@ -1,8 +1,8 @@
from __future__ import annotations
from ..typing import AsyncResult, Messages, Cookies
from .helper import format_prompt, get_cookies
from .MetaAI import MetaAI
from ...typing import AsyncResult, Messages, Cookies
from ..helper import format_prompt, get_cookies
from ..MetaAI import MetaAI
class MetaAIAccount(MetaAI):
needs_auth = True
@@ -20,4 +20,4 @@ class MetaAIAccount(MetaAI):
) -> AsyncResult:
cookies = get_cookies(".meta.ai", True, True) if cookies is None else cookies
async for chunk in cls(proxy).prompt(format_prompt(messages), cookies):
yield chunk
yield chunk

View File

@@ -1,32 +0,0 @@
from __future__ import annotations
import requests
from .Openai import Openai
from ...typing import AsyncResult, Messages
class OpenRouter(Openai):
label = "OpenRouter"
url = "https://openrouter.ai"
working = False
default_model = "mistralai/mistral-7b-instruct:free"
@classmethod
def get_models(cls):
if not cls.models:
url = 'https://openrouter.ai/api/v1/models'
models = requests.get(url).json()["data"]
cls.models = [model['id'] for model in models]
return cls.models
@classmethod
def create_async_generator(
cls,
model: str,
messages: Messages,
api_base: str = "https://openrouter.ai/api/v1",
**kwargs
) -> AsyncResult:
return super().create_async_generator(
model, messages, api_base=api_base, **kwargs
)

View File

@@ -1,11 +1,11 @@
from __future__ import annotations
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt, filter_none
from ..typing import AsyncResult, Messages
from ..requests import raise_for_status
from ..requests.aiohttp import StreamSession
from ..errors import ResponseError, MissingAuthError
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt, filter_none
from ...typing import AsyncResult, Messages
from ...requests import raise_for_status
from ...requests.aiohttp import StreamSession
from ...errors import ResponseError, MissingAuthError
class Replicate(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://replicate.com"
@@ -85,4 +85,4 @@ class Replicate(AsyncGeneratorProvider, ProviderModelMixin):
if new_text:
yield new_text
else:
yield "\n"
yield "\n"

View File

@@ -1,4 +1,7 @@
from .gigachat import *
from .DeepInfra import DeepInfra
from .DeepInfraImage import DeepInfraImage
from .Gemini import Gemini
from .Raycast import Raycast
from .Theb import Theb
@@ -7,6 +10,9 @@ from .OpenaiChat import OpenaiChat
from .Poe import Poe
from .Openai import Openai
from .Groq import Groq
from .OpenRouter import OpenRouter
#from .OpenaiAccount import OpenaiAccount
from .PerplexityApi import PerplexityApi
from .Replicate import Replicate
from .MetaAI import MetaAI
#from .MetaAIAccount import MetaAIAccount
from .HuggingFace import HuggingFace

View File

@@ -9,10 +9,10 @@ import json
from aiohttp import ClientSession, TCPConnector, BaseConnector
from g4f.requests import raise_for_status
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ...errors import MissingAuthError
from ..helper import get_connector
from ....typing import AsyncResult, Messages
from ...base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ....errors import MissingAuthError
from ...helper import get_connector
access_token = ""
token_expires_at = 0

View File

@@ -5,9 +5,9 @@ import re
import logging
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt
class Ai4Chat(AsyncGeneratorProvider, ProviderModelMixin):

View File

@@ -3,9 +3,9 @@ from __future__ import annotations
import json
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import get_random_string, format_prompt
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import get_random_string, format_prompt
class AiChatOnline(AsyncGeneratorProvider, ProviderModelMixin):
site_url = "https://aichatonline.org"

View File

@@ -3,10 +3,10 @@ from __future__ import annotations
import json
import base64
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..image import ImageResponse
from .helper import format_prompt
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ...image import ImageResponse
from ..helper import format_prompt
class AiChats(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://ai-chats.org"

View File

@@ -4,10 +4,10 @@ import json
import uuid
from aiohttp import ClientSession, ClientTimeout, ClientResponseError
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
from ..image import ImageResponse
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt
from ...image import ImageResponse
class AmigoChat(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://amigochat.io/chat/"

View File

@@ -2,10 +2,10 @@ from __future__ import annotations
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from ..requests import get_args_from_browser
from ..webdriver import WebDriver
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider
from ...requests import get_args_from_browser
from ...webdriver import WebDriver
class Aura(AsyncGeneratorProvider):
url = "https://openchat.team"

View File

@@ -1,10 +1,10 @@
from __future__ import annotations
import re
from ..requests import StreamSession, raise_for_status
from ..typing import Messages
from .base_provider import AsyncProvider, ProviderModelMixin
from .helper import format_prompt
from ...requests import StreamSession, raise_for_status
from ...typing import Messages
from ..base_provider import AsyncProvider, ProviderModelMixin
from ..helper import format_prompt
class Chatgpt4o(AsyncProvider, ProviderModelMixin):

View File

@@ -3,10 +3,10 @@ from __future__ import annotations
import re
import json
import asyncio
from ..requests import StreamSession, raise_for_status
from ..typing import Messages, AsyncGenerator
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
from ...requests import StreamSession, raise_for_status
from ...typing import Messages, AsyncGenerator
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt
class ChatgptFree(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://chatgptfree.ai"

View File

@@ -5,10 +5,10 @@ import time
import hashlib
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import get_random_hex, get_random_string
from ..requests.raise_for_status import raise_for_status
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import get_random_hex, get_random_string
from ...requests.raise_for_status import raise_for_status
class FlowGpt(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://flowgpt.com/chat"

View File

@@ -5,14 +5,14 @@ import asyncio
from aiohttp import ClientSession, ClientTimeout, ClientError
from typing import AsyncGenerator
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
class FreeNetfly(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://free.netfly.top"
api_endpoint = "/api/openai/v1/chat/completions"
working = True
working = False
default_model = 'gpt-3.5-turbo'
models = [
'gpt-3.5-turbo',

View File

@@ -2,9 +2,9 @@ from __future__ import annotations
import hashlib
import time
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt
class GPROChat(AsyncGeneratorProvider, ProviderModelMixin):
label = "GPROChat"

View File

@@ -4,15 +4,15 @@ import json
from typing import AsyncGenerator, Optional, List, Dict, Union, Any
from aiohttp import ClientSession, BaseConnector, ClientResponse
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import get_random_string, get_connector
from ..requests import raise_for_status
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import get_random_string, get_connector
from ...requests import raise_for_status
class Koala(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://koala.sh/chat"
api_endpoint = "https://koala.sh/api/gpt/"
working = True
working = False
supports_message_history = True
default_model = 'gpt-4o-mini'

View File

@@ -0,0 +1,12 @@
from .Ai4Chat import Ai4Chat
from .AiChatOnline import AiChatOnline
from .AiChats import AiChats
from .AmigoChat import AmigoChat
from .Aura import Aura
from .Chatgpt4o import Chatgpt4o
from .ChatgptFree import ChatgptFree
from .FlowGpt import FlowGpt
from .FreeNetfly import FreeNetfly
from .GPROChat import GPROChat
from .Koala import Koala
from .MyShell import MyShell

View File

@@ -1,4 +1,3 @@
from .MyShell import MyShell
from .PerplexityAi import PerplexityAi
from .Phind import Phind
from .TalkAi import TalkAi

View File

@@ -19,21 +19,16 @@ from .Provider import (
Cloudflare,
DarkAI,
DDG,
DeepInfra,
DeepInfraChat,
Editee,
Free2GPT,
FreeChatgpt,
FreeGpt,
FreeNetfly,
Gemini,
GeminiPro,
GizAI,
GigaChat,
GPROChat,
HuggingChat,
HuggingFace,
Koala,
Liaobots,
MagickPen,
MetaAI,
@@ -55,7 +50,6 @@ from .Provider import (
Pi,
Pizzagpt,
Reka,
Replicate,
ReplicateHome,
RubiksAI,
TeachAnything,
@@ -89,8 +83,6 @@ default = Model(
base_provider = "",
best_provider = IterListProvider([
DDG,
FreeChatgpt,
HuggingChat,
Pizzagpt,
ReplicateHome,
Upstage,
@@ -102,9 +94,9 @@ default = Model(
ChatGptEs,
ChatifyAI,
Cloudflare,
Editee,
AiMathGPT,
AIUncensored,
DarkAI,
])
)
@@ -133,13 +125,13 @@ gpt_35_turbo = Model(
gpt_4o = Model(
name = 'gpt-4o',
base_provider = 'OpenAI',
best_provider = IterListProvider([Blackbox, ChatGptEs, DarkAI, Editee, NexraChatGPT, Airforce, ChatGpt, Liaobots, OpenaiChat])
best_provider = IterListProvider([Blackbox, ChatGptEs, DarkAI, NexraChatGPT, Airforce, ChatGpt, Liaobots, OpenaiChat])
)
gpt_4o_mini = Model(
name = 'gpt-4o-mini',
base_provider = 'OpenAI',
best_provider = IterListProvider([DDG, ChatGptEs, FreeNetfly, Pizzagpt, MagickPen, RubiksAI, Liaobots, ChatGpt, Airforce, Koala, OpenaiChat])
best_provider = IterListProvider([DDG, ChatGptEs, FreeNetfly, Pizzagpt, MagickPen, RubiksAI, Liaobots, ChatGpt, Airforce, OpenaiChat])
)
gpt_4_turbo = Model(
@@ -200,13 +192,13 @@ llama_2_13b = Model(
llama_3_8b = Model(
name = "llama-3-8b",
base_provider = "Meta Llama",
best_provider = IterListProvider([Cloudflare, Airforce, DeepInfra, Replicate])
best_provider = IterListProvider([Cloudflare, Airforce])
)
llama_3_70b = Model(
name = "llama-3-70b",
base_provider = "Meta Llama",
best_provider = IterListProvider([ReplicateHome, Airforce, DeepInfra, Replicate])
best_provider = IterListProvider([ReplicateHome, Airforce])
)
# llama 3.1
@@ -219,13 +211,13 @@ llama_3_1_8b = Model(
llama_3_1_70b = Model(
name = "llama-3.1-70b",
base_provider = "Meta Llama",
best_provider = IterListProvider([DDG, HuggingChat, Blackbox, FreeGpt, TeachAnything, Free2GPT, DeepInfraChat, DarkAI, AiMathGPT, RubiksAI, Airforce, HuggingFace, PerplexityLabs])
best_provider = IterListProvider([DDG, DeepInfraChat, Blackbox, TeachAnything, DarkAI, AiMathGPT, RubiksAI, Airforce, HuggingChat, HuggingFace, PerplexityLabs])
)
llama_3_1_405b = Model(
name = "llama-3.1-405b",
base_provider = "Meta Llama",
best_provider = IterListProvider([DeepInfraChat, Blackbox, DarkAI, Airforce])
best_provider = IterListProvider([Blackbox, DarkAI, Airforce])
)
# llama 3.2
@@ -284,19 +276,19 @@ llamaguard_3_11b = Model(
mistral_7b = Model(
name = "mistral-7b",
base_provider = "Mistral",
best_provider = IterListProvider([DeepInfraChat, Airforce, DeepInfra])
best_provider = IterListProvider([Free2GPT, Airforce])
)
mixtral_8x7b = Model(
name = "mixtral-8x7b",
base_provider = "Mistral",
best_provider = IterListProvider([DDG, ReplicateHome, DeepInfraChat, Airforce, DeepInfra])
best_provider = IterListProvider([DDG, ReplicateHome, Airforce])
)
mixtral_8x22b = Model(
name = "mixtral-8x22b",
base_provider = "Mistral",
best_provider = IterListProvider([DeepInfraChat, Airforce])
best_provider = IterListProvider([Airforce])
)
mistral_nemo = Model(
@@ -305,12 +297,6 @@ mistral_nemo = Model(
best_provider = IterListProvider([HuggingChat, HuggingFace])
)
mistral_large = Model(
name = "mistral-large",
base_provider = "Mistral",
best_provider = IterListProvider([Editee])
)
### NousResearch ###
hermes_2 = Model(
@@ -342,7 +328,7 @@ phi_2 = Model(
phi_3_medium_4k = Model(
name = "phi-3-medium-4k",
base_provider = "Microsoft",
best_provider = DeepInfraChat
best_provider = None
)
phi_3_5_mini = Model(
@@ -356,7 +342,7 @@ phi_3_5_mini = Model(
gemini_pro = Model(
name = 'gemini-pro',
base_provider = 'Google DeepMind',
best_provider = IterListProvider([GeminiPro, Blackbox, AIChatFree, GPROChat, NexraGeminiPro, Editee, Airforce, Liaobots])
best_provider = IterListProvider([GeminiPro, Blackbox, AIChatFree, FreeGpt, NexraGeminiPro, Airforce, Liaobots])
)
gemini_flash = Model(
@@ -381,7 +367,7 @@ gemma_2b = Model(
gemma_2b_27b = Model(
name = 'gemma-2b-27b',
base_provider = 'Google',
best_provider = IterListProvider([DeepInfraChat, Airforce])
best_provider = IterListProvider([Airforce])
)
gemma_7b = Model(
@@ -428,7 +414,7 @@ claude_3_haiku = Model(
claude_3_5_sonnet = Model(
name = 'claude-3.5-sonnet',
base_provider = 'Anthropic',
best_provider = IterListProvider([Blackbox, Editee, Liaobots])
best_provider = IterListProvider([Blackbox, Liaobots])
)
@@ -458,7 +444,7 @@ blackboxai_pro = Model(
dbrx_instruct = Model(
name = 'dbrx-instruct',
base_provider = 'Databricks',
best_provider = IterListProvider([Airforce, DeepInfra])
best_provider = IterListProvider([Airforce])
)
@@ -470,14 +456,6 @@ command_r_plus = Model(
)
### iFlytek ###
sparkdesk_v1_1 = Model(
name = 'sparkdesk-v1.1',
base_provider = 'iFlytek',
best_provider = FreeChatgpt
)
### Qwen ###
# qwen 1_5
qwen_1_5_5b = Model(
@@ -501,7 +479,7 @@ qwen_1_5_8b = Model(
qwen_1_5_14b = Model(
name = 'qwen-1.5-14b',
base_provider = 'Qwen',
best_provider = IterListProvider([Cloudflare, FreeChatgpt])
best_provider = IterListProvider([Cloudflare])
)
# qwen 2
@@ -529,28 +507,6 @@ qwen = Model(
best_provider = NexraQwen
)
### Zhipu AI ###
glm_3_6b = Model(
name = 'glm-3-6b',
base_provider = 'Zhipu AI',
best_provider = FreeChatgpt
)
glm_4_9b = Model(
name = 'glm-4-9B',
base_provider = 'Zhipu AI',
best_provider = FreeChatgpt
)
### 01-ai ###
yi_1_5_9b = Model(
name = 'yi-1.5-9b',
base_provider = '01-ai',
best_provider = FreeChatgpt
)
### Upstage ###
solar_10_7b = Model(
name = 'solar-10-7b',
@@ -586,12 +542,6 @@ deepseek_coder = Model(
)
### WizardLM ###
wizardlm_2_7b = Model(
name = 'wizardlm-2-7b',
base_provider = 'WizardLM',
best_provider = DeepInfraChat
)
wizardlm_2_8x22b = Model(
name = 'wizardlm-2-8x22b',
base_provider = 'WizardLM',
@@ -610,7 +560,7 @@ llava_13b = Model(
minicpm_llama_3_v2_5 = Model(
name = 'minicpm-llama-3-v2.5',
base_provider = 'OpenBMB',
best_provider = DeepInfraChat
best_provider = None
)
@@ -618,7 +568,7 @@ minicpm_llama_3_v2_5 = Model(
lzlv_70b = Model(
name = 'lzlv-70b',
base_provider = 'Lzlv',
best_provider = DeepInfraChat
best_provider = None
)
@@ -626,7 +576,7 @@ lzlv_70b = Model(
openchat_3_6_8b = Model(
name = 'openchat-3.6-8b',
base_provider = 'OpenChat',
best_provider = DeepInfraChat
best_provider = None
)
@@ -634,7 +584,7 @@ openchat_3_6_8b = Model(
phind_codellama_34b_v2 = Model(
name = 'phind-codellama-34b-v2',
base_provider = 'Phind',
best_provider = DeepInfraChat
best_provider = None
)
@@ -642,7 +592,7 @@ phind_codellama_34b_v2 = Model(
dolphin_2_9_1_llama_3_70b = Model(
name = 'dolphin-2.9.1-llama-3-70b',
base_provider = 'Cognitive Computations',
best_provider = DeepInfraChat
best_provider = None
)
@@ -659,6 +609,12 @@ grok_2_mini = Model(
best_provider = Liaobots
)
grok_beta = Model(
name = 'grok-beta',
base_provider = 'x.ai',
best_provider = Liaobots
)
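For readers skimming these entries: `best_provider = IterListProvider([...])` means the listed providers are tried in order at request time, falling through on failure, so removing Editee or DeepInfraChat from a list only shortens the fallback chain. A hedged sketch of defining such an entry (the `IterListProvider` import path is an assumption):

```python
from g4f.models import Model
from g4f.Provider import Liaobots
from g4f.providers.retry_provider import IterListProvider  # assumed location

grok_beta = Model(
    name="grok-beta",
    base_provider="x.ai",
    best_provider=IterListProvider([Liaobots]),  # tried in order; first success wins
)
```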
### Perplexity AI ###
sonar_online = Model(
@@ -939,7 +895,6 @@ class ModelUtils:
'mixtral-8x7b': mixtral_8x7b,
'mixtral-8x22b': mixtral_8x22b,
'mistral-nemo': mistral_nemo,
'mistral-large': mistral_large,
### NousResearch ###
@@ -1001,36 +956,24 @@ class ModelUtils:
### GigaChat ###
'gigachat': gigachat,
### iFlytek ###
'sparkdesk-v1.1': sparkdesk_v1_1,
### Qwen ###
'qwen': qwen,
# qwen-1.5
# qwen 1.5
'qwen-1.5-5b': qwen_1_5_5b,
'qwen-1.5-7b': qwen_1_5_7b,
'qwen-1.5-8b': qwen_1_5_8b,
'qwen-1.5-14b': qwen_1_5_14b,
# qwen-2
# qwen 2
'qwen-2-72b': qwen_2_72b,
# qwen-2-5
# qwen 2-5
'qwen-2-5-7b': qwen_2_5_7b,
'qwen-2-5-72b': qwen_2_5_72b,
### Zhipu AI ###
'glm-3-6b': glm_3_6b,
'glm-4-9b': glm_4_9b,
### 01-ai ###
'yi-1.5-9b': yi_1_5_9b,
### Upstage ###
'solar-10-7b': solar_10_7b,
@@ -1051,7 +994,6 @@ class ModelUtils:
### WizardLM ###
'wizardlm-2-7b': wizardlm_2_7b,
'wizardlm-2-8x22b': wizardlm_2_8x22b,
@@ -1078,6 +1020,7 @@ class ModelUtils:
### x.ai ###
'grok-2': grok_2,
'grok-2-mini': grok_2_mini,
'grok-beta': grok_beta,
### Perplexity AI ###
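Finally, `ModelUtils.convert` is the string-to-`Model` registry that client code consults, so the `'grok-beta': grok_beta` entry above is what makes the new name resolvable. A usage sketch:

```python
from g4f.models import ModelUtils

model = ModelUtils.convert["grok-beta"]   # entry added in this commit
print(model.name, model.base_provider)    # -> grok-beta x.ai
```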