Merge pull request #2262 from kqlio67/main

Enhance G4F: Provider Removal, Documentation Updates, and Provider Enhancements
Tekky committed on 2024-10-03 13:20:46 +02:00 (via GitHub)
commit 6d19ba6956
13 changed files with 655 additions and 446 deletions

docs/providers-and-models.md

@ -14,19 +14,18 @@
|[aichatfree.info](https://aichatfree.info)|`g4f.Provider.AIChatFree`|`gemini-pro`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[aichatonline.org](https://aichatonline.org)|`g4f.Provider.AiChatOnline`|`gpt-4o-mini`|❌|❌|?|![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
|[ai-chats.org](https://ai-chats.org)|`g4f.Provider.AiChats`|`gpt-4`|`dalle`|❌|?|![Captcha](https://img.shields.io/badge/Captcha-f48d37)|❌|
|[api.airforce](https://api.airforce)|`g4f.Provider.Airforce`|`llama-2-13b, llama-3-70b, llama-3-8b, llama-3.1-405b, llama-3.1-70b, llama-3.1-8b, mixtral-8x7b, mixtral-8x22b, mistral-7b, mixtral-8x7b-dpo, qwen-1.5-72b, qwen-1.5-110b, qwen-2-72b, gemma-2b, gemma-2b-9b, gemma-2b-27b, deepseek, yi-34b, wizardlm-2-8x22b, solar-10-7b, sh-n-7b, sparkdesk-v1.1, gpt-4o, gpt-4o-mini, gpt-3.5-turbo, gemini-flash, gemini-pro, dbrx-instruct`|`flux, flux-realism, flux-anime, flux-3d, flux-disney, flux-pixel, flux-4o, any-dark, dalle-3`|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[api.airforce](https://api.airforce)|`g4f.Provider.Airforce`|`gpt-4, gpt-4-turbo, gpt-4o-mini, gpt-3.5-turbo, gpt-4o, claude-3-haiku, claude-3-sonnet, claude-3-5-sonnet, claude-3-opus, llama-3-70b, llama-3-8b, llama-2-13b, llama-3.1-405b, llama-3.1-70b, llama-3.1-8b, llamaguard-2-8b, llamaguard-7b, llama-3.2-90b, mixtral-8x7b, mixtral-8x22b, mistral-7b, qwen-1.5-7b, qwen-1.5-14b, qwen-1.5-72b, qwen-1.5-110b, qwen-2-72b, gemma-2b, gemma-2-9b, gemma-2-27b, gemini-flash, gemini-pro, deepseek, mixtral-8x7b-dpo, yi-34b, wizardlm-2-8x22b, solar-10.7b, mythomax-l2-13b, cosmosrp`|`flux, flux-realism, flux-anime, flux-3d, flux-disney, flux-pixel, flux-4o, any-dark, dalle-3`|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[allyfy.chat](https://allyfy.chat/)|`g4f.Provider.Allyfy`|`gpt-3.5-turbo`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[openchat.team](https://openchat.team/)|`g4f.Provider.Aura`|✔|❌|❌|?|![Disabled](https://img.shields.io/badge/Disabled-red)|❌|
|[bing.com](https://bing.com/chat)|`g4f.Provider.Bing`|`gpt-4`|✔|`gpt-4-vision`|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌+✔|
|[bing.com/images](https://www.bing.com/images/create)|`g4f.Provider.BingCreateImages`|❌|✔|❌|❌|![Active](https://img.shields.io/badge/Active-brightgreen)|✔|
|[chat18.aichatos8.com](https://chat18.aichatos8.com)|`g4f.Provider.Binjie`|`gpt-4`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[chat.bixin123.com](https://chat.bixin123.com)|`g4f.Provider.Bixin123`|`gpt-3.5-turbo, gpt-4-turbo, qwen-turbo`|❌|❌|?|![Inactive](https://img.shields.io/badge/Inactive-red)|❌|
|[blackbox.ai](https://www.blackbox.ai)|`g4f.Provider.Blackbox`|`blackbox, gemini-flash, llama-3.1-8b, llama-3.1-70b, llama-3.1-405b, gpt-4o, gemini-pro, claude-3.5-sonnet`|`flux`|✔|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[blackbox.ai](https://www.blackbox.ai)|`g4f.Provider.Blackbox`|`blackbox, gemini-flash, llama-3.1-8b, llama-3.1-70b, gpt-4o, gemini-pro, claude-3.5-sonnet`|`flux`|✔|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[chatgot.one](https://www.chatgot.one/)|`g4f.Provider.ChatGot`|`gemini-pro`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[chatgpt.com](https://chatgpt.com)|`g4f.Provider.ChatGpt`|`?`|`?`|`?`|?|![Unknown](https://img.shields.io/badge/Unknown-grey) |❌|
|[chatgpt.es](https://chatgpt.es)|`g4f.Provider.ChatGptEs`|`gpt-4o, gpt-4o-mini`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[chatgpt4online.org](https://chatgpt4online.org)|`g4f.Provider.Chatgpt4Online`|`gpt-4`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[chatgpt4o.one](https://chatgpt4o.one)|`g4f.Provider.Chatgpt4o`|`gpt-4o-mini`|❌|❌|❌|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[chatgpt4o.one](https://chatgpt4o.one)|`g4f.Provider.Chatgpt4o`|✔|❌|❌|❌|![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
|[chatgptfree.ai](https://chatgptfree.ai)|`g4f.Provider.ChatgptFree`|`gpt-4o-mini`|❌|❌|?|![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
|[app.chathub.gg](https://app.chathub.gg)|`g4f.Provider.ChatHub`|`llama-3.1-8b, mixtral-8x7b, gemma-2, sonar-online`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[duckduckgo.com](https://duckduckgo.com/duckchat/v1/chat)|`g4f.Provider.DDG`|`gpt-4o-mini, claude-3-haiku, llama-3.1-70b, mixtral-8x7b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
@ -43,7 +42,7 @@
|[developers.sber.ru](https://developers.sber.ru/gigachat)|`g4f.Provider.GigaChat`|✔|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|✔|
|[gprochat.com](https://gprochat.com)|`g4f.Provider.GPROChat`|`gemini-pro`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[console.groq.com/playground](https://console.groq.com/playground)|`g4f.Provider.Groq`|✔|❌|❌|?|![Active](https://img.shields.io/badge/Active-brightgreen)|✔|
|[huggingface.co/chat](https://huggingface.co/chat)|`g4f.Provider.HuggingChat`|`llama-3.1-70b, command-r-plus, mixtral-8x7b, mixtral-8x7b-dpo, mistral-7b, phi-3-mini-4k`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[huggingface.co/chat](https://huggingface.co/chat)|`g4f.Provider.HuggingChat`|`llama-3.1-70b, command-r-plus, qwen-2-72b, llama-3.2-11b, hermes-3, mistral-nemo, phi-3.5-mini`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[huggingface.co](https://huggingface.co/chat)|`g4f.Provider.HuggingFace`|✔|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[koala.sh/chat](https://koala.sh/chat)|`g4f.Provider.Koala`|`gpt-4o-mini`|❌|❌|?|![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
|[liaobots.work](https://liaobots.work)|`g4f.Provider.Liaobots`|`gpt-3.5-turbo, gpt-4o-mini, gpt-4o, gpt-4-turbo, grok-2, grok-2-mini, claude-3-opus, claude-3-sonnet, claude-3-5-sonnet, claude-3-haiku, claude-2.1, gemini-flash, gemini-pro`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
@ -82,10 +81,10 @@
|Model|Base Provider|Provider|Website|
|--|--|--|-|
|gpt-3|OpenAI|1+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-base)|
|gpt-3.5-turbo|OpenAI|6+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-3-5-turbo)|
|gpt-4|OpenAI|6+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4)|
|gpt-4-turbo|OpenAI|2+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4)|
|gpt-4o-mini|OpenAI|12+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4o-mini)|
|gpt-3.5-turbo|OpenAI|4+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-3-5-turbo)|
|gpt-4|OpenAI|28+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4)|
|gpt-4-turbo|OpenAI|4+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4)|
|gpt-4o-mini|OpenAI|11+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4o-mini)|
|gpt-4o|OpenAI|6+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4o)|
|llama-2-13b|Meta Llama|1+ Providers|[llama.com](https://www.llama.com/llama2/)|
|llama-3|Meta Llama|4+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3/)|
@ -94,26 +93,30 @@
|llama-3.1|Meta Llama|16+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3-1/)|
|llama-3.1-8b|Meta Llama|5+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3-1/)|
|llama-3.1-70b|Meta Llama|9+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3-1/)|
|llama-3.1-405b|Meta Llama|3+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3-1/)|
|llama-3.1-405b|Meta Llama|2+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3-1/)|
|llama-3.2-90b|Meta Llama|1+ Providers|[ai.meta.com](https://ai.meta.com/blog/llama-3-2-connect-2024-vision-edge-mobile-devices/)|
|llamaguard-7b|Meta Llama|1+ Providers|[huggingface.co](https://huggingface.co/meta-llama/LlamaGuard-7b)|
|llamaguard-2-8b|Meta Llama|1+ Providers|[huggingface.co](https://huggingface.co/meta-llama/Meta-Llama-Guard-2-8B)|
|mistral-7b|Mistral AI|3+ Providers|[mistral.ai](https://mistral.ai/news/announcing-mistral-7b/)|
|mixtral-8x7b|Mistral AI|6+ Providers|[mistral.ai](https://mistral.ai/news/mixtral-of-experts/)|
|mixtral-8x22b|Mistral AI|2+ Providers|[mistral.ai](https://mistral.ai/news/mixtral-8x22b/)|
|mixtral-8x7b-dpo|NousResearch|2+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO)|
|mixtral-8x7b-dpo|NousResearch|1+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO)|
|gemini|Google DeepMind|9+ Providers|[deepmind.google](http://deepmind.google/technologies/gemini/)|
|gemini-flash|Google DeepMind|3+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/flash/)|
|gemini-pro|Google DeepMind|6+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/pro/)|
|gemma-2b|Google|4+ Providers|[huggingface.co](https://huggingface.co/google/gemma-2b)|
|gemma-2b-9b|Google|1+ Providers|[huggingface.co](https://huggingface.co/google/gemma-2-9b)|
|gemma-2b-27b|Google|2+ Providers|[huggingface.co](https://huggingface.co/google/gemma-2-27b)|
|gemma-2|Google|1+ Providers|[huggingface.co](https://huggingface.co/blog/gemma2)|
|claude-2|Anthropic|1+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-2)|
|gemma-2|Google|2+ Providers|[huggingface.co](https://huggingface.co/blog/gemma2)|
|gemma-2-27b|Google|1+ Providers|[huggingface.co](https://huggingface.co/blog/gemma2)|
|claude-2|Anthropic|2+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-2)|
|claude-2.0|Anthropic|1+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-2)|
|claude-3|Anthropic|5+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-family)|
|claude-3-haiku|Anthropic|2+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-haiku)|
|claude-3-sonnet|Anthropic|1+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-5-sonnet)|
|claude-3-opus|Anthropic|1+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-family)|
|claude-3.5|Anthropic|2+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-5-sonnet)|
|claude-3.5-sonnet|Anthropic|1+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-5-sonnet)|
|claude-3|Anthropic|7+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-family)|
|claude-3-haiku|Anthropic|3+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-haiku)|
|claude-3-sonnet|Anthropic|2+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-family)|
|claude-3-opus|Anthropic|2+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-family)|
|claude-3.5|Anthropic|4+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-5-sonnet)|
|claude-3.5-sonnet|Anthropic|3+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-5-sonnet)|
|blackbox|Blackbox AI|1+ Providers|[docs.blackbox.chat](https://docs.blackbox.chat/blackbox-ai-1)|
|yi-34b|01-ai|1+ Providers|[huggingface.co](https://huggingface.co/01-ai/Yi-34B)|
|yi-1.5-9b|01-ai|1+ Providers|[huggingface.co](https://huggingface.co/01-ai/Yi-1.5-9B)|
@ -121,9 +124,10 @@
|phi-3-medium-4k|Microsoft|1+ Providers|[huggingface.co](https://huggingface.co/microsoft/Phi-3-medium-4k-instruct)|
|dbrx-instruct|Databricks|1+ Providers|[huggingface.co](https://huggingface.co/databricks/dbrx-instruct)|
|command-r-plus|CohereForAI|1+ Providers|[docs.cohere.com](https://docs.cohere.com/docs/command-r-plus)|
|sparkdesk-v1.1|iFlytek|2+ Providers|[xfyun.cn](https://www.xfyun.cn/doc/spark/Guide.html)|
|qwen|Qwen|7+ Providers|[huggingface.co](https://huggingface.co/Qwen)|
|qwen-1.5-14b|Qwen|1+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen1.5-14B)|
|sparkdesk-v1.1|iFlytek|1+ Providers|[xfyun.cn](https://www.xfyun.cn/doc/spark/Guide.html)|
|qwen|Qwen|6+ Providers|[huggingface.co](https://huggingface.co/Qwen)|
|qwen-1.5-14b|Qwen|2+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen1.5-14B)|
|qwen-1.5-7b|Qwen|1+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen1.5-7B)|
|qwen-1.5-72b|Qwen|1+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen1.5-72B)|
|qwen-2-72b|Qwen|2+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen2-72B)|
|qwen-turbo|Qwen|1+ Providers|[qwenlm.github.io](https://qwenlm.github.io/blog/qwen2.5/)|
@ -148,6 +152,8 @@
|grok-2|x.ai|1+ Providers|[x.ai](https://x.ai/blog/grok-2)|
|sonar-online|Perplexity AI|2+ Providers|[docs.perplexity.ai](https://docs.perplexity.ai/)|
|sonar-chat|Perplexity AI|1+ Providers|[docs.perplexity.ai](https://docs.perplexity.ai/)|
|mythomax-l2-13b|Gryphe|1+ Providers|[huggingface.co](https://huggingface.co/Gryphe/MythoMax-L2-13b)|
|cosmosrp|Pawan|1+ Providers|[huggingface.co](https://huggingface.co/PawanKrd/CosmosRP-8k)|
---
### ImageModel
|Model|Base Provider|Provider|Website|

g4f/Provider/Airforce.py

@ -1,76 +1,199 @@
from __future__ import annotations
from aiohttp import ClientSession, ClientResponseError
import random
import json
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..image import ImageResponse
from .helper import format_prompt
from ..errors import ResponseStatusError
def split_long_message(message: str, max_length: int = 4000) -> list[str]:
return [message[i:i+max_length] for i in range(0, len(message), max_length)]
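# Illustrative behavior of the helper above (values chosen for the example):
# a 9,000-character message comes back as three chunks of 4,000, 4,000 and
# 1,000 characters, which _generate_text then sends as separate user turns.
# assert [len(p) for p in split_long_message("x" * 9000)] == [4000, 4000, 1000]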
class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://api.airforce"
text_api_endpoint = "https://api.airforce/chat/completions"
image_api_endpoint = "https://api.airforce/imagine2"
text_api_endpoint = "https://api.airforce/chat/completions"
working = True
default_model = 'llama-3-70b-chat'
supports_gpt_35_turbo = True
supports_gpt_4 = True
supports_stream = True
supports_system_message = True
supports_message_history = True
default_model = 'llama-3-70b-chat'
text_models = [
# Open source models
'llama-2-13b-chat',
'llama-3-70b-chat',
'llama-3-70b-chat-turbo',
'llama-3-70b-chat-lite',
'llama-3-8b-chat',
'llama-3-8b-chat-turbo',
'llama-3-8b-chat-lite',
'llama-3.1-405b-turbo',
'llama-3.1-70b-turbo',
'llama-3.1-8b-turbo',
'LlamaGuard-2-8b',
'Llama-Guard-7b',
'Meta-Llama-Guard-3-8B',
'Mixtral-8x7B-Instruct-v0.1',
'Mixtral-8x22B-Instruct-v0.1',
'Mistral-7B-Instruct-v0.1',
'Mistral-7B-Instruct-v0.2',
'Mistral-7B-Instruct-v0.3',
'Qwen1.5-72B-Chat',
'Qwen1.5-110B-Chat',
'Qwen2-72B-Instruct',
'gemma-2b-it',
'gemma-2-9b-it',
'gemma-2-27b-it',
'dbrx-instruct',
'deepseek-llm-67b-chat',
'Nous-Hermes-2-Mixtral-8x7B-DPO',
'Nous-Hermes-2-Yi-34B',
'WizardLM-2-8x22B',
'SOLAR-10.7B-Instruct-v1.0',
'StripedHyena-Nous-7B',
'sparkdesk',
# Other models
'chatgpt-4o-latest',
'gpt-4',
'gpt-4-turbo',
'gpt-4o-mini-2024-07-18',
'gpt-4o-mini',
'gpt-4o',
'gpt-3.5-turbo',
'gpt-3.5-turbo-0125',
'gpt-3.5-turbo-1106',
'gpt-3.5-turbo-16k',
'gpt-3.5-turbo-0613',
'gpt-3.5-turbo-16k-0613',
'gemini-1.5-flash',
'gemini-1.5-pro',
]
text_models = [
# anthropic
'claude-3-haiku-20240307',
'claude-3-sonnet-20240229',
'claude-3-5-sonnet-20240620',
'claude-3-opus-20240229',
# openai
'chatgpt-4o-latest',
'gpt-4',
#'gpt-4-0613',
'gpt-4-turbo',
'gpt-4o-mini-2024-07-18',
'gpt-4o-mini',
'gpt-3.5-turbo',
'gpt-3.5-turbo-0125',
'gpt-3.5-turbo-1106',
#'gpt-3.5-turbo-16k', # No response from the API.
#'gpt-3.5-turbo-0613', # No response from the API.
#'gpt-3.5-turbo-16k-0613', # No response from the API.
'gpt-4o',
#'o1-mini', # No response from the API.
# meta-llama
'llama-3-70b-chat',
'llama-3-70b-chat-turbo',
'llama-3-8b-chat',
'llama-3-8b-chat-turbo',
'llama-3-70b-chat-lite',
'llama-3-8b-chat-lite',
#'llama-2-70b-chat', # Failed to load response after multiple retries.
'llama-2-13b-chat',
#'llama-2-7b-chat', # Failed to load response after multiple retries.
'llama-3.1-405b-turbo',
'llama-3.1-70b-turbo',
'llama-3.1-8b-turbo',
'LlamaGuard-2-8b',
'Llama-Guard-7b',
'Llama-3.2-90B-Vision-Instruct-Turbo',
# codellama
#'CodeLlama-7b-Python-hf', # Failed to load response after multiple retries.
#'CodeLlama-7b-Python',
#'CodeLlama-13b-Python-hf', # Failed to load response after multiple retries.
#'CodeLlama-34b-Python-hf', # Failed to load response after multiple retries.
#'CodeLlama-70b-Python-hf', # Failed to load response after multiple retries.
# 01-ai
#'Yi-34B-Chat', # Failed to load response after multiple retries.
#'Yi-34B', # Failed to load response after multiple retries.
#'Yi-6B', # Failed to load response after multiple retries.
# mistral-ai
#'Mixtral-8x7B-v0.1',
#'Mixtral-8x22B', # Failed to load response after multiple retries.
'Mixtral-8x7B-Instruct-v0.1',
'Mixtral-8x22B-Instruct-v0.1',
'Mistral-7B-Instruct-v0.1',
'Mistral-7B-Instruct-v0.2',
'Mistral-7B-Instruct-v0.3',
# openchat
#'openchat-3.5', # Failed to load response after multiple retries.
# wizardlm
#'WizardLM-13B-V1.2', # Failed to load response after multiple retries.
#'WizardCoder-Python-34B-V1.0', # Failed to load response after multiple retries.
# qwen
#'Qwen1.5-0.5B-Chat', # Failed to load response after multiple retries.
#'Qwen1.5-1.8B-Chat', # Failed to load response after multiple retries.
#'Qwen1.5-4B-Chat', # Failed to load response after multiple retries.
'Qwen1.5-7B-Chat',
'Qwen1.5-14B-Chat',
'Qwen1.5-72B-Chat',
'Qwen1.5-110B-Chat',
'Qwen2-72B-Instruct',
# google
'gemma-2b-it',
#'gemma-7b-it', # Failed to load response after multiple retries.
#'gemma-2b', # Failed to load response after multiple retries.
#'gemma-7b', # Failed to load response after multiple retries.
'gemma-2-9b-it', # fix bug
'gemma-2-27b-it',
# gemini
'gemini-1.5-flash',
'gemini-1.5-pro',
# databricks
'dbrx-instruct',
# lmsys
#'vicuna-7b-v1.5', # Failed to load response after multiple retries.
#'vicuna-13b-v1.5', # Failed to load response after multiple retries.
# cognitivecomputations
#'dolphin-2.5-mixtral-8x7b', # Failed to load response after multiple retries.
# deepseek-ai
#'deepseek-coder-33b-instruct', # No response from the API.
#'deepseek-coder-67b-instruct', # Failed to load response after multiple retries.
'deepseek-llm-67b-chat',
# NousResearch
#'Nous-Capybara-7B-V1p9', # Failed to load response after multiple retries.
'Nous-Hermes-2-Mixtral-8x7B-DPO',
#'Nous-Hermes-2-Mixtral-8x7B-SFT', # Failed to load response after multiple retries.
#'Nous-Hermes-llama-2-7b', # Failed to load response after multiple retries.
#'Nous-Hermes-Llama2-13b', # Failed to load response after multiple retries.
'Nous-Hermes-2-Yi-34B',
# Open-Orca
#'Mistral-7B-OpenOrca', # Failed to load response after multiple retries.
# togethercomputer
#'alpaca-7b', # Failed to load response after multiple retries.
# teknium
#'OpenHermes-2-Mistral-7B', # Failed to load response after multiple retries.
#'OpenHermes-2.5-Mistral-7B', # Failed to load response after multiple retries.
# microsoft
'WizardLM-2-8x22B',
# Nexusflow
#'NexusRaven-V2-13B', # Failed to load response after multiple retries.
# Phind
#'Phind-CodeLlama-34B-v2', # Failed to load response after multiple retries.
# Snowflake
#'snowflake-arctic-instruct', # No response from the API.
# upstage
'SOLAR-10.7B-Instruct-v1.0',
# togethercomputer
#'StripedHyena-Hessian-7B', # Failed to load response after multiple retries.
#'StripedHyena-Nous-7B', # Failed to load response after multiple retries.
#'Llama-2-7B-32K-Instruct', # Failed to load response after multiple retries.
#'CodeLlama-13b-Instruct', # No response from the API.
#'evo-1-131k-base', # Failed to load response after multiple retries.
#'OLMo-7B-Instruct', # Failed to load response after multiple retries.
# garage-bAInd
#'Platypus2-70B-instruct', # Failed to load response after multiple retries.
# snorkelai
#'Snorkel-Mistral-PairRM-DPO', # Failed to load response after multiple retries.
# Undi95
#'ReMM-SLERP-L2-13B', # Failed to load response after multiple retries.
# Gryphe
'MythoMax-L2-13b',
# Austism
#'chronos-hermes-13b', # Failed to load response after multiple retries.
# Undi95
#'Toppy-M-7B', # Failed to load response after multiple retries.
# iFlytek
#'sparkdesk', # Failed to load response after multiple retries.
# pawan
'cosmosrp',
]
image_models = [
'flux',
'flux-realism',
@ -85,158 +208,206 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
models = [
*text_models,
*image_models
*image_models,
]
model_aliases = {
# Open source models
"llama-2-13b": "llama-2-13b-chat",
# anthropic
"claude-3-haiku": "claude-3-haiku-20240307",
"claude-3-sonnet": "claude-3-sonnet-20240229",
"claude-3-5-sonnet": "claude-3-5-sonnet-20240620",
"claude-3-opus": "claude-3-opus-20240229",
# openai
"gpt-4o": "chatgpt-4o-latest",
"gpt-4o-mini": "gpt-4o-mini-2024-07-18",
"gpt-3.5-turbo": "gpt-3.5-turbo-0125",
"gpt-3.5-turbo": "gpt-3.5-turbo-1106",
# meta-llama
"llama-3-70b": "llama-3-70b-chat",
"llama-3-70b": "llama-3-70b-chat-turbo",
"llama-3-70b": "llama-3-70b-chat-lite",
"llama-3-8b": "llama-3-8b-chat",
"llama-3-8b": "llama-3-8b-chat-turbo",
"llama-3-70b": "llama-3-70b-chat-lite",
"llama-3-8b": "llama-3-8b-chat-lite",
"llama-2-13b": "llama-2-13b-chat",
"llama-3.1-405b": "llama-3.1-405b-turbo",
"llama-3.1-70b": "llama-3.1-70b-turbo",
"llama-3.1-8b": "llama-3.1-8b-turbo",
"llamaguard-2-8b": "LlamaGuard-2-8b",
"llamaguard-7b": "Llama-Guard-7b",
"llama-3.2-90b": "Llama-3.2-90B-Vision-Instruct-Turbo",
# mistral-ai
"mixtral-8x7b": "Mixtral-8x7B-Instruct-v0.1",
"mixtral-8x22b": "Mixtral-8x22B-Instruct-v0.1",
"mistral-7b": "Mistral-7B-Instruct-v0.1",
"mistral-7b": "Mistral-7B-Instruct-v0.2",
"mistral-7b": "Mistral-7B-Instruct-v0.3",
"mixtral-8x7b-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO",
# qwen
"qwen-1.5-7b": "Qwen1.5-7B-Chat",
"qwen-1.5-14b": "Qwen1.5-14B-Chat",
"qwen-1.5-72b": "Qwen1.5-72B-Chat",
"qwen-1.5-110b": "Qwen1.5-110B-Chat",
"qwen-2-72b": "Qwen2-72B-Instruct",
"gemma-2b": "gemma-2b-it",
"gemma-2b-9b": "gemma-2-9b-it",
"gemma-2b-27b": "gemma-2-27b-it",
"deepseek": "deepseek-llm-67b-chat",
"yi-34b": "Nous-Hermes-2-Yi-34B",
"wizardlm-2-8x22b": "WizardLM-2-8x22B",
"solar-10-7b": "SOLAR-10.7B-Instruct-v1.0",
"sh-n-7b": "StripedHyena-Nous-7B",
"sparkdesk-v1.1": "sparkdesk",
# Other models
"gpt-4o": "chatgpt-4o-latest",
"gpt-4o-mini": "gpt-4o-mini-2024-07-18",
"gpt-3.5-turbo": "gpt-3.5-turbo-0125",
"gpt-3.5-turbo": "gpt-3.5-turbo-1106",
"gpt-3.5-turbo": "gpt-3.5-turbo-16k",
"gpt-3.5-turbo": "gpt-3.5-turbo-0613",
"gpt-3.5-turbo": "gpt-3.5-turbo-16k-0613",
# google
"gemma-2b": "gemma-2b-it",
"gemma-2-9b": "gemma-2-9b-it",
"gemma-2-27b": "gemma-2-27b-it",
# gemini
"gemini-flash": "gemini-1.5-flash",
"gemini-pro": "gemini-1.5-pro",
# Image models
"dalle-3": "dall-e-3",
# deepseek-ai
"deepseek": "deepseek-llm-67b-chat",
# NousResearch
"mixtral-8x7b-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO",
"yi-34b": "Nous-Hermes-2-Yi-34B",
# microsoft
"wizardlm-2-8x22b": "WizardLM-2-8x22B",
# upstage
"solar-10.7b": "SOLAR-10.7B-Instruct-v1.0",
# Gryphe
"mythomax-l2-13b": "MythoMax-L2-13b",
}
@classmethod
def get_model(cls, model: str) -> str:
if model in cls.models:
return model
elif model in cls.model_aliases:
return cls.model_aliases.get(model, cls.default_model)
else:
return cls.default_model
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
seed: int = None,
size: str = "1:1",
stream: bool = False,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
# If the model is an image model, use the image API
if model in cls.image_models:
async for result in cls._generate_image(model, messages, proxy, seed, size):
yield result
# If the model is a text model, use the text API
elif model in cls.text_models:
async for result in cls._generate_text(model, messages, proxy, stream):
yield result
@classmethod
async def _generate_image(
cls,
model: str,
messages: Messages,
proxy: str = None,
seed: int = None,
size: str = "1:1",
**kwargs
) -> AsyncResult:
headers = {
"accept": "*/*",
"accept-language": "en-US,en;q=0.9",
"content-type": "application/json",
"origin": "https://api.airforce",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36",
"authorization": "Bearer null",
"cache-control": "no-cache",
"pragma": "no-cache",
"priority": "u=1, i",
"referer": "https://llmplayground.net/",
"sec-ch-ua": '"Not;A=Brand";v="24", "Chromium";v="128"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Linux"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "cross-site",
"origin": "https://llmplayground.net",
"user-agent": "Mozilla/5.0"
}
if model in cls.image_models:
async for item in cls.generate_image(model, messages, headers, proxy, **kwargs):
yield item
else:
async for item in cls.generate_text(model, messages, headers, proxy, **kwargs):
yield item
if seed is None:
seed = random.randint(0, 100000)
@classmethod
async def generate_text(cls, model: str, messages: Messages, headers: dict, proxy: str, **kwargs) -> AsyncResult:
async with ClientSession() as session:
data = {
"messages": [{"role": "user", "content": message['content']} for message in messages],
# Assume the first message is the prompt for the image
prompt = messages[0]['content']
async with ClientSession(headers=headers) as session:
params = {
"model": model,
"max_tokens": kwargs.get('max_tokens', 4096),
"temperature": kwargs.get('temperature', 1),
"top_p": kwargs.get('top_p', 1),
"stream": True
"prompt": prompt,
"size": size,
"seed": seed
}
async with session.get(f"{cls.image_api_endpoint}", params=params, proxy=proxy) as response:
response.raise_for_status()
content_type = response.headers.get('Content-Type', '').lower()
try:
async with session.post(cls.text_api_endpoint, json=data, headers=headers, proxy=proxy) as response:
response.raise_for_status()
async for line in response.content:
if line:
line = line.decode('utf-8').strip()
if line.startswith("data: "):
if line == "data: [DONE]":
break
try:
data = json.loads(line[6:])
if 'choices' in data and len(data['choices']) > 0:
delta = data['choices'][0].get('delta', {})
if 'content' in delta:
content = delta['content']
if "One message exceeds the 1000chars per message limit" in content:
raise ResponseStatusError(
"Message too long",
400,
"Please try a shorter message."
)
yield content
except json.JSONDecodeError:
continue
except ResponseStatusError as e:
raise e
except Exception as e:
raise ResponseStatusError(str(e), 500, "An unexpected error occurred")
if 'application/json' in content_type:
async for chunk in response.content.iter_chunked(1024):
if chunk:
yield chunk.decode('utf-8')
elif 'image' in content_type:
image_data = b""
async for chunk in response.content.iter_chunked(1024):
if chunk:
image_data += chunk
image_url = f"{cls.image_api_endpoint}?model={model}&prompt={prompt}&size={size}&seed={seed}"
alt_text = f"Generated image for prompt: {prompt}"
yield ImageResponse(images=image_url, alt=alt_text)
@classmethod
async def generate_image(cls, model: str, messages: Messages, headers: dict, proxy: str, **kwargs) -> AsyncResult:
prompt = messages[-1]['content'] if messages else ""
params = {
"prompt": prompt,
"size": kwargs.get("size", "1:1"),
"seed": kwargs.get("seed"),
"model": model
async def _generate_text(
cls,
model: str,
messages: Messages,
proxy: str = None,
stream: bool = False,
**kwargs
) -> AsyncResult:
headers = {
"accept": "*/*",
"accept-language": "en-US,en;q=0.9",
"authorization": "Bearer missing api key",
"content-type": "application/json",
"user-agent": "Mozilla/5.0"
}
params = {k: v for k, v in params.items() if v is not None}
try:
async with ClientSession(headers=headers) as session:
async with session.get(cls.image_api_endpoint, params=params, proxy=proxy) as response:
async with ClientSession(headers=headers) as session:
formatted_prompt = cls._format_messages(messages)
prompt_parts = split_long_message(formatted_prompt)
full_response = ""
for part in prompt_parts:
data = {
"messages": [{"role": "user", "content": part}],
"model": model,
"max_tokens": 4096,
"temperature": 1,
"top_p": 1,
"stream": stream
}
async with session.post(cls.text_api_endpoint, json=data, proxy=proxy) as response:
response.raise_for_status()
content = await response.read()
if response.content_type.startswith('image/'):
image_url = str(response.url)
yield ImageResponse(image_url, prompt)
part_response = ""
if stream:
async for line in response.content:
if line:
line = line.decode('utf-8').strip()
if line.startswith("data: ") and line != "data: [DONE]":
json_data = json.loads(line[6:])
content = json_data['choices'][0]['delta'].get('content', '')
part_response += content
else:
try:
text = content.decode('utf-8', errors='ignore')
raise ResponseStatusError("Image generation failed", response.status, text)
except Exception as decode_error:
raise ResponseStatusError("Decoding error", 500, str(decode_error))
except ClientResponseError as e:
raise ResponseStatusError(f"HTTP {e.status}", e.status, e.message)
except Exception as e:
raise ResponseStatusError("Unexpected error", 500, str(e))
json_data = await response.json()
content = json_data['choices'][0]['message']['content']
part_response = content
full_response += part_response
yield full_response
@classmethod
def _format_messages(cls, messages: Messages) -> str:
"""Formats messages for text generation."""
return " ".join([msg['content'] for msg in messages])

g4f/Provider/Allyfy.py

@ -9,8 +9,8 @@ from .helper import format_prompt
class Allyfy(AsyncGeneratorProvider):
url = "https://chatbot.allyfy.chat"
api_endpoint = "/api/v1/message/stream/super/chat"
url = "https://allyfy.chat"
api_endpoint = "https://chatbot.allyfy.chat/api/v1/message/stream/super/chat"
working = True
supports_gpt_35_turbo = True
@ -53,7 +53,7 @@ class Allyfy(AsyncGeneratorProvider):
"packageName": "com.cch.allyfy.webh",
}
}
async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
async with session.post(f"{cls.api_endpoint}", json=data, proxy=proxy) as response:
response.raise_for_status()
full_response = []
async for line in response.content:

g4f/Provider/Bixin123.py (removed in this commit)

@ -1,94 +0,0 @@
from __future__ import annotations
from aiohttp import ClientSession
import json
import random
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..typing import AsyncResult, Messages
from .helper import format_prompt
class Bixin123(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://chat.bixin123.com"
api_endpoint = "https://chat.bixin123.com/api/chatgpt/chat-process"
working = True
supports_gpt_35_turbo = True
supports_gpt_4 = True
default_model = 'gpt-3.5-turbo-0125'
models = ['gpt-3.5-turbo', 'gpt-3.5-turbo-0125', 'gpt-3.5-turbo-16k-0613', 'gpt-4-turbo', 'qwen-turbo']
model_aliases = {
"gpt-3.5-turbo": "gpt-3.5-turbo-0125",
"gpt-3.5-turbo": "gpt-3.5-turbo-16k-0613",
}
@classmethod
def get_model(cls, model: str) -> str:
if model in cls.models:
return model
elif model in cls.model_aliases:
return cls.model_aliases[model]
else:
return cls.default_model
@classmethod
def generate_fingerprint(cls) -> str:
return str(random.randint(100000000, 999999999))
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
headers = {
"accept": "application/json, text/plain, */*",
"accept-language": "en-US,en;q=0.9",
"cache-control": "no-cache",
"content-type": "application/json",
"fingerprint": cls.generate_fingerprint(),
"origin": cls.url,
"pragma": "no-cache",
"priority": "u=1, i",
"referer": f"{cls.url}/chat",
"sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Linux"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
"x-website-domain": "chat.bixin123.com",
}
async with ClientSession(headers=headers) as session:
prompt = format_prompt(messages)
data = {
"prompt": prompt,
"options": {
"usingNetwork": False,
"file": ""
}
}
async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
response.raise_for_status()
response_text = await response.text()
lines = response_text.strip().split("\n")
last_json = None
for line in reversed(lines):
try:
last_json = json.loads(line)
break
except json.JSONDecodeError:
pass
if last_json:
text = last_json.get("text", "")
yield text
else:
yield ""

g4f/Provider/Blackbox.py

@ -25,18 +25,10 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
'llama-3.1-70b',
'llama-3.1-405b',
'ImageGenerationLV45LJp',
'GPT-4o',
'Gemini-PRO',
'Claude-Sonnet-3.5',
'gpt-4o',
'gemini-pro',
'claude-sonnet-3.5',
]
model_aliases = {
"gemini-flash": "gemini-1.5-flash",
"flux": "ImageGenerationLV45LJp",
"gpt-4o": "GPT-4o",
"gemini-pro": "Gemini-PRO",
"claude-3.5-sonnet": "Claude-Sonnet-3.5",
}
agentMode = {
'ImageGenerationLV45LJp': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
@ -51,9 +43,14 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
}
userSelectedModel = {
"GPT-4o": "GPT-4o",
"Gemini-PRO": "Gemini-PRO",
'Claude-Sonnet-3.5': "Claude-Sonnet-3.5",
"gpt-4o": "gpt-4o",
"gemini-pro": "gemini-pro",
'claude-sonnet-3.5': "claude-sonnet-3.5",
}
model_aliases = {
"gemini-flash": "gemini-1.5-flash",
"flux": "ImageGenerationLV45LJp",
}
@classmethod
@ -119,8 +116,9 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
"agentMode": {},
"trendingAgentMode": {},
"userSelectedModel": None,
"userSystemPrompt": None,
"isMicMode": False,
"maxTokens": 99999999,
"maxTokens": 1024,
"playgroundTopP": 0.9,
"playgroundTemperature": 0.5,
"isChromeExt": False,

g4f/Provider/ChatGpt.py

@ -134,11 +134,21 @@ class ChatGpt(AbstractProvider, ProviderModelMixin):
}
response = session.post('https://chatgpt.com/backend-anon/sentinel/chat-requirements',
headers=headers, json={'p': pow_req}).json()
headers=headers, json={'p': pow_req})
turnstile = response.get('turnstile', {})
if response.status_code != 200:
print(f"Request failed with status: {response.status_code}")
print(f"Response content: {response.content}")
return
response_data = response.json()
if "detail" in response_data and "Unusual activity" in response_data["detail"]:
print(f"Blocked due to unusual activity: {response_data['detail']}")
return
turnstile = response_data.get('turnstile', {})
turnstile_required = turnstile.get('required')
pow_conf = response.get('proofofwork', {})
pow_conf = response_data.get('proofofwork', {})
if turnstile_required:
turnstile_dx = turnstile.get('dx')
@ -146,7 +156,7 @@ class ChatGpt(AbstractProvider, ProviderModelMixin):
headers = headers | {
'openai-sentinel-turnstile-token' : turnstile_token,
'openai-sentinel-chat-requirements-token': response.get('token'),
'openai-sentinel-chat-requirements-token': response_data.get('token'),
'openai-sentinel-proof-token' : get_answer_token(
pow_conf.get('seed'), pow_conf.get('difficulty'), config
)
@ -187,20 +197,29 @@ class ChatGpt(AbstractProvider, ProviderModelMixin):
'screen_width': random.randint(1200, 2000),
},
}
time.sleep(2)
response = session.post('https://chatgpt.com/backend-anon/conversation',
headers=headers, json=json_data, stream=True)
replace = ''
for line in response.iter_lines():
if line:
if 'DONE' in line.decode():
break
data = json.loads(line.decode()[6:])
if data.get('message').get('author').get('role') == 'assistant':
tokens = (data.get('message').get('content').get('parts')[0])
yield tokens.replace(replace, '')
replace = tokens
decoded_line = line.decode()
print(f"Received line: {decoded_line}")
if decoded_line.startswith('data:'):
json_string = decoded_line[6:]
if json_string.strip():
try:
data = json.loads(json_string)
except json.JSONDecodeError as e:
print(f"Error decoding JSON: {e}, content: {json_string}")
continue
if data.get('message').get('author').get('role') == 'assistant':
tokens = (data.get('message').get('content').get('parts')[0])
yield tokens.replace(replace, '')
replace = tokens
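The rewritten stream loop receives the cumulative assistant text on every `data:` event and emits only the unseen tail via `tokens.replace(replace, '')`. A self-contained illustration of that delta scheme:

previous = ""
for parts in ("He", "Hello", "Hello, world"):   # cumulative SSE payloads
    print(parts.replace(previous, ""), end="")  # emits "He", "llo", ", world"
    previous = parts
# Total printed output: "Hello, world"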

g4f/Provider/HuggingChat.py

@ -17,6 +17,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
'meta-llama/Meta-Llama-3.1-70B-Instruct',
'CohereForAI/c4ai-command-r-plus-08-2024',
'Qwen/Qwen2.5-72B-Instruct',
'meta-llama/Llama-3.2-11B-Vision-Instruct',
'NousResearch/Hermes-3-Llama-3.1-8B',
'mistralai/Mistral-Nemo-Instruct-2407',
'microsoft/Phi-3.5-mini-instruct',
@ -26,6 +27,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
"llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct",
"command-r-plus": "CohereForAI/c4ai-command-r-plus-08-2024",
"qwen-2-72b": "Qwen/Qwen2.5-72B-Instruct",
"llama-3.2-11b": "meta-llama/Llama-3.2-11B-Vision-Instruct",
"hermes-3": "NousResearch/Hermes-3-Llama-3.1-8B",
"mistral-nemo": "mistralai/Mistral-Nemo-Instruct-2407",
"phi-3.5-mini": "microsoft/Phi-3.5-mini-instruct",

g4f/Provider/Liaobots.py

@ -9,6 +9,15 @@ from .helper import get_connector
from ..requests import raise_for_status
models = {
"gpt-3.5-turbo": {
"id": "gpt-3.5-turbo",
"name": "GPT-3.5-Turbo",
"model": "ChatGPT",
"provider": "OpenAI",
"maxLength": 48000,
"tokenLimit": 14000,
"context": "16K",
},
"gpt-4o-mini-free": {
"id": "gpt-4o-mini-free",
"name": "GPT-4o-Mini-Free",

g4f/Provider/LiteIcoding.py

@ -1,12 +1,11 @@
from __future__ import annotations
from aiohttp import ClientSession, ClientResponseError
import base64
import re
from aiohttp import ClientSession, ClientResponseError
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
class LiteIcoding(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://lite.icoding.ink"
api_endpoint = "/api/v1/gpt/message"
@ -27,18 +26,20 @@ class LiteIcoding(AsyncGeneratorProvider, ProviderModelMixin):
}
bearer_tokens = [
"aa3020ee873e40cb8b3f515a0708ebc4",
"5d69cd271b144226ac1199b3c849a566",
"62977f48a95844f8853a953679401850",
"d815b091959e42dd8b7871dfaf879485"
"NWQ2OWNkMjcxYjE0NDIyNmFjMTE5OWIzYzg0OWE1NjY=",
"ZDgxNWIwOTU5NTk0ZTRkZDhiNzg3MWRmYWY4Nzk0ODU="
]
current_token_index = 0
@classmethod
def decode_token(cls, encoded_token: str) -> str:
return base64.b64decode(encoded_token).decode('utf-8')
@classmethod
def get_next_bearer_token(cls):
token = cls.bearer_tokens[cls.current_token_index]
encoded_token = cls.bearer_tokens[cls.current_token_index]
cls.current_token_index = (cls.current_token_index + 1) % len(cls.bearer_tokens)
return token
return cls.decode_token(encoded_token)
@classmethod
async def create_async_generator(
@ -95,9 +96,11 @@ class LiteIcoding(AsyncGeneratorProvider, ProviderModelMixin):
response.raise_for_status()
buffer = ""
full_response = ""
def decode_content(data):
bytes_array = bytes([int(b, 16) ^ 255 for b in data.split()])
return bytes_array.decode('utf-8')
async for chunk in response.content.iter_any():
if chunk:
buffer += chunk.decode()
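`decode_content` above expects space-separated hex bytes that were XOR-masked with 0xFF. A worked example (input chosen for illustration):

# 0xb7 ^ 0xff = 0x48 ("H"), 0x96 ^ 0xff = 0x69 ("i")
assert bytes(int(b, 16) ^ 255 for b in "b7 96".split()).decode("utf-8") == "Hi"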

g4f/Provider/MagickPen.py

@ -24,21 +24,18 @@ class MagickPen(AsyncGeneratorProvider, ProviderModelMixin):
@classmethod
async def fetch_api_credentials(cls) -> tuple:
url = "https://magickpen.com/_nuxt/9e47cd7579e60a9d1f13.js"
url = "https://magickpen.com/_nuxt/bf709a9ce19f14e18116.js"
async with ClientSession() as session:
async with session.get(url) as response:
text = await response.text()
# Extract the necessary values from the file
pattern = r'"X-API-Secret":"(\w+)"'
match = re.search(pattern, text)
X_API_SECRET = match.group(1) if match else None
# Generate timestamp and nonce
timestamp = str(int(time.time() * 1000)) # in milliseconds
timestamp = str(int(time.time() * 1000))
nonce = str(random.random())
# Generate the signature
s = ["TGDBU9zCgM", timestamp, nonce]
s.sort()
signature_string = ''.join(s)

g4f/Provider/__init__.py

@ -19,7 +19,6 @@ from .Aura import Aura
from .Bing import Bing
from .BingCreateImages import BingCreateImages
from .Binjie import Binjie
from .Bixin123 import Bixin123
from .Blackbox import Blackbox
from .ChatGot import ChatGot
from .ChatGpt import ChatGpt

g4f/client/async_client.py

@ -33,6 +33,12 @@ except NameError:
except StopAsyncIteration:
raise StopIteration
async def safe_aclose(generator):
try:
await generator.aclose()
except Exception as e:
logging.warning(f"Error while closing generator: {e}")
async def iter_response(
response: AsyncIterator[str],
stream: bool,
@ -45,48 +51,56 @@ async def iter_response(
completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28))
idx = 0
async for chunk in response:
if isinstance(chunk, FinishReason):
finish_reason = chunk.reason
break
elif isinstance(chunk, BaseConversation):
yield chunk
continue
try:
async for chunk in response:
if isinstance(chunk, FinishReason):
finish_reason = chunk.reason
break
elif isinstance(chunk, BaseConversation):
yield chunk
continue
content += str(chunk)
idx += 1
content += str(chunk)
idx += 1
if max_tokens is not None and idx >= max_tokens:
finish_reason = "length"
if max_tokens is not None and idx >= max_tokens:
finish_reason = "length"
first, content, chunk = find_stop(stop, content, chunk if stream else None)
first, content, chunk = find_stop(stop, content, chunk if stream else None)
if first != -1:
finish_reason = "stop"
if first != -1:
finish_reason = "stop"
if stream:
yield ChatCompletionChunk(chunk, None, completion_id, int(time.time()))
if finish_reason is not None:
break
finish_reason = "stop" if finish_reason is None else finish_reason
if stream:
yield ChatCompletionChunk(chunk, None, completion_id, int(time.time()))
if finish_reason is not None:
break
finish_reason = "stop" if finish_reason is None else finish_reason
if stream:
yield ChatCompletionChunk(None, finish_reason, completion_id, int(time.time()))
else:
if response_format is not None and "type" in response_format:
if response_format["type"] == "json_object":
content = filter_json(content)
yield ChatCompletion(content, finish_reason, completion_id, int(time.time()))
yield ChatCompletionChunk(None, finish_reason, completion_id, int(time.time()))
else:
if response_format is not None and "type" in response_format:
if response_format["type"] == "json_object":
content = filter_json(content)
yield ChatCompletion(content, finish_reason, completion_id, int(time.time()))
finally:
if hasattr(response, 'aclose'):
await safe_aclose(response)
async def iter_append_model_and_provider(response: AsyncIterator) -> AsyncIterator:
last_provider = None
async for chunk in response:
last_provider = get_last_provider(True) if last_provider is None else last_provider
chunk.model = last_provider.get("model")
chunk.provider = last_provider.get("name")
yield chunk
try:
async for chunk in response:
last_provider = get_last_provider(True) if last_provider is None else last_provider
chunk.model = last_provider.get("model")
chunk.provider = last_provider.get("name")
yield chunk
finally:
if hasattr(response, 'aclose'):
await safe_aclose(response)
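Both wrappers now release the upstream generator in a `finally` block. The reason `safe_aclose` matters is that breaking out of `async for` leaves the generator suspended rather than finalized; a minimal standalone sketch of the same pattern (all names local to this example):

import asyncio

async def ticker():
    try:
        for i in range(1000):
            yield i
    finally:
        print("ticker finalized")  # reached because of the explicit aclose()

async def main():
    gen = ticker()
    try:
        async for n in gen:
            if n == 2:
                break  # consumer stops early; gen is still suspended here
    finally:
        await gen.aclose()  # the role safe_aclose() plays above

asyncio.run(main())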
class AsyncClient(BaseClient):
def __init__(
@ -158,8 +172,6 @@ class Completions:
response = iter_append_model_and_provider(response)
return response if stream else await anext(response)
class Chat:
completions: Completions
@ -168,14 +180,18 @@ class Chat:
async def iter_image_response(response: AsyncIterator) -> Union[ImagesResponse, None]:
logging.info("Starting iter_image_response")
async for chunk in response:
logging.info(f"Processing chunk: {chunk}")
if isinstance(chunk, ImageProviderResponse):
logging.info("Found ImageProviderResponse")
return ImagesResponse([Image(image) for image in chunk.get_list()])
logging.warning("No ImageProviderResponse found in the response")
return None
try:
async for chunk in response:
logging.info(f"Processing chunk: {chunk}")
if isinstance(chunk, ImageProviderResponse):
logging.info("Found ImageProviderResponse")
return ImagesResponse([Image(image) for image in chunk.get_list()])
logging.warning("No ImageProviderResponse found in the response")
return None
finally:
if hasattr(response, 'aclose'):
await safe_aclose(response)
async def create_image(client: AsyncClient, provider: ProviderType, prompt: str, model: str = "", **kwargs) -> AsyncIterator:
logging.info(f"Creating image with provider: {provider}, model: {model}, prompt: {prompt}")
@ -220,12 +236,25 @@ class Images:
if isinstance(provider, type) and issubclass(provider, AsyncGeneratorProvider):
logging.info("Using AsyncGeneratorProvider")
messages = [{"role": "user", "content": prompt}]
async for response in provider.create_async_generator(model, messages, **kwargs):
if isinstance(response, ImageResponse):
return self._process_image_response(response)
elif isinstance(response, str):
image_response = ImageResponse([response], prompt)
return self._process_image_response(image_response)
generator = None
try:
generator = provider.create_async_generator(model, messages, **kwargs)
async for response in generator:
logging.debug(f"Received response: {type(response)}")
if isinstance(response, ImageResponse):
return self._process_image_response(response)
elif isinstance(response, str):
image_response = ImageResponse([response], prompt)
return self._process_image_response(image_response)
except RuntimeError as e:
if "async generator ignored GeneratorExit" in str(e):
logging.warning("Generator ignored GeneratorExit, handling gracefully")
else:
raise
finally:
if generator and hasattr(generator, 'aclose'):
await safe_aclose(generator)
logging.info("AsyncGeneratorProvider processing completed")
elif hasattr(provider, 'create'):
logging.info("Using provider's create method")
async_create = asyncio.iscoroutinefunction(provider.create)
@ -241,7 +270,7 @@ class Images:
return self._process_image_response(image_response)
elif hasattr(provider, 'create_completion'):
logging.info("Using provider's create_completion method")
response = await create_image(provider, prompt, model, **kwargs)
response = await create_image(self.client, provider, prompt, model, **kwargs)
async for chunk in response:
if isinstance(chunk, ImageProviderResponse):
logging.info("Found ImageProviderResponse")
@ -277,12 +306,24 @@ class Images:
if isinstance(provider, type) and issubclass(provider, AsyncGeneratorProvider):
messages = [{"role": "user", "content": "create a variation of this image"}]
image_data = to_data_uri(image)
async for response in provider.create_async_generator(model, messages, image=image_data, **kwargs):
if isinstance(response, ImageResponse):
return self._process_image_response(response)
elif isinstance(response, str):
image_response = ImageResponse([response], "Image variation")
return self._process_image_response(image_response)
generator = None
try:
generator = provider.create_async_generator(model, messages, image=image_data, **kwargs)
async for response in generator:
if isinstance(response, ImageResponse):
return self._process_image_response(response)
elif isinstance(response, str):
image_response = ImageResponse([response], "Image variation")
return self._process_image_response(image_response)
except RuntimeError as e:
if "async generator ignored GeneratorExit" in str(e):
logging.warning("Generator ignored GeneratorExit in create_variation, handling gracefully")
else:
raise
finally:
if generator and hasattr(generator, 'aclose'):
await safe_aclose(generator)
logging.info("AsyncGeneratorProvider processing completed in create_variation")
elif hasattr(provider, 'create_variation'):
if asyncio.iscoroutinefunction(provider.create_variation):
response = await provider.create_variation(image, **kwargs)
@ -296,5 +337,3 @@ class Images:
return self._process_image_response(image_response)
else:
raise ValueError(f"Provider {provider} does not support image variation")
raise NoImageResponseError("Failed to create image variation")
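A usage sketch for the hardened image path. It assumes `AsyncClient` wires these helpers behind an OpenAI-style `images.generate` call and that the response stubs expose `data` and `url`, so treat the attribute names as assumptions:

import asyncio
from g4f.client import AsyncClient

async def main():
    client = AsyncClient()
    # Routed through the generator handling shown above; the result wraps
    # Image objects built in iter_image_response / _process_image_response.
    response = await client.images.generate(model="flux", prompt="a watercolor fox")
    print(response.data[0].url)  # assumed OpenAI-style stub attributes

asyncio.run(main())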

g4f/models.py

@ -9,10 +9,8 @@ from .Provider import (
Allyfy,
Bing,
Binjie,
Bixin123,
Blackbox,
ChatGpt,
Chatgpt4o,
Chatgpt4Online,
ChatGptEs,
ChatgptFree,
@ -80,12 +78,16 @@ default = Model(
ReplicateHome,
Upstage,
Blackbox,
Bixin123,
Binjie,
Free2GPT,
MagickPen,
DeepInfraChat,
LiteIcoding,
Airforce,
ChatHub,
Nexra,
ChatGptEs,
ChatHub,
])
)
@ -106,7 +108,7 @@ gpt_35_turbo = Model(
name = 'gpt-3.5-turbo',
base_provider = 'OpenAI',
best_provider = IterListProvider([
Allyfy, Nexra, Bixin123, Airforce,
Allyfy, Nexra, Airforce, Liaobots,
])
)
@ -115,7 +117,7 @@ gpt_4o = Model(
name = 'gpt-4o',
base_provider = 'OpenAI',
best_provider = IterListProvider([
Liaobots, Nexra, Airforce, Chatgpt4o, ChatGptEs,
Liaobots, Nexra, ChatGptEs, Airforce,
OpenaiChat
])
)
@ -133,7 +135,7 @@ gpt_4_turbo = Model(
name = 'gpt-4-turbo',
base_provider = 'OpenAI',
best_provider = IterListProvider([
Nexra, Bixin123, Liaobots, Airforce, Bing
Nexra, Liaobots, Airforce, Bing
])
)
@ -141,9 +143,8 @@ gpt_4 = Model(
name = 'gpt-4',
base_provider = 'OpenAI',
best_provider = IterListProvider([
Nexra, Binjie, Airforce,
gpt_4_turbo.best_provider, gpt_4o.best_provider, gpt_4o_mini.best_provider,
Chatgpt4Online, Bing, OpenaiChat,
Nexra, Binjie, Airforce, Chatgpt4Online, Bing, OpenaiChat,
gpt_4_turbo.best_provider, gpt_4o.best_provider, gpt_4o_mini.best_provider
])
)
@ -205,7 +206,7 @@ llama_3_1_70b = Model(
llama_3_1_405b = Model(
name = "llama-3.1-405b",
base_provider = "Meta Llama",
best_provider = IterListProvider([Blackbox, DeepInfraChat, Airforce])
best_provider = IterListProvider([DeepInfraChat, Blackbox, Airforce])
)
llama_3_1 = Model(
@ -214,12 +215,38 @@ llama_3_1 = Model(
best_provider = IterListProvider([Nexra, llama_3_1_8b.best_provider, llama_3_1_70b.best_provider, llama_3_1_405b.best_provider,])
)
# llama 3.2
llama_3_2_11b = Model(
name = "llama-3.2-11b",
base_provider = "Meta Llama",
best_provider = IterListProvider([HuggingChat, HuggingFace])
)
llama_3_2_90b = Model(
name = "llama-3.2-90b",
base_provider = "Meta Llama",
best_provider = IterListProvider([Airforce])
)
# llamaguard
llamaguard_7b = Model(
name = "llamaguard-7b",
base_provider = "Meta Llama",
best_provider = IterListProvider([Airforce])
)
llamaguard_2_8b = Model(
name = "llamaguard-2-8b",
base_provider = "Meta Llama",
best_provider = IterListProvider([Airforce])
)
### Mistral ###
mistral_7b = Model(
name = "mistral-7b",
base_provider = "Mistral",
best_provider = IterListProvider([HuggingChat, DeepInfraChat, Airforce, HuggingFace, DeepInfra])
best_provider = IterListProvider([DeepInfraChat, Airforce, HuggingFace, DeepInfra])
)
mixtral_8x7b = Model(
@ -245,7 +272,13 @@ mistral_nemo = Model(
mixtral_8x7b_dpo = Model(
name = "mixtral-8x7b-dpo",
base_provider = "NousResearch",
best_provider = Airforce
best_provider = IterListProvider([Airforce])
)
yi_34b = Model(
name = "yi-34b",
base_provider = "NousResearch",
best_provider = IterListProvider([Airforce])
)
hermes_3 = Model(
@ -310,49 +343,56 @@ gemma_2b = Model(
])
)
# gemma 2
gemma_2_27b = Model(
name = 'gemma-2-27b',
base_provider = 'Google',
best_provider = Airforce
)
gemma_2 = Model(
name = 'gemma-2',
base_provider = 'Google',
best_provider = ChatHub
best_provider = IterListProvider([
ChatHub,
gemma_2_27b.best_provider,
])
)
### Anthropic ###
claude_2 = Model(
name = 'claude-2',
base_provider = 'Anthropic',
best_provider = You
)
claude_2_0 = Model(
name = 'claude-2.0',
base_provider = 'Anthropic',
best_provider = Liaobots
)
claude_2_1 = Model(
name = 'claude-2.1',
base_provider = 'Anthropic',
best_provider = Liaobots
)
claude_2 = Model(
name = 'claude-2',
base_provider = 'Anthropic',
best_provider = IterListProvider([
You,
claude_2_1.best_provider,
])
)
# claude 3
claude_3_opus = Model(
name = 'claude-3-opus',
base_provider = 'Anthropic',
best_provider = Liaobots
best_provider = IterListProvider([Airforce, Liaobots])
)
claude_3_sonnet = Model(
name = 'claude-3-sonnet',
base_provider = 'Anthropic',
best_provider = Liaobots
best_provider = IterListProvider([Airforce, Liaobots])
)
claude_3_haiku = Model(
name = 'claude-3-haiku',
base_provider = 'Anthropic',
best_provider = IterListProvider([DDG, Liaobots])
best_provider = IterListProvider([DDG, Airforce, Liaobots])
)
claude_3 = Model(
@ -367,7 +407,7 @@ claude_3 = Model(
claude_3_5_sonnet = Model(
name = 'claude-3.5-sonnet',
base_provider = 'Anthropic',
best_provider = IterListProvider([Blackbox, Liaobots])
best_provider = IterListProvider([Blackbox, Airforce, Liaobots])
)
claude_3_5 = Model(
@ -417,15 +457,22 @@ command_r_plus = Model(
sparkdesk_v1_1 = Model(
name = 'sparkdesk-v1.1',
base_provider = 'iFlytek',
best_provider = IterListProvider([FreeChatgpt, Airforce])
best_provider = IterListProvider([FreeChatgpt])
)
### Qwen ###
# qwen 1
qwen_1_5_7b = Model(
name = 'qwen-1.5-7b',
base_provider = 'Qwen',
best_provider = Airforce
)
qwen_1_5_14b = Model(
name = 'qwen-1.5-14b',
base_provider = 'Qwen',
best_provider = FreeChatgpt
best_provider = IterListProvider([FreeChatgpt, Airforce])
)
qwen_1_5_72b = Model(
@ -440,22 +487,17 @@ qwen_1_5_110b = Model(
best_provider = Airforce
)
# qwen 2
qwen_2_72b = Model(
name = 'qwen-2-72b',
base_provider = 'Qwen',
best_provider = IterListProvider([DeepInfraChat, HuggingChat, Airforce, HuggingFace])
)
qwen_turbo = Model(
name = 'qwen-turbo',
base_provider = 'Qwen',
best_provider = Bixin123
)
qwen = Model(
name = 'qwen',
base_provider = 'Qwen',
best_provider = IterListProvider([Nexra, qwen_1_5_14b.best_provider, qwen_1_5_72b.best_provider, qwen_1_5_110b.best_provider, qwen_2_72b.best_provider, qwen_turbo.best_provider])
best_provider = IterListProvider([Nexra, qwen_1_5_14b.best_provider, qwen_1_5_72b.best_provider, qwen_1_5_110b.best_provider, qwen_2_72b.best_provider])
)
@ -488,13 +530,6 @@ yi_1_5_9b = Model(
best_provider = FreeChatgpt
)
yi_34b = Model(
name = 'yi-34b',
base_provider = '01-ai',
best_provider = Airforce
)
### Upstage ###
solar_1_mini = Model(
name = 'solar-1-mini',
@ -542,14 +577,6 @@ wizardlm_2_8x22b = Model(
best_provider = IterListProvider([DeepInfraChat, Airforce])
)
### Together ###
sh_n_7b = Model(
name = 'sh-n-7b',
base_provider = 'Together',
best_provider = Airforce
)
### Yorickvp ###
llava_13b = Model(
name = 'llava-13b',
@ -611,7 +638,8 @@ grok_2_mini = Model(
best_provider = Liaobots
)
# Perplexity AI
### Perplexity AI ###
sonar_online = Model(
name = 'sonar-online',
base_provider = 'Perplexity AI',
@ -625,6 +653,22 @@ sonar_chat = Model(
)
### Gryphe ###
mythomax_l2_13b = Model(
name = 'mythomax-l2-13b',
base_provider = 'Gryphe',
best_provider = IterListProvider([Airforce])
)
### Pawan ###
cosmosrp = Model(
name = 'cosmosrp',
base_provider = 'Pawan',
best_provider = IterListProvider([Airforce])
)
#############
### Image ###
@ -654,6 +698,7 @@ playground_v2_5 = Model(
)
### Flux AI ###
flux = Model(
name = 'flux',
@ -805,6 +850,14 @@ class ModelUtils:
'llama-3.1-8b': llama_3_1_8b,
'llama-3.1-70b': llama_3_1_70b,
'llama-3.1-405b': llama_3_1_405b,
# llama-3.2
'llama-3.2-11b': llama_3_2_11b,
'llama-3.2-90b': llama_3_2_90b,
# llamaguard
'llamaguard-7b': llamaguard_7b,
'llamaguard-2-8b': llamaguard_2_8b,
### Mistral ###
@ -835,12 +888,14 @@ class ModelUtils:
'gemma-2b': gemma_2b,
'gemma-2b-9b': gemma_2b_9b,
'gemma-2b-27b': gemma_2b_27b,
# gemma-2
'gemma-2': gemma_2,
'gemma-2-27b': gemma_2_27b,
### Anthropic ###
'claude-2': claude_2,
'claude-2.0': claude_2_0,
'claude-2.1': claude_2_1,
# claude 3
@ -880,11 +935,11 @@ class ModelUtils:
### Qwen ###
'qwen': qwen,
'qwen-1.5-7b': qwen_1_5_7b,
'qwen-1.5-14b': qwen_1_5_14b,
'qwen-1.5-72b': qwen_1_5_72b,
'qwen-1.5-110b': qwen_1_5_110b,
'qwen-2-72b': qwen_2_72b,
'qwen-turbo': qwen_turbo,
### Zhipu AI ###
@ -908,11 +963,7 @@ class ModelUtils:
### DeepSeek ###
'deepseek': deepseek,
### Together ###
'sh-n-7b': sh_n_7b,
### Yorickvp ###
'llava-13b': llava_13b,
@ -947,9 +998,18 @@ class ModelUtils:
'grok-2': grok_2,
'grok-2-mini': grok_2_mini,
### Perplexity AI ###
'sonar-online': sonar_online,
'sonar-chat': sonar_chat,
### Gryphe ###
'mythomax-l2-13b': mythomax_l2_13b,
### Pawan ###
'cosmosrp': cosmosrp,
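With the registry updated, a plain string resolves to its `Model` and provider chain. A lookup sketch, assuming the mapping above is the `convert` dict that `ModelUtils` exposes in `g4f/models.py`:

from g4f.models import ModelUtils

model = ModelUtils.convert["mythomax-l2-13b"]
print(model.name, model.base_provider)  # mythomax-l2-13b Gryphe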