Resolved merge conflicts

This commit is contained in:
kqlio67 2024-09-24 13:23:53 +03:00
parent f13b214c8b
commit f8e403a745
82 changed files with 1012 additions and 4559 deletions

README.md

@ -69,11 +69,7 @@ As per the survey, here is a list of improvements to come
- [Web UI](#web-ui) - [Web UI](#web-ui)
- [Interference API](#interference-api) - [Interference API](#interference-api)
- [Configuration](#configuration) - [Configuration](#configuration)
- [🚀 Providers and Models](#-providers-and-models) - [🚀 Providers and Models](docs/providers-and-models.md)
- [GPT-4](#gpt-4)
- [GPT-3.5](#gpt-35)
- [Other](#other)
- [Models](#models)
- [🔗 Powered by gpt4free](#-powered-by-gpt4free) - [🔗 Powered by gpt4free](#-powered-by-gpt4free)
- [🤝 Contribute](#-contribute) - [🤝 Contribute](#-contribute)
- [How do i create a new Provider?](#guide-how-do-i-create-a-new-provider) - [How do i create a new Provider?](#guide-how-do-i-create-a-new-provider)
@ -318,139 +314,6 @@ export G4F_PROXY="http://host:port"
set G4F_PROXY=http://host:port set G4F_PROXY=http://host:port
``` ```
## 🚀 Providers and Models
### GPT-4
| Website | Provider | GPT-3.5 | GPT-4 | Stream | Status | Auth |
| -------------------------------------- | ------------------------- | ------- | ----- | ------ | ---------------------------------------------------------- | ----- |
| [bing.com](https://bing.com/chat) | `g4f.Provider.Bing` | ❌ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [chatgpt.ai](https://chatgpt.ai) | `g4f.Provider.ChatgptAi` | ❌ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
| [liaobots.site](https://liaobots.site) | `g4f.Provider.Liaobots` | ✔️ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
| [chatgpt.com](https://chatgpt.com) | `g4f.Provider.OpenaiChat` | ✔️ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌+✔️ |
| [raycast.com](https://raycast.com) | `g4f.Provider.Raycast` | ✔️ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔️ |
| [beta.theb.ai](https://beta.theb.ai) | `g4f.Provider.Theb` | ✔️ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
| [you.com](https://you.com) | `g4f.Provider.You` | ✔️ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
## Best OpenSource Models
While we wait for gpt-5, here is a list of new models that are at least better than gpt-3.5-turbo. **Some are better than gpt-4**. Expect this list to grow.
| Website | Provider | parameters | better than |
| ---------------------------------------------------------------------------------------- | ----------------------------------- | ----------------- | ------------------ |
| [claude-3-opus](https://anthropic.com/) | `g4f.Provider.You` | ?B | gpt-4-0125-preview |
| [command-r+](https://txt.cohere.com/command-r-plus-microsoft-azure/) | `g4f.Provider.HuggingChat` | 104B | gpt-4-0314 |
| [llama-3-70b](https://meta.ai/) | `g4f.Provider.Llama` or `DeepInfra` | 70B | gpt-4-0314 |
| [claude-3-sonnet](https://anthropic.com/) | `g4f.Provider.You` | ?B | gpt-4-0314 |
| [reka-core](https://chat.reka.ai/) | `g4f.Provider.Reka` | 21B | gpt-4-vision |
| [dbrx-instruct](https://www.databricks.com/blog/introducing-dbrx-new-state-art-open-llm) | `g4f.Provider.DeepInfra` | 132B / 36B active | gpt-3.5-turbo |
| [mixtral-8x22b](https://huggingface.co/mistral-community/Mixtral-8x22B-v0.1) | `g4f.Provider.DeepInfra` | 176B / 44b active | gpt-3.5-turbo |
### GPT-3.5
| Website | Provider | GPT-3.5 | GPT-4 | Stream | Status | Auth |
| ---------------------------------------------------------- | ----------------------------- | ------- | ----- | ------ | ---------------------------------------------------------- | ---- |
| [chat3.aiyunos.top](https://chat3.aiyunos.top/) | `g4f.Provider.AItianhuSpace` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
| [chat10.aichatos.xyz](https://chat10.aichatos.xyz) | `g4f.Provider.Aichatos` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [chatforai.store](https://chatforai.store) | `g4f.Provider.ChatForAi` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
| [chatgpt4online.org](https://chatgpt4online.org) | `g4f.Provider.Chatgpt4Online` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
| [chatgpt-free.cc](https://www.chatgpt-free.cc) | `g4f.Provider.ChatgptNext` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
| [chatgptx.de](https://chatgptx.de) | `g4f.Provider.ChatgptX` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
| [duckduckgo.com](https://duckduckgo.com/duckchat) | `g4f.Provider.DDG` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [feedough.com](https://www.feedough.com) | `g4f.Provider.Feedough` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [flowgpt.com](https://flowgpt.com/chat) | `g4f.Provider.FlowGpt` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
| [freegptsnav.aifree.site](https://freegptsnav.aifree.site) | `g4f.Provider.FreeGpt` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [gpttalk.ru](https://gpttalk.ru) | `g4f.Provider.GptTalkRu` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
| [koala.sh](https://koala.sh) | `g4f.Provider.Koala` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
| [app.myshell.ai](https://app.myshell.ai/chat) | `g4f.Provider.MyShell` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
| [perplexity.ai](https://www.perplexity.ai) | `g4f.Provider.PerplexityAi` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
| [poe.com](https://poe.com) | `g4f.Provider.Poe` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔️ |
| [talkai.info](https://talkai.info) | `g4f.Provider.TalkAi` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
| [chat.vercel.ai](https://chat.vercel.ai) | `g4f.Provider.Vercel` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
| [aitianhu.com](https://www.aitianhu.com) | `g4f.Provider.AItianhu` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
| [chatgpt.bestim.org](https://chatgpt.bestim.org) | `g4f.Provider.Bestim` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
| [chatbase.co](https://www.chatbase.co) | `g4f.Provider.ChatBase` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
| [chatgptdemo.info](https://chatgptdemo.info/chat) | `g4f.Provider.ChatgptDemo` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
| [chat.chatgptdemo.ai](https://chat.chatgptdemo.ai) | `g4f.Provider.ChatgptDemoAi` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
| [chatgptfree.ai](https://chatgptfree.ai) | `g4f.Provider.ChatgptFree` | ✔️ | ❌ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
| [chatgptlogin.ai](https://chatgptlogin.ai) | `g4f.Provider.ChatgptLogin` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
| [chat.3211000.xyz](https://chat.3211000.xyz) | `g4f.Provider.Chatxyz` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
| [gpt6.ai](https://gpt6.ai) | `g4f.Provider.Gpt6` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
| [gptchatly.com](https://gptchatly.com) | `g4f.Provider.GptChatly` | ✔️ | ❌ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
| [ai18.gptforlove.com](https://ai18.gptforlove.com) | `g4f.Provider.GptForLove` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
| [gptgo.ai](https://gptgo.ai) | `g4f.Provider.GptGo` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
| [gptgod.site](https://gptgod.site) | `g4f.Provider.GptGod` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
| [onlinegpt.org](https://onlinegpt.org) | `g4f.Provider.OnlineGpt` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
### Other
| Website | Provider | Stream | Status | Auth |
| -------------------------------------------------------------------------------------------- | ----------------------------- | ------ | ---------------------------------------------------------- | ---- |
| [openchat.team](https://openchat.team) | `g4f.Provider.Aura` | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
| [blackbox.ai](https://www.blackbox.ai) | `g4f.Provider.Blackbox` | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [cohereforai-c4ai-command-r-plus.hf.space](https://cohereforai-c4ai-command-r-plus.hf.space) | `g4f.Provider.Cohere` | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
| [deepinfra.com](https://deepinfra.com) | `g4f.Provider.DeepInfra` | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [free.chatgpt.org.uk](https://free.chatgpt.org.uk) | `g4f.Provider.FreeChatgpt` | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
| [gemini.google.com](https://gemini.google.com) | `g4f.Provider.Gemini` | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔️ |
| [ai.google.dev](https://ai.google.dev) | `g4f.Provider.GeminiPro` | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔️ |
| [gemini-chatbot-sigma.vercel.app](https://gemini-chatbot-sigma.vercel.app) | `g4f.Provider.GeminiProChat` | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
| [developers.sber.ru](https://developers.sber.ru/gigachat) | `g4f.Provider.GigaChat` | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔️ |
| [console.groq.com](https://console.groq.com/playground) | `g4f.Provider.Groq` | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔️ |
| [huggingface.co](https://huggingface.co/chat) | `g4f.Provider.HuggingChat` | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [huggingface.co](https://huggingface.co/chat) | `g4f.Provider.HuggingFace` | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [llama2.ai](https://www.llama2.ai) | `g4f.Provider.Llama` | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
| [meta.ai](https://www.meta.ai) | `g4f.Provider.MetaAI` | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [openrouter.ai](https://openrouter.ai) | `g4f.Provider.OpenRouter` | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔️ |
| [labs.perplexity.ai](https://labs.perplexity.ai) | `g4f.Provider.PerplexityLabs` | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [pi.ai](https://pi.ai/talk) | `g4f.Provider.Pi` | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
| [replicate.com](https://replicate.com) | `g4f.Provider.Replicate` | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
| [theb.ai](https://theb.ai) | `g4f.Provider.ThebApi` | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔️ |
| [whiterabbitneo.com](https://www.whiterabbitneo.com) | `g4f.Provider.WhiteRabbitNeo` | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔️ |
| [bard.google.com](https://bard.google.com) | `g4f.Provider.Bard` | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ✔️ |
### Models
| Model | Base Provider | Provider | Website |
| -------------------------- | ------------- | ------------------------ | ----------------------------------------------- |
| gpt-3.5-turbo | OpenAI | 8+ Providers | [openai.com](https://openai.com/) |
| gpt-4 | OpenAI | 2+ Providers | [openai.com](https://openai.com/) |
| gpt-4-turbo | OpenAI | g4f.Provider.Bing | [openai.com](https://openai.com/) |
| Llama-2-7b-chat-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) |
| Llama-2-13b-chat-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) |
| Llama-2-70b-chat-hf | Meta | 3+ Providers | [llama.meta.com](https://llama.meta.com/) |
| Meta-Llama-3-8b-instruct | Meta | 1+ Providers | [llama.meta.com](https://llama.meta.com/) |
| Meta-Llama-3-70b-instruct | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) |
| CodeLlama-34b-Instruct-hf | Meta | g4f.Provider.HuggingChat | [llama.meta.com](https://llama.meta.com/) |
| CodeLlama-70b-Instruct-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) |
| Mixtral-8x7B-Instruct-v0.1 | Huggingface | 4+ Providers | [huggingface.co](https://huggingface.co/) |
| Mistral-7B-Instruct-v0.1 | Huggingface | 3+ Providers | [huggingface.co](https://huggingface.co/) |
| Mistral-7B-Instruct-v0.2 | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) |
| zephyr-orpo-141b-A35b-v0.1 | Huggingface | 2+ Providers | [huggingface.co](https://huggingface.co/) |
| dolphin-2.6-mixtral-8x7b | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) |
| gemini | Google | g4f.Provider.Gemini | [gemini.google.com](https://gemini.google.com/) |
| gemini-pro | Google | 2+ Providers | [gemini.google.com](https://gemini.google.com/) |
| claude-v2 | Anthropic | 1+ Providers | [anthropic.com](https://www.anthropic.com/) |
| claude-3-opus | Anthropic | g4f.Provider.You | [anthropic.com](https://www.anthropic.com/) |
| claude-3-sonnet | Anthropic | g4f.Provider.You | [anthropic.com](https://www.anthropic.com/) |
| lzlv_70b_fp16_hf | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) |
| airoboros-70b | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) |
| openchat_3.5 | Huggingface | 2+ Providers | [huggingface.co](https://huggingface.co/) |
| pi | Inflection | g4f.Provider.Pi | [inflection.ai](https://inflection.ai/) |
### Image and Vision Models
| Label | Provider | Image Model | Vision Model | Website |
| ------------------------- | ------------------------- | ----------------- | --------------- | ---------------------------------------------- |
| Microsoft Copilot in Bing | `g4f.Provider.Bing` | dall-e-3 | gpt-4-vision | [bing.com](https://bing.com/chat) |
| DeepInfra | `g4f.Provider.DeepInfra` | stability-ai/sdxl | llava-1.5-7b-hf | [deepinfra.com](https://deepinfra.com) |
| Gemini | `g4f.Provider.Gemini` | ✔️ | ✔️ | [gemini.google.com](https://gemini.google.com) |
| Gemini API | `g4f.Provider.GeminiPro` | ❌ | gemini-1.5-pro | [ai.google.dev](https://ai.google.dev) |
| Meta AI | `g4f.Provider.MetaAI` | ✔️ | ❌ | [meta.ai](https://www.meta.ai) |
| OpenAI ChatGPT | `g4f.Provider.OpenaiChat` | dall-e-3 | gpt-4-vision | [chatgpt.com](https://chatgpt.com) |
| Reka | `g4f.Provider.Reka` | ❌ | ✔️ | [chat.reka.ai](https://chat.reka.ai/) |
| Replicate | `g4f.Provider.Replicate` | stability-ai/sdxl | llava-v1.6-34b | [replicate.com](https://replicate.com) |
| You.com | `g4f.Provider.You` | dall-e-3 | ✔️ | [you.com](https://you.com) |
## 🔗 Powered by gpt4free ## 🔗 Powered by gpt4free
<table> <table>


@ -0,0 +1,165 @@
## 🚀 Providers and Models
- [Providers](#providers)
- [Models](#models)
  - [Text Model](#textmodel)
  - [Image Model](#imagemodel)
---
#### Providers
|Website|Provider|Text Model|Image Model|Vision Model|Stream|Status|Auth|
|--|--|--|--|--|--|--|--|
|[chat.ai365vip.com](https://chat.ai365vip.com)|`g4f.Provider.AI365VIP`|`gpt-3.5-turbo, gpt-4o`|❌|❌|?|![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
|[aichatonline.org](https://aichatonline.org)|`g4f.Provider.AiChatOnline`|`gpt-4o-mini`|❌|❌|?|![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
|[ai-chats.org](https://ai-chats.org)|`g4f.Provider.AiChats`|`gpt-4`|`dalle`|❌|?|![Captcha](https://img.shields.io/badge/Captcha-f48d37)|❌|
|[api.airforce](https://api.airforce)|`g4f.Provider.Airforce`|`llama-2-13b, llama-3-70b, llama-3-8b, llama-3.1-405b, llama-3.1-70b, llama-3.1-8b, mixtral-8x7b, mixtral-8x22b, mistral-7b, mixtral-8x7b-dpo, qwen-1.5-72b, qwen-1.5-110b, qwen-2-72b, gemma-2b, gemma-2b-9b, gemma-2b-27b, deepseek, yi-34b, wizardlm-2-8x22b, solar-10-7b, sh-n-7b, sparkdesk-v1.1, gpt-4o, gpt-4o-mini, gpt-3.5-turbo, gemini-flash, gemini-pro, dbrx-instruct`|`flux, flux-realism, flux-anime, flux-3d, flux-disney, flux-pixel, flux-4o, any-dark, dalle-3`|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[allyfy.chat](https://allyfy.chat/)|`g4f.Provider.Allyfy`|`gpt-3.5-turbo`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[openchat.team](https://openchat.team/)|`g4f.Provider.Aura`|✔|❌|❌|?|![Disabled](https://img.shields.io/badge/Disabled-red)|❌|
|[bing.com](https://bing.com/chat)|`g4f.Provider.Bing`|`gpt-4`|✔|`gpt-4-vision`|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌+✔|
|[bing.com/images](https://www.bing.com/images/create)|`g4f.Provider.BingCreateImages`|❌|✔|❌|❌|![Active](https://img.shields.io/badge/Active-brightgreen)|✔|
|[chat18.aichatos8.com](https://chat18.aichatos8.com)|`g4f.Provider.Binjie`|`gpt-4`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[chat.bixin123.com](https://chat.bixin123.com)|`g4f.Provider.Bixin123`|`gpt-3.5-turbo, gpt-4-turbo, qwen-turbo`|❌|❌||![Inactive](https://img.shields.io/badge/Inactive-red)||
|[blackbox.ai](https://www.blackbox.ai)|`g4f.Provider.Blackbox`|`blackbox, gemini-flash, llama-3.1-8b, llama-3.1-70b, llama-3.1-405b, gpt-4o, gemini-pro, claude-3.5-sonnet`|`flux`|✔|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[chatgpt.com](https://chatgpt.com)|`g4f.Provider.ChatGpt`|`?`|`?`|`?`|?|![Unknown](https://img.shields.io/badge/Unknown-grey) |❌|
|[chatgpt.es](https://chatgpt.es)|`g4f.Provider.ChatGptEs`|`gpt-4o, gpt-4o-mini`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[chatgpt4online.org](https://chatgpt4online.org)|`g4f.Provider.Chatgpt4Online`|`gpt-4`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[chatgpt4o.one](https://chatgpt4o.one)|`g4f.Provider.Chatgpt4o`|`gpt-4o-mini`|❌|❌|❌|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[chatgptfree.ai](https://chatgptfree.ai)|`g4f.Provider.ChatgptFree`|`gpt-4o-mini`|❌|❌|?|![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
|[duckduckgo.com](https://duckduckgo.com/duckchat/v1/chat)|`g4f.Provider.DDG`|`gpt-4o-mini, claude-3-haiku, llama-3.1-70b, mixtral-8x7b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[deepinfra.com](https://deepinfra.com)|`g4f.Provider.DeepInfra`|✔|❌|❌|✔|![Unknown](https://img.shields.io/badge/Unknown-grey)|✔|
|[deepinfra.com/chat](https://deepinfra.com/chat)|`g4f.Provider.DeepInfraChat`|`llama-3.1-405b, llama-3.1-70b, Llama-3.1-8B, mixtral-8x22b, mixtral-8x7b, wizardlm-2-8x22b, wizardlm-2-7b, qwen-2-72b, phi-3-medium-4k, gemma-2b-27b, minicpm-llama-3-v2.5, mistral-7b, lzlv_70b, openchat-3.6-8b, phind-codellama-34b-v2, dolphin-2.9.1-llama-3-70b`|❌|`minicpm-llama-3-v2.5`|❌|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[deepinfra.com](https://deepinfra.com)|`g4f.Provider.DeepInfraImage`|❌|✔|❌|❌|![Unknown](https://img.shields.io/badge/Unknown-grey)|✔|
|[flowgpt.com](https://flowgpt.com/chat)|`g4f.Provider.FlowGpt`|✔||❌|✔|![Disabled](https://img.shields.io/badge/Disabled-red)|❌|
|[chat10.free2gpt.xyz](https://chat10.free2gpt.xyz)|`g4f.Provider.Free2GPT`|`llama-3.1-70b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[chat.chatgpt.org.uk](https://chat.chatgpt.org.uk)|`g4f.Provider.FreeChatgpt`|`qwen-1.5-14b, sparkdesk-v1.1, qwen-2-7b, glm-4-9b, glm-3-6b, yi-1.5-9b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[freegptsnav.aifree.site](https://freegptsnav.aifree.site)|`g4f.Provider.FreeGpt`|`llama-3.1-70b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[free.netfly.top](https://free.netfly.top)|`g4f.Provider.FreeNetfly`|✔|❌|❌|?|![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
|[gemini.google.com](https://gemini.google.com)|`g4f.Provider.Gemini`|✔|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|✔|
|[ai.google.dev](https://ai.google.dev)|`g4f.Provider.GeminiPro`|✔|❌|✔|?|![Active](https://img.shields.io/badge/Active-brightgreen)|✔|
|[developers.sber.ru](https://developers.sber.ru/gigachat)|`g4f.Provider.GigaChat`|✔|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|✔|
|[console.groq.com/playground](https://console.groq.com/playground)|`g4f.Provider.Groq`|✔|❌|❌|?|![Active](https://img.shields.io/badge/Active-brightgreen)|✔|
|[huggingface.co/chat](https://huggingface.co/chat)|`g4f.Provider.HuggingChat`|`llama-3.1-70b, command-r-plus, mixtral-8x7b, mixtral-8x7b-dpo, mistral-7b, phi-3-mini-4k`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[huggingface.co](https://huggingface.co/chat)|`g4f.Provider.HuggingFace`|✔|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[koala.sh/chat](https://koala.sh/chat)|`g4f.Provider.Koala`|`gpt-4o-mini`|❌|❌|?|![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
|[liaobots.work](https://liaobots.work)|`g4f.Provider.Liaobots`|`gpt-3.5-turbo, gpt-4o-mini, gpt-4o, o1, o1-mini, gpt-4-turbo, grok-2, grok-2-mini, claude-3-opus, claude-3-sonnet, claude-3-5-sonnet, claude-3-haiku, claude-2.1, gemini-flash, gemini-pro`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[lite.icoding.ink](https://lite.icoding.ink)|`g4f.Provider.LiteIcoding`|`gpt-4o-mini, gpt-4-turbo, claude-3, claude-3.5, gemini-pro`|❌|❌|❌|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[magickpen.com](https://magickpen.com)|`g4f.Provider.MagickPen`|`gpt-4o-mini`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[meta.ai](https://www.meta.ai)|`g4f.Provider.MetaAI`|✔|✔|?|?|![Active](https://img.shields.io/badge/Active-brightgreen)|✔|
|[app.myshell.ai/chat](https://app.myshell.ai/chat)|`g4f.Provider.MyShell`|✔|❌|?|?|![Disabled](https://img.shields.io/badge/Disabled-red)|❌|
|[aryahcr.cc](https://nexra.aryahcr.cc)|`g4f.Provider.Nexra`|`gpt-3, gpt-3.5-turbo, gpt-4`|`dalle, dalle-2, dalle-mini, emi`|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[openrouter.ai](https://openrouter.ai)|`g4f.Provider.OpenRouter`|✔|❌|?|?|![Disabled](https://img.shields.io/badge/Disabled-red)|❌|
|[platform.openai.com](https://platform.openai.com/)|`g4f.Provider.Openai`|✔|❌|✔||![Unknown](https://img.shields.io/badge/Unknown-grey)|✔|
|[chatgpt.com](https://chatgpt.com/)|`g4f.Provider.OpenaiChat`|`gpt-4o, gpt-4o-mini, gpt-4`|❌|✔||![Unknown](https://img.shields.io/badge/Unknown-grey)|✔|
|[perplexity.ai](https://www.perplexity.ai)|`g4f.Provider.PerplexityAi`|✔|❌|❌|?|![Disabled](https://img.shields.io/badge/Disabled-red)|❌|
|[perplexity.ai](https://www.perplexity.ai)|`g4f.Provider.PerplexityApi`|✔|❌|❌|?|![Unknown](https://img.shields.io/badge/Unknown-grey)|✔|
|[labs.perplexity.ai](https://labs.perplexity.ai)|`g4f.Provider.PerplexityLabs`|`llama-3.1-8b, llama-3.1-70b`|❌|❌|?|![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
|[pi.ai/talk](https://pi.ai/talk)|`g4f.Provider.Pi`|`pi`|❌|❌|?|![Unknown](https://img.shields.io/badge/Unknown-grey)|❌|
|[]()|`g4f.Provider.Pizzagpt`|`gpt-4o-mini`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[poe.com](https://poe.com)|`g4f.Provider.Poe`|✔|❌|❌|?|![Unknown](https://img.shields.io/badge/Unknown-grey)|✔|
|[app.prodia.com](https://app.prodia.com)|`g4f.Provider.Prodia`|❌|✔|❌|❌|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[raycast.com](https://raycast.com)|`g4f.Provider.Raycast`|✔|❌|❌|✔|![Unknown](https://img.shields.io/badge/Unknown-grey)|✔|
|[chat.reka.ai](https://chat.reka.ai/)|`g4f.Provider.Reka`|✔|❌|✔|✔|![Unknown](https://img.shields.io/badge/Unknown-grey)|✔|
|[replicate.com](https://replicate.com)|`g4f.Provider.Replicate`|✔|❌|❌|?|![Unknown](https://img.shields.io/badge/Unknown-grey)|✔|
|[replicate.com](https://replicate.com)|`g4f.Provider.ReplicateHome`|`llama-3-70b, mixtral-8x7b, llava-13b`|`flux-schnell, sd-3, sdxl, playground-v2.5`|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[talkai.info](https://talkai.info)|`g4f.Provider.TalkAi`|✔|❌|❌|✔|![Disabled](https://img.shields.io/badge/Disabled-red)|❌|
|[teach-anything.com](https://www.teach-anything.com)|`g4f.Provider.TeachAnything`|`llama-3.1-70b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[beta.theb.ai](https://beta.theb.ai)|`g4f.Provider.Theb`|✔|❌|❌|✔|![Unknown](https://img.shields.io/badge/Unknown-grey)|✔|
|[beta.theb.ai](https://beta.theb.ai)|`g4f.Provider.ThebApi`|✔|❌|❌|✔|![Unknown](https://img.shields.io/badge/Unknown-grey)|✔|
|[console.upstage.ai/playground/chat](https://console.upstage.ai/playground/chat)|`g4f.Provider.Upstage`|`solar-pro, solar-1-mini`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[whiterabbitneo.com](https://www.whiterabbitneo.com)|`g4f.Provider.WhiteRabbitNeo`|✔|❌|❌|?|![Unknown](https://img.shields.io/badge/Unknown-grey)|✔|
|[you.com](https://you.com)|`g4f.Provider.You`|✔|✔|✔|✔|![Unknown](https://img.shields.io/badge/Unknown-grey)|❌+✔|
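
The table above only tells you which models each provider exposes; to pin a specific provider in code you pass it explicitly to g4f. Below is a minimal sketch of that call, assuming the `DDG` entry above is still active and still serves `gpt-4o-mini` — provider availability changes often, so treat the exact pair as an illustration rather than a guarantee.

```python
import g4f

# Pin a provider/model pair from the table above and stream the reply.
response = g4f.ChatCompletion.create(
    model="gpt-4o-mini",
    provider=g4f.Provider.DDG,
    messages=[{"role": "user", "content": "Say hello in one short sentence."}],
    stream=True,
)
for chunk in response:
    print(chunk, end="", flush=True)
```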
---
### Models
#### TextModel
|Model|Base Provider|Provider|Website|
|--|--|--|-|
|gpt-3|OpenAI|1+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-base)|
|gpt-3.5-turbo|OpenAI|6+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-3-5-turbo)|
|gpt-4|OpenAI|6+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4)|
|gpt-4-turbo|OpenAI|2+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4)|
|gpt-4o-mini|OpenAI|12+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4o-mini)|
|gpt-4o|OpenAI|6+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4o)|
|o1-mini|OpenAI|1+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/o1)|
|o1|OpenAI|1+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/o1)|
|llama-2-13b|Meta Llama|1+ Providers|[llama.com](https://www.llama.com/llama2/)|
|llama-3|Meta Llama|4+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3/)|
|llama-3-8b|Meta Llama|1+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3/)|
|llama-3-70b|Meta Llama|3+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3/)|
|llama-3.1|Meta Llama|16+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3-1/)|
|llama-3.1-8b|Meta Llama|4+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3-1/)|
|llama-3.1-70b|Meta Llama|9+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3-1/)|
|llama-3.1-405b|Meta Llama|3+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3-1/)|
|mistral-7b|Mistral AI|3+ Providers|[mistral.ai](https://mistral.ai/news/announcing-mistral-7b/)|
|mixtral-8x7b|Mistral AI|5+ Providers|[mistral.ai](https://mistral.ai/news/mixtral-of-experts/)|
|mixtral-8x22b|Mistral AI|2+ Providers|[mistral.ai](https://mistral.ai/news/mixtral-8x22b/)|
|mixtral-8x7b-dpo|NousResearch|2+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO)|
|gemini|Google DeepMind|7+ Providers|[deepmind.google](http://deepmind.google/technologies/gemini/)|
|gemini-flash|Google DeepMind|3+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/flash/)|
|gemini-pro|Google DeepMind|4+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/pro/)|
|gemma-2b|Google|4+ Providers|[huggingface.co](https://huggingface.co/google/gemma-2b)|
|gemma-2b-9b|Google|1+ Providers|[huggingface.co](https://huggingface.co/google/gemma-2-9b)|
|gemma-2b-27b|Google|2+ Providers|[huggingface.co](https://huggingface.co/google/gemma-2-27b)|
|claude-2|Anthropic|1+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-2)|
|claude-2.0|Anthropic|1+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-2)|
|claude-3|Anthropic|5+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-family)|
|claude-3-haiku|Anthropic|2+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-haiku)|
|claude-3-sonnet|Anthropic|1+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-family)|
|claude-3-opus|Anthropic|1+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-family)|
|claude-3.5|Anthropic|2+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-5-sonnet)|
|claude-3.5-sonnet|Anthropic|1+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-5-sonnet)|
|blackbox|Blackbox AI|1+ Providers|[docs.blackbox.chat](https://docs.blackbox.chat/blackbox-ai-1)|
|yi-34b|01-ai|1+ Providers|[huggingface.co](https://huggingface.co/01-ai/Yi-34B)|
|yi-1.5-9b|01-ai|1+ Providers|[huggingface.co](https://huggingface.co/01-ai/Yi-1.5-9B)|
|phi-3-mini-4k|Microsoft|1+ Providers|[huggingface.co](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct)|
|phi-3-medium-4k|Microsoft|1+ Providers|[huggingface.co](https://huggingface.co/microsoft/Phi-3-medium-4k-instruct)|
|dbrx-instruct|Databricks|1+ Providers|[huggingface.co](https://huggingface.co/databricks/dbrx-instruct)|
|command-r-plus|CohereForAI|1+ Providers|[docs.cohere.com](https://docs.cohere.com/docs/command-r-plus)|
|sparkdesk-v1.1|iFlytek|2+ Providers|[xfyun.cn](https://www.xfyun.cn/doc/spark/Guide.html)|
|qwen|Qwen|7+ Providers|[huggingface.co](https://huggingface.co/Qwen)|
|qwen-1.5-14b|Qwen|1+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen1.5-14B)|
|qwen-1.5-72b|Qwen|1+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen1.5-72B)|
|qwen-2-72b|Qwen|2+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen2-72B)|
|qwen-turbo|Qwen|1+ Providers|[qwenlm.github.io](https://qwenlm.github.io/blog/qwen2.5/)|
|glm-3-6b|Zhipu AI|1+ Providers|[github.com/THUDM/ChatGLM3](https://github.com/THUDM/ChatGLM3)|
|glm-4-9b|Zhipu AI|1+ Providers|[github.com/THUDM/GLM-4](https://github.com/THUDM/GLM-4)|
|glm-4|Zhipu AI|1+ Providers|[github.com/THUDM/GLM-4](https://github.com/THUDM/GLM-4)|
|solar-1-mini|Upstage|1+ Providers|[upstage.ai/](https://www.upstage.ai/feed/product/solarmini-performance-report)|
|solar-10-7b|Upstage|1+ Providers|[huggingface.co](https://huggingface.co/upstage/SOLAR-10.7B-Instruct-v1.0)|
|solar-pro|Upstage|1+ Providers|[huggingface.co](https://huggingface.co/upstage/solar-pro-preview-instruct)|
|pi|Inflection|1+ Providers|[inflection.ai](https://inflection.ai/blog/inflection-2-5)|
|deepseek|DeepSeek|1+ Providers|[deepseek.com](https://www.deepseek.com/)|
|wizardlm-2-7b|WizardLM|1+ Providers|[huggingface.co](https://huggingface.co/dreamgen/WizardLM-2-7B)|
|wizardlm-2-8x22b|WizardLM|2+ Providers|[huggingface.co](https://huggingface.co/alpindale/WizardLM-2-8x22B)|
|sh-n-7b|Together|1+ Providers|[huggingface.co](https://huggingface.co/togethercomputer/StripedHyena-Nous-7B)|
|llava-13b|Yorickvp|1+ Providers|[huggingface.co](https://huggingface.co/liuhaotian/llava-v1.5-13b)|
|minicpm-llama-3-v2.5|OpenBMB|1+ Providers|[huggingface.co](https://huggingface.co/openbmb/MiniCPM-Llama3-V-2_5)|
|lzlv-70b|Lzlv|1+ Providers|[huggingface.co](https://huggingface.co/lizpreciatior/lzlv_70b_fp16_hf)|
|openchat-3.6-8b|OpenChat|1+ Providers|[huggingface.co](https://huggingface.co/openchat/openchat-3.6-8b-20240522)|
|phind-codellama-34b-v2|Phind|1+ Providers|[huggingface.co](https://huggingface.co/Phind/Phind-CodeLlama-34B-v2)|
|dolphin-2.9.1-llama-3-70b|Cognitive Computations|1+ Providers|[huggingface.co](https://huggingface.co/cognitivecomputations/dolphin-2.9.1-llama-3-70b)|
|grok-2-mini|x.ai|1+ Providers|[x.ai](https://x.ai/blog/grok-2)|
|grok-2|x.ai|1+ Providers|[x.ai](https://x.ai/blog/grok-2)|
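
If you only care about the model, you can request it by the alias in the first column and let g4f resolve a working provider for you. A minimal sketch, assuming the `llama-3.1-70b` alias is registered in `g4f.models` in this release:

```python
import g4f

# Request a model by its alias; g4f picks one of the providers listed above.
response = g4f.ChatCompletion.create(
    model="llama-3.1-70b",
    messages=[{"role": "user", "content": "Explain retrieval-augmented generation in two sentences."}],
)
print(response)
```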
---
#### ImageModel
|Model|Base Provider|Provider|Website|
|--|--|--|-|
|sdxl|Stability AI|1+ Providers|[huggingface.co](https://huggingface.co/docs/diffusers/en/using-diffusers/sdxl)|
|sd_3|Stability AI|1+ Providers|[huggingface.co](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/stable_diffusion_3)|
|playground-v2.5|Playground AI|1+ Providers|[huggingface.co](https://huggingface.co/playgroundai/playground-v2.5-1024px-aesthetic)|
|flux|Flux AI|2+ Providers|[github.com/black-forest-labs/flux](https://github.com/black-forest-labs/flux)|
|flux-realism|Flux AI|1+ Providers|[]()|
|flux-anime|Flux AI|1+ Providers|[]()|
|flux-3d|Flux AI|1+ Providers|[]()|
|flux-disney|Flux AI|1+ Providers|[]()|
|flux-pixel|Flux AI|1+ Providers|[]()|
|flux-4o|Flux AI|1+ Providers|[]()|
|flux-schnell|Flux AI|1+ Providers|[huggingface.co](https://huggingface.co/black-forest-labs/FLUX.1-schnell)|
|dalle||2+ Providers|[]()|
|dalle-2||1+ Providers|[]()|
|dalle-3||1+ Providers|[]()|
|dalle-mini||1+ Providers|[]()|
|emi||1+ Providers|[]()|
|any_dark||1+ Providers|[]()|
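
Image models are requested through the client image API rather than the chat completion call. A minimal sketch, assuming the `g4f.client.Client().images.generate` interface is available in this release and that the `flux` alias above resolves to a working image provider:

```python
from g4f.client import Client

client = Client()
# Generate an image with one of the aliases from the table above.
response = client.images.generate(
    model="flux",
    prompt="a watercolor painting of a lighthouse at dawn",
)
print(response.data[0].url)  # URL (or data URI) of the generated image
```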


@ -16,11 +16,11 @@ class AI365VIP(AsyncGeneratorProvider, ProviderModelMixin):
default_model = 'gpt-3.5-turbo' default_model = 'gpt-3.5-turbo'
models = [ models = [
'gpt-3.5-turbo', 'gpt-3.5-turbo',
'gpt-3.5-turbo-16k',
'gpt-4o', 'gpt-4o',
'claude-3-haiku-20240307',
] ]
model_aliases = { model_aliases = {
"claude-3-haiku": "claude-3-haiku-20240307", "gpt-3.5-turbo": "gpt-3.5-turbo-16k",
} }
@classmethod @classmethod


@ -1,20 +1,17 @@
from __future__ import annotations from __future__ import annotations
from aiohttp import ClientSession, ClientResponseError from aiohttp import ClientSession, ClientResponseError
from urllib.parse import urlencode
import json import json
import io
import asyncio
from ..typing import AsyncResult, Messages from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..image import ImageResponse, is_accepted_format from ..image import ImageResponse
from .helper import format_prompt from .helper import format_prompt
from ..errors import ResponseStatusError
class Airforce(AsyncGeneratorProvider, ProviderModelMixin): class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://api.airforce" url = "https://api.airforce"
text_api_endpoint = "https://api.airforce/chat/completions" text_api_endpoint = "https://api.airforce/chat/completions"
image_api_endpoint = "https://api.airforce/v1/imagine2" image_api_endpoint = "https://api.airforce/imagine2"
working = True working = True
supports_gpt_35_turbo = True supports_gpt_35_turbo = True
supports_gpt_4 = True supports_gpt_4 = True
@ -25,53 +22,38 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
text_models = [ text_models = [
# Open source models # Open source models
'llama-2-13b-chat', 'llama-2-13b-chat',
'llama-3-70b-chat', 'llama-3-70b-chat',
'llama-3-70b-chat-turbo', 'llama-3-70b-chat-turbo',
'llama-3-70b-chat-lite', 'llama-3-70b-chat-lite',
'llama-3-8b-chat', 'llama-3-8b-chat',
'llama-3-8b-chat-turbo', 'llama-3-8b-chat-turbo',
'llama-3-8b-chat-lite', 'llama-3-8b-chat-lite',
'llama-3.1-405b-turbo', 'llama-3.1-405b-turbo',
'llama-3.1-70b-turbo', 'llama-3.1-70b-turbo',
'llama-3.1-8b-turbo', 'llama-3.1-8b-turbo',
'LlamaGuard-2-8b', 'LlamaGuard-2-8b',
'Llama-Guard-7b', 'Llama-Guard-7b',
'Meta-Llama-Guard-3-8B', 'Meta-Llama-Guard-3-8B',
'Mixtral-8x7B-Instruct-v0.1', 'Mixtral-8x7B-Instruct-v0.1',
'Mixtral-8x22B-Instruct-v0.1', 'Mixtral-8x22B-Instruct-v0.1',
'Mistral-7B-Instruct-v0.1', 'Mistral-7B-Instruct-v0.1',
'Mistral-7B-Instruct-v0.2', 'Mistral-7B-Instruct-v0.2',
'Mistral-7B-Instruct-v0.3', 'Mistral-7B-Instruct-v0.3',
'Qwen1.5-72B-Chat', 'Qwen1.5-72B-Chat',
'Qwen1.5-110B-Chat', 'Qwen1.5-110B-Chat',
'Qwen2-72B-Instruct', 'Qwen2-72B-Instruct',
'gemma-2b-it', 'gemma-2b-it',
'gemma-2-9b-it', 'gemma-2-9b-it',
'gemma-2-27b-it', 'gemma-2-27b-it',
'dbrx-instruct', 'dbrx-instruct',
'deepseek-llm-67b-chat', 'deepseek-llm-67b-chat',
'Nous-Hermes-2-Mixtral-8x7B-DPO', 'Nous-Hermes-2-Mixtral-8x7B-DPO',
'Nous-Hermes-2-Yi-34B', 'Nous-Hermes-2-Yi-34B',
'WizardLM-2-8x22B', 'WizardLM-2-8x22B',
'SOLAR-10.7B-Instruct-v1.0', 'SOLAR-10.7B-Instruct-v1.0',
'StripedHyena-Nous-7B', 'StripedHyena-Nous-7B',
'sparkdesk', 'sparkdesk',
# Other models # Other models
'chatgpt-4o-latest', 'chatgpt-4o-latest',
'gpt-4', 'gpt-4',
@ -85,10 +67,10 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-16k',
'gpt-3.5-turbo-0613', 'gpt-3.5-turbo-0613',
'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-16k-0613',
'gemini-1.5-flash', 'gemini-1.5-flash',
'gemini-1.5-pro', 'gemini-1.5-pro',
] ]
image_models = [ image_models = [
'flux', 'flux',
'flux-realism', 'flux-realism',
@ -96,7 +78,9 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
'flux-3d', 'flux-3d',
'flux-disney', 'flux-disney',
'flux-pixel', 'flux-pixel',
'flux-4o',
'any-dark', 'any-dark',
'dall-e-3',
] ]
models = [ models = [
@ -106,61 +90,47 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
model_aliases = { model_aliases = {
# Open source models # Open source models
"llama-2-13b": "llama-2-13b-chat", "llama-2-13b": "llama-2-13b-chat",
"llama-3-70b": "llama-3-70b-chat", "llama-3-70b": "llama-3-70b-chat",
"llama-3-70b": "llama-3-70b-chat-turbo", "llama-3-70b": "llama-3-70b-chat-turbo",
"llama-3-70b": "llama-3-70b-chat-lite", "llama-3-70b": "llama-3-70b-chat-lite",
"llama-3-8b": "llama-3-8b-chat", "llama-3-8b": "llama-3-8b-chat",
"llama-3-8b": "llama-3-8b-chat-turbo", "llama-3-8b": "llama-3-8b-chat-turbo",
"llama-3-8b": "llama-3-8b-chat-lite", "llama-3-8b": "llama-3-8b-chat-lite",
"llama-3.1-405b": "llama-3.1-405b-turbo", "llama-3.1-405b": "llama-3.1-405b-turbo",
"llama-3.1-70b": "llama-3.1-70b-turbo", "llama-3.1-70b": "llama-3.1-70b-turbo",
"llama-3.1-8b": "llama-3.1-8b-turbo", "llama-3.1-8b": "llama-3.1-8b-turbo",
"mixtral-8x7b": "Mixtral-8x7B-Instruct-v0.1", "mixtral-8x7b": "Mixtral-8x7B-Instruct-v0.1",
"mixtral-8x22b": "Mixtral-8x22B-Instruct-v0.1", "mixtral-8x22b": "Mixtral-8x22B-Instruct-v0.1",
"mistral-7b": "Mistral-7B-Instruct-v0.1", "mistral-7b": "Mistral-7B-Instruct-v0.1",
"mistral-7b": "Mistral-7B-Instruct-v0.2", "mistral-7b": "Mistral-7B-Instruct-v0.2",
"mistral-7b": "Mistral-7B-Instruct-v0.3", "mistral-7b": "Mistral-7B-Instruct-v0.3",
"mixtral-8x7b-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO", "mixtral-8x7b-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO",
"qwen-1.5-72b": "Qwen1.5-72B-Chat",
"qwen-1-5-72b": "Qwen1.5-72B-Chat", "qwen-1.5-110b": "Qwen1.5-110B-Chat",
"qwen-1_5-110b": "Qwen1.5-110B-Chat",
"qwen-2-72b": "Qwen2-72B-Instruct", "qwen-2-72b": "Qwen2-72B-Instruct",
"gemma-2b": "gemma-2b-it", "gemma-2b": "gemma-2b-it",
"gemma-2b-9b": "gemma-2-9b-it", "gemma-2b-9b": "gemma-2-9b-it",
"gemma-2b-27b": "gemma-2-27b-it", "gemma-2b-27b": "gemma-2-27b-it",
"deepseek": "deepseek-llm-67b-chat", "deepseek": "deepseek-llm-67b-chat",
"yi-34b": "Nous-Hermes-2-Yi-34B", "yi-34b": "Nous-Hermes-2-Yi-34B",
"wizardlm-2-8x22b": "WizardLM-2-8x22B", "wizardlm-2-8x22b": "WizardLM-2-8x22B",
"solar-10-7b": "SOLAR-10.7B-Instruct-v1.0", "solar-10-7b": "SOLAR-10.7B-Instruct-v1.0",
"sh-n-7b": "StripedHyena-Nous-7B", "sh-n-7b": "StripedHyena-Nous-7B",
"sparkdesk-v1.1": "sparkdesk", "sparkdesk-v1.1": "sparkdesk",
# Other models # Other models
"gpt-4o": "chatgpt-4o-latest", "gpt-4o": "chatgpt-4o-latest",
"gpt-4o-mini": "gpt-4o-mini-2024-07-18", "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
"gpt-3.5-turbo": "gpt-3.5-turbo-0125", "gpt-3.5-turbo": "gpt-3.5-turbo-0125",
"gpt-3.5-turbo": "gpt-3.5-turbo-1106", "gpt-3.5-turbo": "gpt-3.5-turbo-1106",
"gpt-3.5-turbo": "gpt-3.5-turbo-16k", "gpt-3.5-turbo": "gpt-3.5-turbo-16k",
"gpt-3.5-turbo": "gpt-3.5-turbo-0613", "gpt-3.5-turbo": "gpt-3.5-turbo-0613",
"gpt-3.5-turbo": "gpt-3.5-turbo-16k-0613", "gpt-3.5-turbo": "gpt-3.5-turbo-16k-0613",
"gemini-flash": "gemini-1.5-flash", "gemini-flash": "gemini-1.5-flash",
"gemini-pro": "gemini-1.5-pro", "gemini-pro": "gemini-1.5-pro",
# Image models
"dalle-3": "dall-e-3",
} }
@classmethod @classmethod
@ -178,16 +148,20 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
"accept-language": "en-US,en;q=0.9", "accept-language": "en-US,en;q=0.9",
"content-type": "application/json", "content-type": "application/json",
"origin": "https://api.airforce", "origin": "https://api.airforce",
"sec-ch-ua": '"Chromium";v="128", "Not(A:Brand";v="24"', "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36",
"authorization": "Bearer null",
"cache-control": "no-cache",
"pragma": "no-cache",
"priority": "u=1, i",
"referer": "https://llmplayground.net/",
"sec-ch-ua": '"Not;A=Brand";v="24", "Chromium";v="128"',
"sec-ch-ua-mobile": "?0", "sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Linux"', "sec-ch-ua-platform": '"Linux"',
"sec-fetch-dest": "empty", "sec-fetch-dest": "empty",
"sec-fetch-mode": "cors", "sec-fetch-mode": "cors",
"sec-fetch-site": "cross-site", "sec-fetch-site": "cross-site",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36"
} }
if model in cls.image_models: if model in cls.image_models:
async for item in cls.generate_image(model, messages, headers, proxy, **kwargs): async for item in cls.generate_image(model, messages, headers, proxy, **kwargs):
yield item yield item
@ -197,31 +171,44 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
@classmethod @classmethod
async def generate_text(cls, model: str, messages: Messages, headers: dict, proxy: str, **kwargs) -> AsyncResult: async def generate_text(cls, model: str, messages: Messages, headers: dict, proxy: str, **kwargs) -> AsyncResult:
async with ClientSession(headers=headers) as session: async with ClientSession() as session:
data = { data = {
"messages": [{"role": "user", "content": format_prompt(messages)}], "messages": [{"role": "user", "content": message['content']} for message in messages],
"model": model, "model": model,
"max_tokens": kwargs.get('max_tokens', 4096),
"temperature": kwargs.get('temperature', 1), "temperature": kwargs.get('temperature', 1),
"top_p": kwargs.get('top_p', 1), "top_p": kwargs.get('top_p', 1),
"stream": True "stream": True
} }
async with session.post(cls.text_api_endpoint, json=data, proxy=proxy) as response: try:
async with session.post(cls.text_api_endpoint, json=data, headers=headers, proxy=proxy) as response:
response.raise_for_status() response.raise_for_status()
async for line in response.content: async for line in response.content:
if line: if line:
line = line.decode('utf-8').strip() line = line.decode('utf-8').strip()
if line.startswith("data: "): if line.startswith("data: "):
if line == "data: [DONE]":
break
try: try:
data = json.loads(line[6:]) data = json.loads(line[6:])
if 'choices' in data and len(data['choices']) > 0: if 'choices' in data and len(data['choices']) > 0:
delta = data['choices'][0].get('delta', {}) delta = data['choices'][0].get('delta', {})
if 'content' in delta: if 'content' in delta:
yield delta['content'] content = delta['content']
if "One message exceeds the 1000chars per message limit" in content:
raise ResponseStatusError(
"Message too long",
400,
"Please try a shorter message."
)
yield content
except json.JSONDecodeError: except json.JSONDecodeError:
continue continue
elif line == "data: [DONE]": except ResponseStatusError as e:
break raise e
except Exception as e:
raise ResponseStatusError(str(e), 500, "An unexpected error occurred")
@classmethod @classmethod
async def generate_image(cls, model: str, messages: Messages, headers: dict, proxy: str, **kwargs) -> AsyncResult: async def generate_image(cls, model: str, messages: Messages, headers: dict, proxy: str, **kwargs) -> AsyncResult:
@ -246,10 +233,10 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
else: else:
try: try:
text = content.decode('utf-8', errors='ignore') text = content.decode('utf-8', errors='ignore')
yield f"Error: {text}" raise ResponseStatusError("Image generation failed", response.status, text)
except Exception as decode_error: except Exception as decode_error:
yield f"Error: Unable to decode response - {str(decode_error)}" raise ResponseStatusError("Decoding error", 500, str(decode_error))
except ClientResponseError as e: except ClientResponseError as e:
yield f"Error: HTTP {e.status}: {e.message}" raise ResponseStatusError(f"HTTP {e.status}", e.status, e.message)
except Exception as e: except Exception as e:
yield f"Unexpected error: {str(e)}" raise ResponseStatusError("Unexpected error", 500, str(e))


@ -9,7 +9,7 @@ from ..webdriver import WebDriver
class Aura(AsyncGeneratorProvider): class Aura(AsyncGeneratorProvider):
url = "https://openchat.team" url = "https://openchat.team"
working = True working = False
@classmethod @classmethod
async def create_async_generator( async def create_async_generator(


@ -15,7 +15,7 @@ class Bixin123(AsyncGeneratorProvider, ProviderModelMixin):
supports_gpt_4 = True supports_gpt_4 = True
default_model = 'gpt-3.5-turbo-0125' default_model = 'gpt-3.5-turbo-0125'
models = ['gpt-3.5-turbo-0125', 'gpt-3.5-turbo-16k-0613', 'gpt-4-turbo', 'qwen-turbo'] models = ['gpt-3.5-turbo', 'gpt-3.5-turbo-0125', 'gpt-3.5-turbo-16k-0613', 'gpt-4-turbo', 'qwen-turbo']
model_aliases = { model_aliases = {
"gpt-3.5-turbo": "gpt-3.5-turbo-0125", "gpt-3.5-turbo": "gpt-3.5-turbo-0125",


@ -1,7 +1,6 @@
from __future__ import annotations from __future__ import annotations
import re import re
import json
import random import random
import string import string
from aiohttp import ClientSession from aiohttp import ClientSession
@ -25,22 +24,44 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
"llama-3.1-8b", "llama-3.1-8b",
'llama-3.1-70b', 'llama-3.1-70b',
'llama-3.1-405b', 'llama-3.1-405b',
'ImageGenerationLV45LJp' 'ImageGenerationLV45LJp',
'GPT-4o',
'Gemini-PRO',
'Claude-Sonnet-3.5',
] ]
model_config = { model_aliases = {
"gemini-flash": "gemini-1.5-flash",
"flux": "ImageGenerationLV45LJp",
"gpt-4o": "GPT-4o",
"gemini-pro": "Gemini-PRO",
"claude-3.5-sonnet": "Claude-Sonnet-3.5",
}
agentMode = {
'ImageGenerationLV45LJp': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
}
trendingAgentMode = {
"blackbox": {}, "blackbox": {},
"gemini-1.5-flash": {'mode': True, 'id': 'Gemini'}, "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
"llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"}, "llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"}, 'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"}, 'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"},
'ImageGenerationLV45LJp': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"}, }
userSelectedModel = {
"GPT-4o": "GPT-4o",
"Gemini-PRO": "Gemini-PRO",
'Claude-Sonnet-3.5': "Claude-Sonnet-3.5",
} }
@classmethod @classmethod
def get_model(cls, model: str) -> str: def get_model(cls, model: str) -> str:
if model in cls.models: if model in cls.models:
return model return model
elif model in cls.userSelectedModel:
return model
elif model in cls.model_aliases: elif model in cls.model_aliases:
return cls.model_aliases[model] return cls.model_aliases[model]
else: else:
@ -75,6 +96,11 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36" "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36"
} }
if model in cls.userSelectedModel:
prefix = f"@{cls.userSelectedModel[model]}"
if not messages[0]['content'].startswith(prefix):
messages[0]['content'] = f"{prefix} {messages[0]['content']}"
async with ClientSession(headers=headers) as session: async with ClientSession(headers=headers) as session:
if image is not None: if image is not None:
messages[-1]["data"] = { messages[-1]["data"] = {
@ -92,21 +118,27 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
"codeModelMode": True, "codeModelMode": True,
"agentMode": {}, "agentMode": {},
"trendingAgentMode": {}, "trendingAgentMode": {},
"userSelectedModel": None,
"isMicMode": False, "isMicMode": False,
"maxTokens": None, "maxTokens": 99999999,
"playgroundTopP": 0.9,
"playgroundTemperature": 0.5,
"isChromeExt": False, "isChromeExt": False,
"githubToken": None, "githubToken": None,
"clickedAnswer2": False, "clickedAnswer2": False,
"clickedAnswer3": False, "clickedAnswer3": False,
"clickedForceWebSearch": False, "clickedForceWebSearch": False,
"visitFromDelta": False, "visitFromDelta": False,
"mobileClient": False "mobileClient": False,
"webSearchMode": False,
} }
if model == 'ImageGenerationLV45LJp': if model in cls.agentMode:
data["agentMode"] = cls.model_config[model] data["agentMode"] = cls.agentMode[model]
else: elif model in cls.trendingAgentMode:
data["trendingAgentMode"] = cls.model_config[model] data["trendingAgentMode"] = cls.trendingAgentMode[model]
elif model in cls.userSelectedModel:
data["userSelectedModel"] = cls.userSelectedModel[model]
async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
response.raise_for_status() response.raise_for_status()
@ -119,9 +151,10 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
else: else:
raise Exception("Image URL not found in the response") raise Exception("Image URL not found in the response")
else: else:
async for chunk in response.content: async for chunk in response.content.iter_any():
if chunk: if chunk:
decoded_chunk = chunk.decode() decoded_chunk = chunk.decode()
if decoded_chunk.startswith('$@$v=undefined-rv1$@$'): # Remove the $@$v=v1.10-rv1$@$ prefix and similar version markers
decoded_chunk = decoded_chunk[len('$@$v=undefined-rv1$@$'):] decoded_chunk = re.sub(r'\$@\$v=[^$]+\$@\$', '', decoded_chunk)
if decoded_chunk.strip(): # Check whether any text remains after the prefix is removed
yield decoded_chunk yield decoded_chunk


@ -1,75 +0,0 @@
from __future__ import annotations
import time
from hashlib import sha256
from aiohttp import BaseConnector, ClientSession
from ..errors import RateLimitError
from ..requests import raise_for_status
from ..requests.aiohttp import get_connector
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
class ChatGot(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://www.chatgot.one/"
working = True
supports_message_history = True
default_model = 'gemini-pro'
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
connector: BaseConnector = None,
**kwargs,
) -> AsyncResult:
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:122.0) Gecko/20100101 Firefox/122.0",
"Accept": "*/*",
"Accept-Language": "en-US,en;q=0.5",
"Accept-Encoding": "gzip, deflate, br",
"Content-Type": "text/plain;charset=UTF-8",
"Referer": f"{cls.url}/",
"Origin": cls.url,
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
"Connection": "keep-alive",
"TE": "trailers",
}
async with ClientSession(
connector=get_connector(connector, proxy), headers=headers
) as session:
timestamp = int(time.time() * 1e3)
data = {
"messages": [
{
"role": "model" if message["role"] == "assistant" else "user",
"parts": [{"text": message["content"]}],
}
for message in messages
],
"time": timestamp,
"pass": None,
"sign": generate_signature(timestamp, messages[-1]["content"]),
}
async with session.post(
f"{cls.url}/api/generate", json=data, proxy=proxy
) as response:
if response.status == 500:
if "Quota exceeded" in await response.text():
raise RateLimitError(
f"Response {response.status}: Rate limit reached"
)
await raise_for_status(response)
async for chunk in response.content.iter_any():
yield chunk.decode(errors="ignore")
def generate_signature(time: int, text: str, secret: str = ""):
message = f"{time}:{text}:{secret}"
return sha256(message.encode()).hexdigest()


@ -76,7 +76,13 @@ class ChatGpt(AbstractProvider, ProviderModelMixin):
supports_message_history = True supports_message_history = True
supports_system_message = True supports_system_message = True
supports_stream = True supports_stream = True
models = [
'gpt-4o',
'gpt-4o-mini',
'gpt-4',
'gpt-4-turbo',
'chatgpt-4o-latest',
]
@classmethod @classmethod
def create_completion( def create_completion(


@ -1,74 +1,64 @@
from __future__ import annotations from __future__ import annotations
from ..typing import Messages, CreateResult from aiohttp import ClientSession
from ..providers.base_provider import AbstractProvider, ProviderModelMixin import os
import json
import re
import requests, os, re, json from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
class ChatGptEs(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://chatgpt.es"
    api_endpoint = "https://chatgpt.es/wp-admin/admin-ajax.php"
    working = True
    supports_gpt_4 = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = 'gpt-4o'
    models = ['gpt-4o', 'gpt-4o-mini', 'chatgpt-4o-latest']
    model_aliases = {
        "gpt-4o": "chatgpt-4o-latest",
    }

    @classmethod
    def get_model(cls, model: str) -> str:
        if model in cls.models:
            return model
        elif model in cls.model_aliases:
            return cls.model_aliases[model]
        else:
            return cls.default_model

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        model = cls.get_model(model)

        headers = {
            "authority": "chatgpt.es",
            "accept": "application/json",
            "origin": cls.url,
            "referer": f"{cls.url}/chat",
            "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36",
        }

        async with ClientSession(headers=headers) as session:
            initial_response = await session.get(cls.url)
            nonce_ = re.findall(r'data-nonce="(.+?)"', await initial_response.text())[0]
            post_id = re.findall(r'data-post-id="(.+?)"', await initial_response.text())[0]

            conversation_history = [
                "Human: strictly respond in the same language as my prompt, preferably English"
            ]

            for message in messages[:-1]:
@@ -80,7 +70,7 @@ class ChatGptEs(AbstractProvider, ProviderModelMixin):
            payload = {
                '_wpnonce': nonce_,
                'post_id': post_id,
                'url': cls.url,
                'action': 'wpaicg_chat_shortcode_message',
                'message': messages[-1]['content'],
                'bot_id': '0',
@@ -89,7 +79,7 @@ class ChatGptEs(AbstractProvider, ProviderModelMixin):
                'wpaicg_chat_history': json.dumps(conversation_history)
            }

            async with session.post(cls.api_endpoint, headers=headers, data=payload) as response:
                response.raise_for_status()
                result = await response.json()
                yield result['data']
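As a quick sanity check for the rewritten provider above, a minimal usage sketch; it assumes the standard `g4f.ChatCompletion.create` entry point and that `ChatGptEs` is exported from `g4f.Provider` as shown later in this commit:

```python
import g4f

# gpt-4o is the default model; "gpt-4o-mini" and "chatgpt-4o-latest" are also accepted.
response = g4f.ChatCompletion.create(
    model="gpt-4o",
    provider=g4f.Provider.ChatGptEs,
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response)
```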


@@ -13,7 +13,13 @@ class Chatgpt4o(AsyncProvider, ProviderModelMixin):
     working = True
     _post_id = None
     _nonce = None
-    default_model = 'gpt-4o'
+    default_model = 'gpt-4o-mini-2024-07-18'
+    models = [
+        'gpt-4o-mini-2024-07-18',
+    ]
+    model_aliases = {
+        "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
+    }
 
     @classmethod


@ -1,94 +0,0 @@
from __future__ import annotations
from aiohttp import ClientSession
from asyncio import sleep
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
class CodeNews(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://codenews.cc"
api_endpoint = "https://codenews.cc/chatxyz13"
working = True
supports_gpt_35_turbo = True
supports_gpt_4 = False
supports_stream = True
supports_system_message = False
supports_message_history = False
default_model = 'free_gpt'
models = ['free_gpt', 'gpt-4o-mini', 'deepseek-coder', 'chatpdf']
model_aliases = {
"glm-4": "free_gpt",
"gpt-3.5-turbo": "chatpdf",
"deepseek": "deepseek-coder",
}
@classmethod
def get_model(cls, model: str) -> str:
if model in cls.models:
return model
elif model in cls.model_aliases:
return cls.model_aliases[model]
else:
return cls.default_model
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
headers = {
"accept": "application/json, text/javascript, */*; q=0.01",
"accept-language": "en-US,en;q=0.9",
"cache-control": "no-cache",
"content-type": "application/x-www-form-urlencoded; charset=UTF-8",
"origin": cls.url,
"pragma": "no-cache",
"priority": "u=1, i",
"referer": f"{cls.url}/chatgpt",
"sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Linux"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
"x-requested-with": "XMLHttpRequest",
}
async with ClientSession(headers=headers) as session:
prompt = format_prompt(messages)
data = {
"chatgpt_input": prompt,
"qa_type2": model,
"chatgpt_version_value": "20240804",
"enable_web_search": "0",
"enable_agent": "0",
"dy_video_text_extract": "0",
"enable_summary": "0",
}
async with session.post(cls.api_endpoint, data=data, proxy=proxy) as response:
response.raise_for_status()
json_data = await response.json()
chat_id = json_data["data"]["id"]
headers["content-type"] = "application/x-www-form-urlencoded; charset=UTF-8"
data = {"current_req_count": "2"}
while True:
async with session.post(f"{cls.url}/chat_stream", headers=headers, data=data, proxy=proxy) as response:
response.raise_for_status()
json_data = await response.json()
if json_data["data"]:
yield json_data["data"]
break
else:
await sleep(1)  # Delay before the next request


@@ -2,60 +2,57 @@ from __future__ import annotations

import json
import aiohttp
from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt


class DDG(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://duckduckgo.com"
    api_endpoint = "https://duckduckgo.com/duckchat/v1/chat"
    working = True
    supports_gpt_4 = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = "gpt-4o-mini"
    models = [
        "gpt-4o-mini",
        "claude-3-haiku-20240307",
        "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
        "mistralai/Mixtral-8x7B-Instruct-v0.1"
    ]
    model_aliases = {
        "claude-3-haiku": "claude-3-haiku-20240307",
        "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
        "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1"
    }

    @classmethod
    def get_model(cls, model: str) -> str:
        return cls.model_aliases.get(model, model) if model in cls.model_aliases else cls.default_model

    @classmethod
    async def get_vqd(cls):
        status_url = "https://duckduckgo.com/duckchat/v1/status"
        headers = {
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36',
            'Accept': 'text/event-stream',
            'x-vqd-accept': '1'
        }
        async with aiohttp.ClientSession() as session:
            try:
                async with session.get(status_url, headers=headers) as response:
                    if response.status == 200:
                        return response.headers.get("x-vqd-4")
                    else:
                        print(f"Error: Status code {response.status}")
                        return None
            except Exception as e:
                print(f"Error getting VQD: {e}")
                return None

@@ -65,52 +62,48 @@ class DDG(AsyncGeneratorProvider, ProviderModelMixin):
        cls,
        model: str,
        messages: Messages,
        conversation: dict = None,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        model = cls.get_model(model)

        headers = {
            'accept': 'text/event-stream',
            'content-type': 'application/json',
            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36',
        }

        vqd = conversation.get('vqd') if conversation else await cls.get_vqd()
        if not vqd:
            raise Exception("Failed to obtain VQD token")

        headers['x-vqd-4'] = vqd

        if conversation:
            message_history = conversation.get('messages', [])
            message_history.append({"role": "user", "content": format_prompt(messages)})
        else:
            message_history = [{"role": "user", "content": format_prompt(messages)}]

        async with ClientSession(headers=headers) as session:
            data = {
                "model": model,
                "messages": message_history
            }
            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                response.raise_for_status()
                async for line in response.content:
                    if line:
                        decoded_line = line.decode('utf-8')
                        if decoded_line.startswith('data: '):
                            json_str = decoded_line[6:]
                            if json_str == '[DONE]':
                                break
                            try:
                                json_data = json.loads(json_str)
                                if 'message' in json_data:
                                    yield json_data['message']
                            except json.JSONDecodeError:
                                pass
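The reworked DDG flow is: fetch an `x-vqd-4` token from the status endpoint, then POST the message history with that token. A minimal sketch of driving it directly, assuming the class is importable as `g4f.Provider.DDG` and that the dict-based `conversation` argument carries the token between turns:

```python
import asyncio
from g4f.Provider import DDG

async def main():
    vqd = await DDG.get_vqd()  # GET /duckchat/v1/status with the x-vqd-accept header
    conversation = {"vqd": vqd, "messages": []}
    async for chunk in DDG.create_async_generator(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "Hi there"}],
        conversation=conversation,
    ):
        print(chunk, end="")

asyncio.run(main())
```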


@ -0,0 +1,142 @@
from __future__ import annotations
from aiohttp import ClientSession
import json
from ..typing import AsyncResult, Messages, ImageType
from ..image import to_data_uri
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://deepinfra.com/chat"
api_endpoint = "https://api.deepinfra.com/v1/openai/chat/completions"
working = True
supports_stream = True
supports_system_message = True
supports_message_history = True
default_model = 'meta-llama/Meta-Llama-3.1-70B-Instruct'
models = [
'meta-llama/Meta-Llama-3.1-405B-Instruct',
'meta-llama/Meta-Llama-3.1-70B-Instruct',
'meta-llama/Meta-Llama-3.1-8B-Instruct',
'mistralai/Mixtral-8x22B-Instruct-v0.1',
'mistralai/Mixtral-8x7B-Instruct-v0.1',
'microsoft/WizardLM-2-8x22B',
'microsoft/WizardLM-2-7B',
'Qwen/Qwen2-72B-Instruct',
'microsoft/Phi-3-medium-4k-instruct',
'google/gemma-2-27b-it',
'openbmb/MiniCPM-Llama3-V-2_5', # Image upload is available
'mistralai/Mistral-7B-Instruct-v0.3',
'lizpreciatior/lzlv_70b_fp16_hf',
'openchat/openchat-3.6-8b',
'Phind/Phind-CodeLlama-34B-v2',
'cognitivecomputations/dolphin-2.9.1-llama-3-70b',
]
model_aliases = {
"llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct",
"llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct",
"Llama-3.1-8B": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"mixtral-8x22b": "mistralai/Mixtral-8x22B-Instruct-v0.1",
"mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
"wizardlm-2-8x22b": "microsoft/WizardLM-2-8x22B",
"wizardlm-2-7b": "microsoft/WizardLM-2-7B",
"qwen-2-72b": "Qwen/Qwen2-72B-Instruct",
"phi-3-medium-4k": "microsoft/Phi-3-medium-4k-instruct",
"gemma-2b-27b": "google/gemma-2-27b-it",
"minicpm-llama-3-v2.5": "openbmb/MiniCPM-Llama3-V-2_5",
"mistral-7b": "mistralai/Mistral-7B-Instruct-v0.3",
"lzlv_70b": "lizpreciatior/lzlv_70b_fp16_hf",
"openchat-3.6-8b": "openchat/openchat-3.6-8b",
"phind-codellama-34b-v2": "Phind/Phind-CodeLlama-34B-v2",
"dolphin-2.9.1-llama-3-70b": "cognitivecomputations/dolphin-2.9.1-llama-3-70b",
}
@classmethod
def get_model(cls, model: str) -> str:
if model in cls.models:
return model
elif model in cls.model_aliases:
return cls.model_aliases[model]
else:
return cls.default_model
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
image: ImageType = None,
image_name: str = None,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
headers = {
'Accept-Language': 'en-US,en;q=0.9',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
'Content-Type': 'application/json',
'Origin': 'https://deepinfra.com',
'Pragma': 'no-cache',
'Referer': 'https://deepinfra.com/',
'Sec-Fetch-Dest': 'empty',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Site': 'same-site',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36',
'X-Deepinfra-Source': 'web-embed',
'accept': 'text/event-stream',
'sec-ch-ua': '"Not;A=Brand";v="24", "Chromium";v="128"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Linux"',
}
async with ClientSession(headers=headers) as session:
prompt = format_prompt(messages)
data = {
'model': model,
'messages': [
{'role': 'system', 'content': 'Be a helpful assistant'},
{'role': 'user', 'content': prompt}
],
'stream': True
}
if model == 'openbmb/MiniCPM-Llama3-V-2_5' and image is not None:
data['messages'][-1]['content'] = [
{
'type': 'image_url',
'image_url': {
'url': to_data_uri(image)
}
},
{
'type': 'text',
'text': messages[-1]['content']
}
]
async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
response.raise_for_status()
async for line in response.content:
if line:
decoded_line = line.decode('utf-8').strip()
if decoded_line.startswith('data:'):
json_part = decoded_line[5:].strip()
if json_part == '[DONE]':
break
try:
data = json.loads(json_part)
choices = data.get('choices', [])
if choices:
delta = choices[0].get('delta', {})
content = delta.get('content', '')
if content:
yield content
except json.JSONDecodeError:
print(f"JSON decode error: {json_part}")
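A short usage sketch for the new DeepInfraChat provider; the names are taken from the file above, and the import path assumes it is registered in `g4f.Provider` as shown later in this commit:

```python
import asyncio
from g4f.Provider import DeepInfraChat

async def main():
    # "llama-3.1-70b" is resolved to the full model id by get_model().
    async for token in DeepInfraChat.create_async_generator(
        model="llama-3.1-70b",
        messages=[{"role": "user", "content": "Summarize SSE streaming in one sentence."}],
    ):
        print(token, end="")

asyncio.run(main())
```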


@@ -11,7 +11,8 @@ class DeepInfraImage(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://deepinfra.com"
     parent = "DeepInfra"
     working = True
-    default_model = 'stability-ai/sdxl'
+    needs_auth = True
+    default_model = ''
     image_models = [default_model]
 
     @classmethod


@@ -12,7 +12,7 @@ from ..requests.raise_for_status import raise_for_status
 class FlowGpt(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://flowgpt.com/chat"
-    working = True
+    working = False
     supports_gpt_35_turbo = True
     supports_message_history = True
     supports_system_message = True


@ -1,59 +0,0 @@
from __future__ import annotations
from aiohttp import ClientSession, BaseConnector
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import get_random_string, get_connector
from ..requests import raise_for_status, get_args_from_browser, WebDriver
from ..webdriver import has_seleniumwire
from ..errors import MissingRequirementsError
class GptTalkRu(AsyncGeneratorProvider):
url = "https://gpttalk.ru"
working = True
supports_gpt_35_turbo = True
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
connector: BaseConnector = None,
webdriver: WebDriver = None,
**kwargs
) -> AsyncResult:
if not model:
model = "gpt-3.5-turbo"
if not has_seleniumwire:
raise MissingRequirementsError('Install "selenium-wire" package')
args = get_args_from_browser(f"{cls.url}", webdriver)
args["headers"]["accept"] = "application/json, text/plain, */*"
async with ClientSession(connector=get_connector(connector, proxy), **args) as session:
async with session.get("https://gpttalk.ru/getToken") as response:
await raise_for_status(response)
public_key = (await response.json())["response"]["key"]["publicKey"]
random_string = get_random_string(8)
data = {
"model": model,
"modelType": 1,
"prompt": messages,
"responseType": "stream",
"security": {
"randomMessage": random_string,
"shifrText": encrypt(public_key, random_string)
}
}
async with session.post(f"{cls.url}/gpt2", json=data, proxy=proxy) as response:
await raise_for_status(response)
async for chunk in response.content.iter_any():
yield chunk.decode(errors="ignore")
def encrypt(public_key: str, value: str) -> str:
from Crypto.Cipher import PKCS1_v1_5
from Crypto.PublicKey import RSA
import base64
rsa_key = RSA.importKey(public_key)
cipher = PKCS1_v1_5.new(rsa_key)
return base64.b64encode(cipher.encrypt(value.encode())).decode()


@@ -36,6 +36,24 @@ models = {
         "tokenLimit": 7800,
         "context": "8K",
     },
+    "o1-preview": {
+        "id": "o1-preview",
+        "name": "o1-preview",
+        "model": "o1",
+        "provider": "OpenAI",
+        "maxLength": 400000,
+        "tokenLimit": 100000,
+        "context": "128K",
+    },
+    "o1-mini": {
+        "id": "o1-mini",
+        "name": "o1-mini",
+        "model": "o1",
+        "provider": "OpenAI",
+        "maxLength": 400000,
+        "tokenLimit": 100000,
+        "context": "128K",
+    },
     "gpt-4-turbo-2024-04-09": {
         "id": "gpt-4-turbo-2024-04-09",
         "name": "GPT-4-Turbo",
@@ -54,14 +72,23 @@ models = {
         "tokenLimit": 126000,
         "context": "128K",
     },
-    "gpt-4-0613": {
-        "id": "gpt-4-0613",
-        "name": "GPT-4-0613",
-        "model": "ChatGPT",
-        "provider": "OpenAI",
-        "maxLength": 32000,
-        "tokenLimit": 7600,
-        "context": "8K",
+    "grok-2": {
+        "id": "grok-2",
+        "name": "Grok-2",
+        "model": "Grok",
+        "provider": "x.ai",
+        "maxLength": 400000,
+        "tokenLimit": 100000,
+        "context": "100K",
+    },
+    "grok-2-mini": {
+        "id": "grok-2-mini",
+        "name": "Grok-2-mini",
+        "model": "Grok",
+        "provider": "x.ai",
+        "maxLength": 400000,
+        "tokenLimit": 100000,
+        "context": "100K",
     },
     "claude-3-opus-20240229": {
         "id": "claude-3-opus-20240229",
@@ -126,17 +153,8 @@ models = {
         "tokenLimit": 200000,
         "context": "200K",
     },
-    "gemini-1.0-pro-latest": {
-        "id": "gemini-1.0-pro-latest",
-        "name": "Gemini-Pro",
-        "model": "Gemini",
-        "provider": "Google",
-        "maxLength": 120000,
-        "tokenLimit": 30000,
-        "context": "32K",
-    },
-    "gemini-1.5-flash-latest": {
-        "id": "gemini-1.5-flash-latest",
+    "gemini-1.5-flash-exp-0827": {
+        "id": "gemini-1.5-flash-exp-0827",
         "name": "Gemini-1.5-Flash-1M",
         "model": "Gemini",
         "provider": "Google",
@@ -144,8 +162,8 @@ models = {
         "tokenLimit": 1000000,
         "context": "1024K",
     },
-    "gemini-1.5-pro-latest": {
-        "id": "gemini-1.5-pro-latest",
+    "gemini-1.5-pro-exp-0827": {
+        "id": "gemini-1.5-pro-exp-0827",
         "name": "Gemini-1.5-Pro-1M",
         "model": "Gemini",
         "provider": "Google",
@@ -162,12 +180,15 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
     supports_message_history = True
     supports_system_message = True
     supports_gpt_4 = True
-    default_model = "gpt-4o"
+    default_model = "gpt-3.5-turbo"
     models = list(models.keys())
     model_aliases = {
         "gpt-4o-mini": "gpt-4o-mini-free",
         "gpt-4o": "gpt-4o-free",
+        "o1": "o1-preview",
         "gpt-4-turbo": "gpt-4-turbo-2024-04-09",
         "gpt-4o": "gpt-4o-2024-08-06",
         "gpt-4": "gpt-4-0613",
@@ -176,13 +197,12 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
         "claude-3-opus": "claude-3-opus-20240229-aws",
         "claude-3-opus": "claude-3-opus-20240229-gcp",
         "claude-3-sonnet": "claude-3-sonnet-20240229",
-        "claude-3-5-sonnet": "claude-3-5-sonnet-20240620",
+        "claude-3.5-sonnet": "claude-3-5-sonnet-20240620",
         "claude-3-haiku": "claude-3-haiku-20240307",
         "claude-2.1": "claude-2.1",
-        "gemini-pro": "gemini-1.0-pro-latest",
-        "gemini-flash": "gemini-1.5-flash-latest",
-        "gemini-pro": "gemini-1.5-pro-latest",
+        "gemini-flash": "gemini-1.5-flash-exp-0827",
+        "gemini-pro": "gemini-1.5-pro-exp-0827",
     }
     _auth_code = ""
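The new Liaobots entries (o1-preview, o1-mini, Grok-2) are selected by their dictionary keys or aliases; a hedged usage sketch, assuming the usual `g4f.ChatCompletion.create` call:

```python
import g4f

# "o1" maps to "o1-preview" via model_aliases; "grok-2" is used verbatim.
answer = g4f.ChatCompletion.create(
    model="grok-2",
    provider=g4f.Provider.Liaobots,
    messages=[{"role": "user", "content": "What is xAI?"}],
)
print(answer)
```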


@@ -21,6 +21,25 @@ class LiteIcoding(AsyncGeneratorProvider, ProviderModelMixin):
        'gemini-1.5',
    ]
    model_aliases = {
        "gpt-4o-mini": "gpt-4o",
        "gemini-pro": "gemini-1.5",
    }
    bearer_tokens = [
        "aa3020ee873e40cb8b3f515a0708ebc4",
        "5d69cd271b144226ac1199b3c849a566",
        "62977f48a95844f8853a953679401850",
        "d815b091959e42dd8b7871dfaf879485"
    ]
    current_token_index = 0

    @classmethod
    def get_next_bearer_token(cls):
        token = cls.bearer_tokens[cls.current_token_index]
        cls.current_token_index = (cls.current_token_index + 1) % len(cls.bearer_tokens)
        return token

    @classmethod
    async def create_async_generator(
        cls,
@@ -29,10 +48,11 @@ class LiteIcoding(AsyncGeneratorProvider, ProviderModelMixin):
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        bearer_token = cls.get_next_bearer_token()
        headers = {
            "Accept": "*/*",
            "Accept-Language": "en-US,en;q=0.9",
            "Authorization": f"Bearer {bearer_token}",
            "Connection": "keep-alive",
            "Content-Type": "application/json;charset=utf-8",
            "DNT": "1",
@@ -87,20 +107,17 @@ class LiteIcoding(AsyncGeneratorProvider, ProviderModelMixin):
                    content = part[6:].strip()
                    if content and content != "[DONE]":
                        content = content.strip('"')
                        decoded_content = decode_content(content)
                        full_response += decoded_content

                full_response = (
                    full_response.replace('""', '')
                    .replace('" "', ' ')
                    .replace("\\n\\n", "\n\n")
                    .replace("\\n", "\n")
                    .replace('\\"', '"')
                    .strip()
                )
                filtered_response = re.sub(r'\n---\n.*', '', full_response, flags=re.DOTALL)
                cleaned_response = filtered_response.strip().strip('"')
                yield cleaned_response


@@ -1,72 +1,57 @@ from __future__ import annotations

from aiohttp import ClientSession
import hashlib
import time
import random
import re
import json

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt


class MagickPen(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://magickpen.com"
    api_endpoint = "https://api.magickpen.com/ask"
    working = True
    supports_gpt_4 = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = 'gpt-4o-mini'
    models = ['gpt-4o-mini']

    @classmethod
    async def fetch_api_credentials(cls) -> tuple:
        url = "https://magickpen.com/_nuxt/9e47cd7579e60a9d1f13.js"
        async with ClientSession() as session:
            async with session.get(url) as response:
                text = await response.text()

                # Extract the necessary values from the file
                pattern = r'"X-API-Secret":"(\w+)"'
                match = re.search(pattern, text)
                X_API_SECRET = match.group(1) if match else None

                # Generate timestamp and nonce
                timestamp = str(int(time.time() * 1000))  # in milliseconds
                nonce = str(random.random())

                # Generate the signature
                s = ["TGDBU9zCgM", timestamp, nonce]
                s.sort()
                signature_string = ''.join(s)
                signature = hashlib.md5(signature_string.encode()).hexdigest()

                pattern = r'secret:"(\w+)"'
                match = re.search(pattern, text)
                secret = match.group(1) if match else None

                if X_API_SECRET and timestamp and nonce and secret:
                    return X_API_SECRET, signature, timestamp, nonce, secret
                else:
                    raise Exception("Unable to extract all the necessary data from the JavaScript file.")

    @classmethod
    async def create_async_generator(
@@ -77,54 +62,30 @@ class MagickPen(AsyncGeneratorProvider, ProviderModelMixin):
        **kwargs
    ) -> AsyncResult:
        model = cls.get_model(model)
        X_API_SECRET, signature, timestamp, nonce, secret = await cls.fetch_api_credentials()

        headers = {
            'accept': 'application/json, text/plain, */*',
            'accept-language': 'en-US,en;q=0.9',
            'content-type': 'application/json',
            'nonce': nonce,
            'origin': cls.url,
            'referer': f"{cls.url}/",
            'secret': secret,
            'signature': signature,
            'timestamp': timestamp,
            'x-api-secret': X_API_SECRET,
        }

        async with ClientSession(headers=headers) as session:
            prompt = format_prompt(messages)
            payload = {
                'query': prompt,
                'turnstileResponse': '',
                'action': 'verify'
            }
            async with session.post(cls.api_endpoint, json=payload, proxy=proxy) as response:
                response.raise_for_status()
                async for chunk in response.content:
                    if chunk:
                        yield chunk.decode()
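For reference, the request signature used by the rewritten MagickPen provider is an MD5 over the sorted concatenation of a fixed salt, a millisecond timestamp, and a random nonce; a standalone sketch (the salt value is copied from the code above and may change upstream):

```python
import hashlib
import random
import time

def build_signature(salt: str = "TGDBU9zCgM") -> dict:
    timestamp = str(int(time.time() * 1000))  # milliseconds, matching fetch_api_credentials()
    nonce = str(random.random())
    signature = hashlib.md5("".join(sorted([salt, timestamp, nonce])).encode()).hexdigest()
    return {"timestamp": timestamp, "nonce": nonce, "signature": signature}

print(build_signature())
```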


@@ -22,6 +22,7 @@ class Pi(AbstractProvider):
         proxy: str = None,
         timeout: int = 180,
         conversation_id: str = None,
+        webdriver: WebDriver = None,
         **kwargs
     ) -> CreateResult:
         if cls._session is None:


@ -1,131 +0,0 @@
from __future__ import annotations
import json
from typing import AsyncGenerator
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
class Snova(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://fast.snova.ai"
api_endpoint = "https://fast.snova.ai/api/completion"
working = True
supports_stream = True
supports_system_message = True
supports_message_history = True
default_model = 'Meta-Llama-3.1-8B-Instruct'
models = [
'Meta-Llama-3.1-8B-Instruct',
'Meta-Llama-3.1-70B-Instruct',
'Meta-Llama-3.1-405B-Instruct',
'Samba-CoE',
'ignos/Mistral-T5-7B-v1', # Error with the answer
'v1olet/v1olet_merged_dpo_7B',
'macadeliccc/WestLake-7B-v2-laser-truthy-dpo',
]
model_aliases = {
"llama-3.1-8b": "Meta-Llama-3.1-8B-Instruct",
"llama-3.1-70b": "Meta-Llama-3.1-70B-Instruct",
"llama-3.1-405b": "Meta-Llama-3.1-405B-Instruct",
"mistral-7b": "ignos/Mistral-T5-7B-v1",
"samba-coe-v0.1": "Samba-CoE",
"v1olet-merged-7b": "v1olet/v1olet_merged_dpo_7B",
"westlake-7b-v2": "macadeliccc/WestLake-7B-v2-laser-truthy-dpo",
}
@classmethod
def get_model(cls, model: str) -> str:
if model in cls.models:
return model
elif model in cls.model_aliases:
return cls.model_aliases[model]
else:
return cls.default_model
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncGenerator[str, None]:
model = cls.get_model(model)
headers = {
"accept": "text/event-stream",
"accept-language": "en-US,en;q=0.9",
"cache-control": "no-cache",
"content-type": "application/json",
"origin": cls.url,
"pragma": "no-cache",
"priority": "u=1, i",
"referer": f"{cls.url}/",
"sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Linux"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
}
async with ClientSession(headers=headers) as session:
data = {
"body": {
"messages": [
{
"role": "system",
"content": "You are a helpful assistant."
},
{
"role": "user",
"content": format_prompt(messages),
"id": "1-id",
"ref": "1-ref",
"revision": 1,
"draft": False,
"status": "done",
"enableRealTimeChat": False,
"meta": None
}
],
"max_tokens": 1000,
"stop": ["<|eot_id|>"],
"stream": True,
"stream_options": {"include_usage": True},
"model": model
},
"env_type": "tp16"
}
async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
response.raise_for_status()
full_response = ""
async for line in response.content:
line = line.decode().strip()
if line.startswith("data: "):
data = line[6:]
if data == "[DONE]":
break
try:
json_data = json.loads(data)
choices = json_data.get("choices", [])
if choices:
delta = choices[0].get("delta", {})
content = delta.get("content", "")
full_response += content
except json.JSONDecodeError:
continue
except Exception as e:
print(f"Error processing chunk: {e}")
print(f"Problematic data: {data}")
continue
yield full_response.strip()


@ -1,103 +0,0 @@
from __future__ import annotations
import json
import re
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
class TwitterBio(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://www.twitterbio.io"
api_endpoint_mistral = "https://www.twitterbio.io/api/mistral"
api_endpoint_openai = "https://www.twitterbio.io/api/openai"
working = True
supports_gpt_35_turbo = True
default_model = 'gpt-3.5-turbo'
models = [
'mistralai/Mixtral-8x7B-Instruct-v0.1',
'gpt-3.5-turbo',
]
model_aliases = {
"mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
}
@classmethod
def get_model(cls, model: str) -> str:
if model in cls.models:
return model
return cls.default_model
@staticmethod
def format_text(text: str) -> str:
text = re.sub(r'\s+', ' ', text.strip())
text = re.sub(r'\s+([,.!?])', r'\1', text)
return text
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
headers = {
"accept": "*/*",
"accept-language": "en-US,en;q=0.9",
"cache-control": "no-cache",
"content-type": "application/json",
"origin": cls.url,
"pragma": "no-cache",
"priority": "u=1, i",
"referer": f"{cls.url}/",
"sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Linux"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
}
async with ClientSession(headers=headers) as session:
prompt = format_prompt(messages)
data = {
"prompt": f'{prompt}.'
}
if model == 'mistralai/Mixtral-8x7B-Instruct-v0.1':
api_endpoint = cls.api_endpoint_mistral
elif model == 'gpt-3.5-turbo':
api_endpoint = cls.api_endpoint_openai
else:
raise ValueError(f"Unsupported model: {model}")
async with session.post(api_endpoint, json=data, proxy=proxy) as response:
response.raise_for_status()
buffer = ""
async for line in response.content:
line = line.decode('utf-8').strip()
if line.startswith('data: '):
try:
json_data = json.loads(line[6:])
if model == 'mistralai/Mixtral-8x7B-Instruct-v0.1':
if 'choices' in json_data and len(json_data['choices']) > 0:
text = json_data['choices'][0].get('text', '')
if text:
buffer += text
elif model == 'gpt-3.5-turbo':
text = json_data.get('text', '')
if text:
buffer += text
except json.JSONDecodeError:
continue
elif line == 'data: [DONE]':
break
if buffer:
yield cls.format_text(buffer)


@@ -12,10 +12,11 @@ class Upstage(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://console.upstage.ai/playground/chat"
     api_endpoint = "https://ap-northeast-2.apistage.ai/v1/web/demo/chat/completions"
     working = True
-    default_model = 'upstage/solar-1-mini-chat'
+    default_model = 'solar-pro'
     models = [
         'upstage/solar-1-mini-chat',
         'upstage/solar-1-mini-chat-ja',
+        'solar-pro',
     ]
     model_aliases = {
         "solar-1-mini": "upstage/solar-1-mini-chat",


@ -1,104 +0,0 @@
from __future__ import annotations
import json, base64, requests, random, os
try:
import execjs
has_requirements = True
except ImportError:
has_requirements = False
from ..typing import Messages, CreateResult
from .base_provider import AbstractProvider
from ..requests import raise_for_status
from ..errors import MissingRequirementsError
class Vercel(AbstractProvider):
url = 'https://chat.vercel.ai'
working = True
supports_message_history = True
supports_system_message = True
supports_gpt_35_turbo = True
supports_stream = True
@staticmethod
def create_completion(
model: str,
messages: Messages,
stream: bool,
proxy: str = None,
max_retries: int = 6,
**kwargs
) -> CreateResult:
if not has_requirements:
raise MissingRequirementsError('Install "PyExecJS" package')
headers = {
'authority': 'chat.vercel.ai',
'accept': '*/*',
'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
'cache-control': 'no-cache',
'content-type': 'application/json',
'custom-encoding': get_anti_bot_token(),
'origin': 'https://chat.vercel.ai',
'pragma': 'no-cache',
'referer': 'https://chat.vercel.ai/',
'sec-ch-ua': '"Chromium";v="122", "Not(A:Brand";v="24", "Google Chrome";v="122"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36',
}
json_data = {
'messages': messages,
'id' : f'{os.urandom(3).hex()}a',
}
response = None
for _ in range(max_retries):
response = requests.post('https://chat.vercel.ai/api/chat',
headers=headers, json=json_data, stream=True, proxies={"https": proxy})
if not response.ok:
continue
for token in response.iter_content(chunk_size=None):
try:
yield token.decode(errors="ignore")
except UnicodeDecodeError:
pass
break
raise_for_status(response)
def get_anti_bot_token() -> str:
headers = {
'authority': 'sdk.vercel.ai',
'accept': '*/*',
'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
'cache-control': 'no-cache',
'pragma': 'no-cache',
'referer': 'https://sdk.vercel.ai/',
'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'user-agent': f'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.{random.randint(99, 999)}.{random.randint(99, 999)} Safari/537.36',
}
response = requests.get('https://chat.vercel.ai/openai.jpeg',
headers=headers).text
raw_data = json.loads(base64.b64decode(response,
validate=True))
js_script = '''const globalThis={marker:"mark"};String.prototype.fontcolor=function(){return `<font>${this}</font>`};
return (%s)(%s)''' % (raw_data['c'], raw_data['a'])
sec_list = [execjs.compile(js_script).call('')[0], [], "sentinel"]
raw_token = json.dumps({'r': sec_list, 't': raw_data['t']},
separators = (",", ":"))
return base64.b64encode(raw_token.encode('utf-8')).decode()


@@ -5,7 +5,6 @@ from ..providers.retry_provider import RetryProvider, IterListProvider
 from ..providers.base_provider import AsyncProvider, AsyncGeneratorProvider
 from ..providers.create_images import CreateImagesProvider
-from .deprecated import *
 from .selenium import *
 from .needs_auth import *
@@ -20,13 +19,14 @@ from .BingCreateImages import BingCreateImages
 from .Binjie import Binjie
 from .Bixin123 import Bixin123
 from .Blackbox import Blackbox
-from .ChatGot import ChatGot
+from .ChatGpt import ChatGpt
 from .Chatgpt4Online import Chatgpt4Online
 from .Chatgpt4o import Chatgpt4o
+from .ChatGptEs import ChatGptEs
 from .ChatgptFree import ChatgptFree
-from .CodeNews import CodeNews
 from .DDG import DDG
 from .DeepInfra import DeepInfra
+from .DeepInfraChat import DeepInfraChat
 from .DeepInfraImage import DeepInfraImage
 from .FlowGpt import FlowGpt
 from .Free2GPT import Free2GPT
@@ -35,7 +35,6 @@ from .FreeGpt import FreeGpt
 from .FreeNetfly import FreeNetfly
 from .GeminiPro import GeminiPro
 from .GigaChat import GigaChat
-from .GptTalkRu import GptTalkRu
 from .HuggingChat import HuggingChat
 from .HuggingFace import HuggingFace
 from .Koala import Koala
@@ -44,7 +43,7 @@ from .LiteIcoding import LiteIcoding
 from .Local import Local
 from .MagickPen import MagickPen
 from .MetaAI import MetaAI
-from .MetaAIAccount import MetaAIAccount
+#from .MetaAIAccount import MetaAIAccount
 from .Nexra import Nexra
 from .Ollama import Ollama
 from .PerplexityLabs import PerplexityLabs
@@ -52,17 +51,12 @@ from .Pi import Pi
 from .Pizzagpt import Pizzagpt
 from .Prodia import Prodia
 from .Reka import Reka
-from .Snova import Snova
 from .Replicate import Replicate
 from .ReplicateHome import ReplicateHome
 from .TeachAnything import TeachAnything
-from .TwitterBio import TwitterBio
 from .Upstage import Upstage
-from .Vercel import Vercel
 from .WhiteRabbitNeo import WhiteRabbitNeo
 from .You import You
-from .ChatGpt import ChatGpt
-from .ChatGptEs import ChatGptEs
 
 import sys
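After this import cleanup the provider registry shrinks accordingly; a small sketch for checking what is still exported, assuming `g4f.Provider.__providers__` is available as in current g4f releases:

```python
import g4f.Provider

# List the names of providers that declare themselves as working.
working = sorted(
    provider.__name__
    for provider in g4f.Provider.__providers__
    if getattr(provider, "working", False)
)
print(working)
```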


@ -1,51 +0,0 @@
from __future__ import annotations
from aiohttp import ClientSession
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider
class Acytoo(AsyncGeneratorProvider):
url = 'https://chat.acytoo.com'
working = False
supports_message_history = True
supports_gpt_35_turbo = True
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
async with ClientSession(
headers=_create_header()
) as session:
async with session.post(
f'{cls.url}/api/completions',
proxy=proxy,
json=_create_payload(messages, **kwargs)
) as response:
response.raise_for_status()
async for stream in response.content.iter_any():
if stream:
yield stream.decode()
def _create_header():
return {
'accept': '*/*',
'content-type': 'application/json',
}
def _create_payload(messages: Messages, temperature: float = 0.5, **kwargs):
return {
'key' : '',
'model' : 'gpt-3.5-turbo',
'messages' : messages,
'temperature' : temperature,
'password' : ''
}


@ -1,46 +0,0 @@
from __future__ import annotations
from aiohttp import ClientSession
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider
class AiAsk(AsyncGeneratorProvider):
url = "https://e.aiask.me"
supports_message_history = True
supports_gpt_35_turbo = True
working = False
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
headers = {
"accept": "application/json, text/plain, */*",
"origin": cls.url,
"referer": f"{cls.url}/chat",
}
async with ClientSession(headers=headers) as session:
data = {
"continuous": True,
"id": "fRMSQtuHl91A4De9cCvKD",
"list": messages,
"models": "0",
"prompt": "",
"temperature": kwargs.get("temperature", 0.5),
"title": "",
}
buffer = ""
rate_limit = "您的免费额度不够使用这个模型啦,请点击右上角登录继续使用!"
async with session.post(f"{cls.url}/v1/chat/gpt/", json=data, proxy=proxy) as response:
response.raise_for_status()
async for chunk in response.content.iter_any():
buffer += chunk.decode()
if not rate_limit.startswith(buffer):
yield buffer
buffer = ""
elif buffer == rate_limit:
raise RuntimeError("Rate limit reached")


@ -1,39 +0,0 @@
from __future__ import annotations
import requests
from ...typing import Any, CreateResult, Messages
from ..base_provider import AbstractProvider
class AiService(AbstractProvider):
url = "https://aiservice.vercel.app/"
working = False
supports_gpt_35_turbo = True
@staticmethod
def create_completion(
model: str,
messages: Messages,
stream: bool,
**kwargs: Any,
) -> CreateResult:
base = (
"\n".join(
f"{message['role']}: {message['content']}" for message in messages
)
+ "\nassistant: "
)
headers = {
"accept": "*/*",
"content-type": "text/plain;charset=UTF-8",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"Referer": "https://aiservice.vercel.app/chat",
}
data = {"input": base}
url = "https://aiservice.vercel.app/api/chat/answer"
response = requests.post(url, headers=headers, json=data)
response.raise_for_status()
yield response.json()["data"]


@ -1,46 +0,0 @@
from __future__ import annotations
import time
import hashlib
from ...typing import AsyncResult, Messages
from ...requests import StreamSession
from ..base_provider import AsyncGeneratorProvider
class Aibn(AsyncGeneratorProvider):
url = "https://aibn.cc"
working = False
supports_message_history = True
supports_gpt_35_turbo = True
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
timeout: int = 120,
**kwargs
) -> AsyncResult:
async with StreamSession(
impersonate="chrome107",
proxies={"https": proxy},
timeout=timeout
) as session:
timestamp = int(time.time())
data = {
"messages": messages,
"pass": None,
"sign": generate_signature(timestamp, messages[-1]["content"]),
"time": timestamp
}
async with session.post(f"{cls.url}/api/generate", json=data) as response:
response.raise_for_status()
async for chunk in response.iter_content():
yield chunk.decode()
def generate_signature(timestamp: int, message: str, secret: str = "undefined"):
data = f"{timestamp}:{message}:{secret}"
return hashlib.sha256(data.encode()).hexdigest()


@ -1,64 +0,0 @@
from __future__ import annotations
from ...typing import Messages
from ..base_provider import AsyncProvider, format_prompt
from ..helper import get_cookies
from ...requests import StreamSession
class Aichat(AsyncProvider):
url = "https://chat-gpt.org/chat"
working = False
supports_gpt_35_turbo = True
@staticmethod
async def create_async(
model: str,
messages: Messages,
proxy: str = None, **kwargs) -> str:
cookies = get_cookies('chat-gpt.org') if not kwargs.get('cookies') else kwargs.get('cookies')
if not cookies:
raise RuntimeError(
"g4f.provider.Aichat requires cookies, [refresh https://chat-gpt.org on chrome]"
)
headers = {
'authority': 'chat-gpt.org',
'accept': '*/*',
'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
'content-type': 'application/json',
'origin': 'https://chat-gpt.org',
'referer': 'https://chat-gpt.org/chat',
'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
}
async with StreamSession(headers=headers,
cookies=cookies,
timeout=6,
proxies={"https": proxy} if proxy else None,
impersonate="chrome110", verify=False) as session:
json_data = {
"message": format_prompt(messages),
"temperature": kwargs.get('temperature', 0.5),
"presence_penalty": 0,
"top_p": kwargs.get('top_p', 1),
"frequency_penalty": 0,
}
async with session.post("https://chat-gpt.org/api/text",
json=json_data) as response:
response.raise_for_status()
result = await response.json()
if not result['response']:
raise Exception(f"Error Response: {result}")
return result["message"]


@ -1,90 +0,0 @@
from __future__ import annotations
import hashlib
import time
import uuid
import json
from datetime import datetime
from aiohttp import ClientSession
from ...typing import SHA256, AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider
class Ails(AsyncGeneratorProvider):
url = "https://ai.ls"
working = False
supports_message_history = True
supports_gpt_35_turbo = True
@staticmethod
async def create_async_generator(
model: str,
messages: Messages,
stream: bool,
proxy: str = None,
**kwargs
) -> AsyncResult:
headers = {
"authority": "api.caipacity.com",
"accept": "*/*",
"accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
"authorization": "Bearer free",
"client-id": str(uuid.uuid4()),
"client-v": "0.1.278",
"content-type": "application/json",
"origin": "https://ai.ls",
"referer": "https://ai.ls/",
"sec-ch-ua": '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Windows"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "cross-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
"from-url": "https://ai.ls/?chat=1"
}
async with ClientSession(
headers=headers
) as session:
timestamp = _format_timestamp(int(time.time() * 1000))
json_data = {
"model": "gpt-3.5-turbo",
"temperature": kwargs.get("temperature", 0.6),
"stream": True,
"messages": messages,
"d": datetime.now().strftime("%Y-%m-%d"),
"t": timestamp,
"s": _hash({"t": timestamp, "m": messages[-1]["content"]}),
}
async with session.post(
"https://api.caipacity.com/v1/chat/completions",
proxy=proxy,
json=json_data
) as response:
response.raise_for_status()
start = "data: "
async for line in response.content:
line = line.decode('utf-8')
if line.startswith(start) and line != "data: [DONE]":
line = line[len(start):-1]
line = json.loads(line)
token = line["choices"][0]["delta"].get("content")
if token:
if "ai.ls" in token or "ai.ci" in token:
raise Exception(f"Response Error: {token}")
yield token
def _hash(json_data: dict[str, str]) -> SHA256:
base_string: str = f'{json_data["t"]}:{json_data["m"]}:WI,2rU#_r:r~aF4aJ36[.Z(/8Rv93Rf:{len(json_data["m"])}'
return SHA256(hashlib.sha256(base_string.encode()).hexdigest())
def _format_timestamp(timestamp: int) -> str:
e = timestamp
n = e % 10
r = n + 1 if n % 2 == 0 else n
return str(e - n + r)


@ -1,73 +0,0 @@
from __future__ import annotations
import requests
import json
from ..base_provider import AbstractProvider
from ...typing import CreateResult, Messages
# to recreate this easily, send a post request to https://chat.aivvm.com/api/models
models = {
'gpt-3.5-turbo': {'id': 'gpt-3.5-turbo', 'name': 'GPT-3.5'},
'gpt-3.5-turbo-0613': {'id': 'gpt-3.5-turbo-0613', 'name': 'GPT-3.5-0613'},
'gpt-3.5-turbo-16k': {'id': 'gpt-3.5-turbo-16k', 'name': 'GPT-3.5-16K'},
'gpt-3.5-turbo-16k-0613': {'id': 'gpt-3.5-turbo-16k-0613', 'name': 'GPT-3.5-16K-0613'},
'gpt-4': {'id': 'gpt-4', 'name': 'GPT-4'},
'gpt-4-0613': {'id': 'gpt-4-0613', 'name': 'GPT-4-0613'},
'gpt-4-32k': {'id': 'gpt-4-32k', 'name': 'GPT-4-32K'},
'gpt-4-32k-0613': {'id': 'gpt-4-32k-0613', 'name': 'GPT-4-32K-0613'},
}
class Aivvm(AbstractProvider):
url = 'https://chat.aivvm.com'
supports_stream = True
working = False
supports_gpt_35_turbo = True
supports_gpt_4 = True
@classmethod
def create_completion(cls,
model: str,
messages: Messages,
stream: bool,
**kwargs
) -> CreateResult:
if not model:
model = "gpt-3.5-turbo"
elif model not in models:
raise ValueError(f"Model is not supported: {model}")
json_data = {
"model" : models[model],
"messages" : messages,
"key" : "",
"prompt" : kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown."),
"temperature" : kwargs.get("temperature", 0.7)
}
data = json.dumps(json_data)
headers = {
"accept" : "text/event-stream",
"accept-language" : "en-US,en;q=0.9",
"content-type" : "application/json",
"content-length" : str(len(data)),
"sec-ch-ua" : "\"Chrome\";v=\"117\", \"Not;A=Brand\";v=\"8\", \"Chromium\";v=\"117\"",
"sec-ch-ua-mobile" : "?0",
"sec-ch-ua-platform": "\"Windows\"",
"sec-fetch-dest" : "empty",
"sec-fetch-mode" : "cors",
"sec-fetch-site" : "same-origin",
"sec-gpc" : "1",
"referrer" : "https://chat.aivvm.com/",
"user-agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"
}
response = requests.post("https://chat.aivvm.com/api/chat", headers=headers, data=data, stream=True)
response.raise_for_status()
for chunk in response.iter_content(chunk_size=4096):
try:
yield chunk.decode("utf-8")
except UnicodeDecodeError:
yield chunk.decode("unicode-escape")


@ -1,78 +0,0 @@
from __future__ import annotations
import secrets
import uuid
import json
from aiohttp import ClientSession
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider
from ..helper import format_prompt
class Berlin(AsyncGeneratorProvider):
url = "https://ai.berlin4h.top"
working = False
supports_gpt_35_turbo = True
_token = None
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
if not model:
model = "gpt-3.5-turbo"
headers = {
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
"Accept": "*/*",
"Accept-Language": "de,en-US;q=0.7,en;q=0.3",
"Accept-Encoding": "gzip, deflate, br",
"Referer": f"{cls.url}/",
"Content-Type": "application/json",
"Origin": cls.url,
"Alt-Used": "ai.berlin4h.top",
"Connection": "keep-alive",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
"Pragma": "no-cache",
"Cache-Control": "no-cache",
"TE": "trailers",
}
async with ClientSession(headers=headers) as session:
if not cls._token:
data = {
"account": '免费使用GPT3.5模型@163.com',
"password": '659e945c2d004686bad1a75b708c962f'
}
async with session.post(f"{cls.url}/api/login", json=data, proxy=proxy) as response:
response.raise_for_status()
cls._token = (await response.json())["data"]["token"]
headers = {
"token": cls._token
}
prompt = format_prompt(messages)
data = {
"prompt": prompt,
"parentMessageId": str(uuid.uuid4()),
"options": {
"model": model,
"temperature": 0,
"presence_penalty": 0,
"frequency_penalty": 0,
"max_tokens": 1888,
**kwargs
},
}
async with session.post(f"{cls.url}/api/chat/completions", json=data, proxy=proxy, headers=headers) as response:
response.raise_for_status()
async for chunk in response.content:
if chunk.strip():
try:
yield json.loads(chunk)["content"]
except:
raise RuntimeError(f"Response: {chunk.decode()}")


@ -1,54 +0,0 @@
from __future__ import annotations
from aiohttp import ClientSession, ClientTimeout
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider
class ChatAnywhere(AsyncGeneratorProvider):
url = "https://chatanywhere.cn"
supports_gpt_35_turbo = True
supports_message_history = True
working = False
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
timeout: int = 120,
temperature: float = 0.5,
**kwargs
) -> AsyncResult:
headers = {
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
"Accept": "application/json, text/plain, */*",
"Accept-Language": "de,en-US;q=0.7,en;q=0.3",
"Accept-Encoding": "gzip, deflate, br",
"Content-Type": "application/json",
"Referer": f"{cls.url}/",
"Origin": cls.url,
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
"Authorization": "",
"Connection": "keep-alive",
"TE": "trailers"
}
async with ClientSession(headers=headers, timeout=ClientTimeout(timeout)) as session:
data = {
"list": messages,
"id": "s1_qYuOLXjI3rEpc7WHfQ",
"title": messages[-1]["content"],
"prompt": "",
"temperature": temperature,
"models": "61490748",
"continuous": True
}
async with session.post(f"{cls.url}/v1/chat/gpt/", json=data, proxy=proxy) as response:
response.raise_for_status()
async for chunk in response.content.iter_any():
if chunk:
yield chunk.decode()


@ -1,47 +0,0 @@
from __future__ import annotations
from ...typing import Messages
from ...requests import StreamSession
from ..base_provider import AsyncProvider, format_prompt
class ChatgptDuo(AsyncProvider):
url = "https://chatgptduo.com"
supports_gpt_35_turbo = True
working = False
@classmethod
async def create_async(
cls,
model: str,
messages: Messages,
proxy: str = None,
timeout: int = 120,
**kwargs
) -> str:
async with StreamSession(
impersonate="chrome107",
proxies={"https": proxy},
timeout=timeout
) as session:
prompt = format_prompt(messages)
data = {
"prompt": prompt,
"search": prompt,
"purpose": "ask",
}
response = await session.post(f"{cls.url}/", data=data)
response.raise_for_status()
data = response.json()
cls._sources = [{
"title": source["title"],
"url": source["link"],
"snippet": source["snippet"]
} for source in data["results"]]
return data["answer"]
@classmethod
def get_sources(cls):
return cls._sources


@ -1,52 +0,0 @@
from __future__ import annotations
from aiohttp import ClientSession
import json
from ...typing import AsyncGenerator
from ..base_provider import AsyncGeneratorProvider
class CodeLinkAva(AsyncGeneratorProvider):
url = "https://ava-ai-ef611.web.app"
supports_gpt_35_turbo = True
working = False
@classmethod
async def create_async_generator(
cls,
model: str,
messages: list[dict[str, str]],
**kwargs
) -> AsyncGenerator:
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
"Accept": "*/*",
"Accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
"Origin": cls.url,
"Referer": f"{cls.url}/",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
}
async with ClientSession(
headers=headers
) as session:
data = {
"messages": messages,
"temperature": 0.6,
"stream": True,
**kwargs
}
async with session.post("https://ava-alpha-api.codelink.io/api/chat", json=data) as response:
response.raise_for_status()
async for line in response.content:
line = line.decode()
if line.startswith("data: "):
if line.startswith("data: [DONE]"):
break
line = json.loads(line[6:-1])
content = line["choices"][0]["delta"].get("content")
if content:
yield content
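
Note: the stream handling above is the usual server-sent-events pattern — every line prefixed with `data: ` carries an OpenAI-style JSON chunk and `[DONE]` ends the stream. A minimal standalone sketch of just that parsing step (function name and sample lines are illustrative, not part of the provider):

```python
import json
from typing import Iterable, Iterator

def iter_sse_content(lines: Iterable[str]) -> Iterator[str]:
    # Yield only the delta text from OpenAI-style "data: {...}" lines.
    for line in lines:
        if not line.startswith("data: "):
            continue
        payload = line[len("data: "):].strip()
        if payload == "[DONE]":
            break
        content = json.loads(payload)["choices"][0]["delta"].get("content")
        if content:
            yield content

sample = [
    'data: {"choices": [{"delta": {"content": "Hel"}}]}',
    'data: {"choices": [{"delta": {"content": "lo"}}]}',
    'data: [DONE]',
]
assert "".join(iter_sse_content(sample)) == "Hello"
```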


@ -1,50 +0,0 @@
from __future__ import annotations
from aiohttp import ClientSession
from hashlib import sha256
from ...typing import AsyncResult, Messages, Dict
from ..base_provider import AsyncGeneratorProvider
from ..helper import format_prompt
class Cromicle(AsyncGeneratorProvider):
url: str = 'https://cromicle.top'
working: bool = False
supports_gpt_35_turbo: bool = True
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
async with ClientSession(
headers=_create_header()
) as session:
async with session.post(
f'{cls.url}/chat',
proxy=proxy,
json=_create_payload(format_prompt(messages))
) as response:
response.raise_for_status()
async for stream in response.content.iter_any():
if stream:
yield stream.decode()
def _create_header() -> Dict[str, str]:
return {
'accept': '*/*',
'content-type': 'application/json',
}
def _create_payload(message: str) -> Dict[str, str]:
return {
'message': message,
'token': 'abc',
'hash': sha256('abc'.encode() + message.encode()).hexdigest()
}
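
Note: the `hash` field above is simply the SHA-256 digest of the static token concatenated with the message text. A quick worked example using the same values as the code:

```python
from hashlib import sha256

token, message = 'abc', 'Hello'
payload_hash = sha256(token.encode() + message.encode()).hexdigest()
print(payload_hash)  # 64-char hex string sent as "hash" alongside "token" and "message"
```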


@ -1,62 +0,0 @@
from __future__ import annotations
import json
import re
import time
import requests
from ...typing import Any, CreateResult
from ..base_provider import AbstractProvider
class DfeHub(AbstractProvider):
url = "https://chat.dfehub.com/"
supports_stream = True
supports_gpt_35_turbo = True
@staticmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
stream: bool, **kwargs: Any) -> CreateResult:
headers = {
"authority" : "chat.dfehub.com",
"accept" : "*/*",
"accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
"content-type" : "application/json",
"origin" : "https://chat.dfehub.com",
"referer" : "https://chat.dfehub.com/",
"sec-ch-ua" : '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
"sec-ch-ua-mobile" : "?0",
"sec-ch-ua-platform": '"macOS"',
"sec-fetch-dest" : "empty",
"sec-fetch-mode" : "cors",
"sec-fetch-site" : "same-origin",
"user-agent" : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
"x-requested-with" : "XMLHttpRequest",
}
json_data = {
"messages" : messages,
"model" : "gpt-3.5-turbo",
"temperature" : kwargs.get("temperature", 0.5),
"presence_penalty" : kwargs.get("presence_penalty", 0),
"frequency_penalty" : kwargs.get("frequency_penalty", 0),
"top_p" : kwargs.get("top_p", 1),
"stream" : True
}
response = requests.post("https://chat.dfehub.com/api/openai/v1/chat/completions",
headers=headers, json=json_data, timeout=3)
for chunk in response.iter_lines():
if b"detail" in chunk:
delay = re.findall(r"\d+\.\d+", chunk.decode())
delay = float(delay[-1])
time.sleep(delay)
yield from DfeHub.create_completion(model, messages, stream, **kwargs)
if b"content" in chunk:
data = json.loads(chunk.decode().split("data: ")[1])
yield (data["choices"][0]["delta"]["content"])
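
Note: when the API rate-limits, the `detail` chunk above carries a suggested wait time that the provider pulls out with a regex before retrying. A hedged sketch of that step alone (the sample chunk text is made up, not a captured response):

```python
import re
import time

chunk = b'{"detail": "Too many requests, retry after 1.25 seconds"}'  # illustrative only
delays = re.findall(r"\d+\.\d+", chunk.decode())
if delays:
    time.sleep(float(delays[-1]))  # sleep the advertised delay, then re-issue the request
```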


@ -1,89 +0,0 @@
from __future__ import annotations
import json
import random
import requests
from ...typing import Any, CreateResult
from ..base_provider import AbstractProvider
class EasyChat(AbstractProvider):
url: str = "https://free.easychat.work"
supports_stream = True
supports_gpt_35_turbo = True
working = False
@staticmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
stream: bool, **kwargs: Any) -> CreateResult:
active_servers = [
"https://chat10.fastgpt.me",
"https://chat9.fastgpt.me",
"https://chat1.fastgpt.me",
"https://chat2.fastgpt.me",
"https://chat3.fastgpt.me",
"https://chat4.fastgpt.me",
"https://gxos1h1ddt.fastgpt.me"
]
server = active_servers[kwargs.get("active_server", random.randint(0, 5))]
headers = {
"authority" : f"{server}".replace("https://", ""),
"accept" : "text/event-stream",
"accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3,fa=0.2",
"content-type" : "application/json",
"origin" : f"{server}",
"referer" : f"{server}/",
"x-requested-with" : "XMLHttpRequest",
'plugins' : '0',
'sec-ch-ua' : '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
'sec-ch-ua-mobile' : '?0',
'sec-ch-ua-platform': '"Windows"',
'sec-fetch-dest' : 'empty',
'sec-fetch-mode' : 'cors',
'sec-fetch-site' : 'same-origin',
'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
'usesearch' : 'false',
'x-requested-with' : 'XMLHttpRequest'
}
json_data = {
"messages" : messages,
"stream" : stream,
"model" : model,
"temperature" : kwargs.get("temperature", 0.5),
"presence_penalty" : kwargs.get("presence_penalty", 0),
"frequency_penalty" : kwargs.get("frequency_penalty", 0),
"top_p" : kwargs.get("top_p", 1)
}
session = requests.Session()
# init cookies from server
session.get(f"{server}/")
response = session.post(f"{server}/api/openai/v1/chat/completions",
headers=headers, json=json_data, stream=stream)
if response.status_code != 200:
raise Exception(f"Error {response.status_code} from server : {response.reason}")
if not stream:
json_data = response.json()
if "choices" in json_data:
yield json_data["choices"][0]["message"]["content"]
else:
raise Exception("No response from server")
else:
for chunk in response.iter_lines():
if b"content" in chunk:
splitData = chunk.decode().split("data:")
if len(splitData) > 1:
yield json.loads(splitData[1])["choices"][0]["delta"]["content"]


@ -1,71 +0,0 @@
from __future__ import annotations
import json
from abc import ABC, abstractmethod
import requests
from ...typing import Any, CreateResult
from ..base_provider import AbstractProvider
class Equing(AbstractProvider):
url: str = 'https://next.eqing.tech/'
working = False
supports_stream = True
supports_gpt_35_turbo = True
supports_gpt_4 = False
@staticmethod
@abstractmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
stream: bool, **kwargs: Any) -> CreateResult:
headers = {
'authority' : 'next.eqing.tech',
'accept' : 'text/event-stream',
'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
'cache-control' : 'no-cache',
'content-type' : 'application/json',
'origin' : 'https://next.eqing.tech',
'plugins' : '0',
'pragma' : 'no-cache',
'referer' : 'https://next.eqing.tech/',
'sec-ch-ua' : '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
'sec-ch-ua-mobile' : '?0',
'sec-ch-ua-platform': '"macOS"',
'sec-fetch-dest' : 'empty',
'sec-fetch-mode' : 'cors',
'sec-fetch-site' : 'same-origin',
'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
'usesearch' : 'false',
'x-requested-with' : 'XMLHttpRequest'
}
json_data = {
'messages' : messages,
'stream' : stream,
'model' : model,
'temperature' : kwargs.get('temperature', 0.5),
'presence_penalty' : kwargs.get('presence_penalty', 0),
'frequency_penalty' : kwargs.get('frequency_penalty', 0),
'top_p' : kwargs.get('top_p', 1),
}
response = requests.post('https://next.eqing.tech/api/openai/v1/chat/completions',
headers=headers, json=json_data, stream=stream)
if not stream:
yield response.json()["choices"][0]["message"]["content"]
return
for line in response.iter_content(chunk_size=1024):
if line:
if b'content' in line:
line_json = json.loads(line.decode('utf-8').split('data: ')[1])
token = line_json['choices'][0]['delta'].get('content')
if token:
yield token


@ -1,91 +0,0 @@
from __future__ import annotations
import uuid, time, random, json
from aiohttp import ClientSession
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider
from ..helper import format_prompt, get_random_string
class FakeGpt(AsyncGeneratorProvider):
url = "https://chat-shared2.zhile.io"
supports_gpt_35_turbo = True
working = False
_access_token = None
_cookie_jar = None
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
headers = {
"Accept-Language": "en-US",
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36",
"Referer": "https://chat-shared2.zhile.io/?v=2",
"sec-ch-ua": '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
"sec-ch-ua-platform": '"Linux"',
"sec-ch-ua-mobile": "?0",
}
async with ClientSession(headers=headers, cookie_jar=cls._cookie_jar) as session:
if not cls._access_token:
async with session.get(f"{cls.url}/api/loads", params={"t": int(time.time())}, proxy=proxy) as response:
response.raise_for_status()
loads = (await response.json())["loads"]
token_ids = [t["token_id"] for t in loads]
data = {
"token_key": random.choice(token_ids),
"session_password": get_random_string()
}
async with session.post(f"{cls.url}/auth/login", data=data, proxy=proxy) as response:
response.raise_for_status()
async with session.get(f"{cls.url}/api/auth/session", proxy=proxy) as response:
response.raise_for_status()
cls._access_token = (await response.json())["accessToken"]
cls._cookie_jar = session.cookie_jar
headers = {
"Content-Type": "application/json",
"Accept": "text/event-stream",
"X-Authorization": f"Bearer {cls._access_token}",
}
prompt = format_prompt(messages)
data = {
"action": "next",
"messages": [
{
"id": str(uuid.uuid4()),
"author": {"role": "user"},
"content": {"content_type": "text", "parts": [prompt]},
"metadata": {},
}
],
"parent_message_id": str(uuid.uuid4()),
"model": "text-davinci-002-render-sha",
"plugin_ids": [],
"timezone_offset_min": -120,
"suggestions": [],
"history_and_training_disabled": True,
"arkose_token": "",
"force_paragen": False,
}
last_message = ""
async with session.post(f"{cls.url}/api/conversation", json=data, headers=headers, proxy=proxy) as response:
async for line in response.content:
if line.startswith(b"data: "):
line = line[6:]
if line == b"[DONE]":
break
try:
line = json.loads(line)
if line["message"]["metadata"]["message_type"] == "next":
new_message = line["message"]["content"]["parts"][0]
yield new_message[len(last_message):]
last_message = new_message
except:
continue
if not last_message:
raise RuntimeError("No valid response")


@ -1,76 +0,0 @@
from __future__ import annotations
import json
import random
import requests
from ...typing import Any, CreateResult
from ..base_provider import AbstractProvider
class FastGpt(AbstractProvider):
url: str = 'https://chat9.fastgpt.me/'
working = False
needs_auth = False
supports_stream = True
supports_gpt_35_turbo = True
supports_gpt_4 = False
@staticmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
stream: bool, **kwargs: Any) -> CreateResult:
headers = {
'authority' : 'chat9.fastgpt.me',
'accept' : 'text/event-stream',
'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
'cache-control' : 'no-cache',
'content-type' : 'application/json',
'origin' : 'https://chat9.fastgpt.me',
'plugins' : '0',
'pragma' : 'no-cache',
'referer' : 'https://chat9.fastgpt.me/',
'sec-ch-ua' : '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
'sec-ch-ua-mobile' : '?0',
'sec-ch-ua-platform': '"macOS"',
'sec-fetch-dest' : 'empty',
'sec-fetch-mode' : 'cors',
'sec-fetch-site' : 'same-origin',
'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
'usesearch' : 'false',
'x-requested-with' : 'XMLHttpRequest',
}
json_data = {
'messages' : messages,
'stream' : stream,
'model' : model,
'temperature' : kwargs.get('temperature', 0.5),
'presence_penalty' : kwargs.get('presence_penalty', 0),
'frequency_penalty' : kwargs.get('frequency_penalty', 0),
'top_p' : kwargs.get('top_p', 1),
}
subdomain = random.choice([
'jdaen979ew',
'chat9'
])
response = requests.post(f'https://{subdomain}.fastgpt.me/api/openai/v1/chat/completions',
headers=headers, json=json_data, stream=stream)
for line in response.iter_lines():
if line:
try:
if b'content' in line:
line_json = json.loads(line.decode('utf-8').split('data: ')[1])
token = line_json['choices'][0]['delta'].get(
'content'
)
if token:
yield token
except:
continue


@ -1,40 +0,0 @@
from __future__ import annotations
import json
import requests
from ...typing import Any, CreateResult
from ..base_provider import AbstractProvider
class Forefront(AbstractProvider):
url = "https://forefront.com"
supports_stream = True
supports_gpt_35_turbo = True
@staticmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
stream: bool, **kwargs: Any) -> CreateResult:
json_data = {
"text" : messages[-1]["content"],
"action" : "noauth",
"id" : "",
"parentId" : "",
"workspaceId" : "",
"messagePersona": "607e41fe-95be-497e-8e97-010a59b2e2c0",
"model" : "gpt-4",
"messages" : messages[:-1] if len(messages) > 1 else [],
"internetMode" : "auto",
}
response = requests.post("https://streaming.tenant-forefront-default.knative.chi.coreweave.com/free-chat",
json=json_data, stream=True)
response.raise_for_status()
for token in response.iter_lines():
if b"delta" in token:
yield json.loads(token.decode().split("data: ")[1])["delta"]


@ -1,87 +0,0 @@
from __future__ import annotations
import secrets, time, json
from aiohttp import ClientSession
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider
from ..helper import format_prompt
class GPTalk(AsyncGeneratorProvider):
url = "https://gptalk.net"
working = False
supports_gpt_35_turbo = True
_auth = None
used_times = 0
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
if not model:
model = "gpt-3.5-turbo"
timestamp = int(time.time())
headers = {
'authority': 'gptalk.net',
'accept': '*/*',
'accept-language': 'de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US;q=0.6,nl;q=0.5,zh-CN;q=0.4,zh-TW;q=0.3,zh;q=0.2',
'content-type': 'application/json',
'origin': 'https://gptalk.net',
'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Linux"',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36',
'x-auth-appid': '2229',
'x-auth-openid': '',
'x-auth-platform': '',
'x-auth-timestamp': f"{timestamp}",
}
async with ClientSession(headers=headers) as session:
if not cls._auth or cls._auth["expires_at"] < timestamp or cls.used_times == 5:
data = {
"fingerprint": secrets.token_hex(16).zfill(32),
"platform": "fingerprint"
}
async with session.post(f"{cls.url}/api/chatgpt/user/login", json=data, proxy=proxy) as response:
response.raise_for_status()
cls._auth = (await response.json())["data"]
cls.used_times = 0
data = {
"content": format_prompt(messages),
"accept": "stream",
"from": 1,
"model": model,
"is_mobile": 0,
"user_agent": headers["user-agent"],
"is_open_ctx": 0,
"prompt": "",
"roid": 111,
"temperature": 0,
"ctx_msg_count": 3,
"created_at": timestamp
}
headers = {
'authorization': f'Bearer {cls._auth["token"]}',
}
async with session.post(f"{cls.url}/api/chatgpt/chatapi/text", json=data, headers=headers, proxy=proxy) as response:
response.raise_for_status()
token = (await response.json())["data"]["token"]
cls.used_times += 1
last_message = ""
async with session.get(f"{cls.url}/api/chatgpt/chatapi/stream", params={"token": token}, proxy=proxy) as response:
response.raise_for_status()
async for line in response.content:
if line.startswith(b"data: "):
if line.startswith(b"data: [DONE]"):
break
message = json.loads(line[6:-1])["content"]
yield message[len(last_message):]
last_message = message
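
Note: the `/stream` endpoint above returns the cumulative answer in every event, so the provider yields only the new suffix (`message[len(last_message):]`). The same idea as a standalone helper:

```python
def to_deltas(snapshots):
    # Convert cumulative text snapshots into incremental deltas.
    last = ""
    for snapshot in snapshots:
        yield snapshot[len(last):]
        last = snapshot

assert list(to_deltas(["He", "Hell", "Hello"])) == ["He", "ll", "o"]
```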


@ -1,73 +0,0 @@
from __future__ import annotations
import requests, json
from ..base_provider import AbstractProvider
from ...typing import CreateResult, Messages
from json import dumps
class GeekGpt(AbstractProvider):
url = 'https://chat.geekgpt.org'
working = False
supports_message_history = True
supports_stream = True
supports_gpt_35_turbo = True
supports_gpt_4 = True
@classmethod
def create_completion(
cls,
model: str,
messages: Messages,
stream: bool,
**kwargs
) -> CreateResult:
if not model:
model = "gpt-3.5-turbo"
json_data = {
'messages': messages,
'model': model,
'temperature': kwargs.get('temperature', 0.9),
'presence_penalty': kwargs.get('presence_penalty', 0),
'top_p': kwargs.get('top_p', 1),
'frequency_penalty': kwargs.get('frequency_penalty', 0),
'stream': True
}
data = dumps(json_data, separators=(',', ':'))
headers = {
'authority': 'ai.fakeopen.com',
'accept': '*/*',
'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
'authorization': 'Bearer pk-this-is-a-real-free-pool-token-for-everyone',
'content-type': 'application/json',
'origin': 'https://chat.geekgpt.org',
'referer': 'https://chat.geekgpt.org/',
'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'cross-site',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
}
response = requests.post("https://ai.fakeopen.com/v1/chat/completions",
headers=headers, data=data, stream=True)
response.raise_for_status()
for chunk in response.iter_lines():
if b'content' in chunk:
json_data = chunk.decode().replace("data: ", "")
if json_data == "[DONE]":
break
try:
content = json.loads(json_data)["choices"][0]["delta"].get("content")
except Exception as e:
raise RuntimeError(f'error | {e} :', json_data)
if content:
yield content


@ -1,77 +0,0 @@
from __future__ import annotations
import json
import os
import uuid
import requests
# try:
# from Crypto.Cipher import AES
# except ImportError:
# from Cryptodome.Cipher import AES
from ...typing import Any, CreateResult
from ..base_provider import AbstractProvider
class GetGpt(AbstractProvider):
url = 'https://chat.getgpt.world/'
supports_stream = True
working = False
supports_gpt_35_turbo = True
@staticmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
stream: bool, **kwargs: Any) -> CreateResult:
headers = {
'Content-Type' : 'application/json',
'Referer' : 'https://chat.getgpt.world/',
'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
}
data = json.dumps(
{
'messages' : messages,
'frequency_penalty' : kwargs.get('frequency_penalty', 0),
'max_tokens' : kwargs.get('max_tokens', 4000),
'model' : 'gpt-3.5-turbo',
'presence_penalty' : kwargs.get('presence_penalty', 0),
'temperature' : kwargs.get('temperature', 1),
'top_p' : kwargs.get('top_p', 1),
'stream' : True,
'uuid' : str(uuid.uuid4())
}
)
res = requests.post('https://chat.getgpt.world/api/chat/stream',
headers=headers, json={'signature': _encrypt(data)}, stream=True)
res.raise_for_status()
for line in res.iter_lines():
if b'content' in line:
line_json = json.loads(line.decode('utf-8').split('data: ')[1])
yield (line_json['choices'][0]['delta']['content'])
def _encrypt(e: str):
# t = os.urandom(8).hex().encode('utf-8')
# n = os.urandom(8).hex().encode('utf-8')
# r = e.encode('utf-8')
# cipher = AES.new(t, AES.MODE_CBC, n)
# ciphertext = cipher.encrypt(_pad_data(r))
# return ciphertext.hex() + t.decode('utf-8') + n.decode('utf-8')
return
def _pad_data(data: bytes) -> bytes:
# block_size = AES.block_size
# padding_size = block_size - len(data) % block_size
# padding = bytes([padding_size] * padding_size)
# return data + padding
return
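
Note: `_encrypt` and `_pad_data` above are stubbed out, with their original AES-CBC logic left in comments. A sketch reassembled from those comments, assuming `pycryptodome` provides `Crypto.Cipher.AES` (untested against the live endpoint):

```python
import os
from Crypto.Cipher import AES  # pip install pycryptodome (assumption)

def _pad_data(data: bytes) -> bytes:
    # PKCS#7-style padding up to the 16-byte AES block size
    padding_size = AES.block_size - len(data) % AES.block_size
    return data + bytes([padding_size] * padding_size)

def _encrypt(e: str) -> str:
    t = os.urandom(8).hex().encode('utf-8')  # 16 hex chars used as the AES-128 key
    n = os.urandom(8).hex().encode('utf-8')  # 16 hex chars used as the IV
    cipher = AES.new(t, AES.MODE_CBC, n)
    ciphertext = cipher.encrypt(_pad_data(e.encode('utf-8')))
    # ciphertext hex, followed by the key and IV so the server can reverse it
    return ciphertext.hex() + t.decode('utf-8') + n.decode('utf-8')
```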


@ -1,89 +0,0 @@
from __future__ import annotations
import json
import uuid
from aiohttp import ClientSession
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, format_prompt
class H2o(AsyncGeneratorProvider):
url = "https://gpt-gm.h2o.ai"
model = "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1"
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
model = model if model else cls.model
headers = {"Referer": f"{cls.url}/"}
async with ClientSession(
headers=headers
) as session:
data = {
"ethicsModalAccepted": "true",
"shareConversationsWithModelAuthors": "true",
"ethicsModalAcceptedAt": "",
"activeModel": model,
"searchEnabled": "true",
}
async with session.post(
f"{cls.url}/settings",
proxy=proxy,
data=data
) as response:
response.raise_for_status()
async with session.post(
f"{cls.url}/conversation",
proxy=proxy,
json={"model": model},
) as response:
response.raise_for_status()
conversationId = (await response.json())["conversationId"]
data = {
"inputs": format_prompt(messages),
"parameters": {
"temperature": 0.4,
"truncate": 2048,
"max_new_tokens": 1024,
"do_sample": True,
"repetition_penalty": 1.2,
"return_full_text": False,
**kwargs
},
"stream": True,
"options": {
"id": str(uuid.uuid4()),
"response_id": str(uuid.uuid4()),
"is_retry": False,
"use_cache": False,
"web_search_id": "",
},
}
async with session.post(
f"{cls.url}/conversation/{conversationId}",
proxy=proxy,
json=data
) as response:
start = "data:"
async for line in response.content:
line = line.decode("utf-8")
if line and line.startswith(start):
line = json.loads(line[len(start):-1])
if not line["token"]["special"]:
yield line["token"]["text"]
async with session.delete(
f"{cls.url}/conversation/{conversationId}",
proxy=proxy,
) as response:
response.raise_for_status()


@ -1,80 +0,0 @@
from __future__ import annotations
from aiohttp import ClientSession
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider
from ..helper import get_random_hex
class SearchTypes():
quick = "quick"
code = "code"
websearch = "websearch"
class Hashnode(AsyncGeneratorProvider):
url = "https://hashnode.com"
working = False
supports_message_history = True
supports_gpt_35_turbo = True
_sources = []
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
search_type: str = SearchTypes.websearch,
proxy: str = None,
**kwargs
) -> AsyncResult:
headers = {
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
"Accept": "*/*",
"Accept-Language": "de,en-US;q=0.7,en;q=0.3",
"Accept-Encoding": "gzip, deflate, br",
"Referer": f"{cls.url}/rix",
"Content-Type": "application/json",
"Origin": cls.url,
"Connection": "keep-alive",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
"Pragma": "no-cache",
"Cache-Control": "no-cache",
"TE": "trailers",
}
async with ClientSession(headers=headers) as session:
prompt = messages[-1]["content"]
cls._sources = []
if search_type == "websearch":
async with session.post(
f"{cls.url}/api/ai/rix/search",
json={"prompt": prompt},
proxy=proxy,
) as response:
response.raise_for_status()
cls._sources = (await response.json())["result"]
data = {
"chatId": get_random_hex(),
"history": messages,
"prompt": prompt,
"searchType": search_type,
"urlToScan": None,
"searchResults": cls._sources,
}
async with session.post(
f"{cls.url}/api/ai/rix/completion",
json=data,
proxy=proxy,
) as response:
response.raise_for_status()
async for chunk in response.content.iter_any():
if chunk:
yield chunk.decode()
@classmethod
def get_sources(cls) -> list:
return [{
"title": source["name"],
"url": source["url"]
} for source in cls._sources]


@ -1,54 +0,0 @@
from __future__ import annotations
import json
import requests
from ...typing import Any, CreateResult
from ..base_provider import AbstractProvider
class Lockchat(AbstractProvider):
url: str = "http://supertest.lockchat.app"
supports_stream = True
supports_gpt_35_turbo = True
supports_gpt_4 = True
@staticmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
stream: bool, **kwargs: Any) -> CreateResult:
temperature = float(kwargs.get("temperature", 0.7))
payload = {
"temperature": temperature,
"messages" : messages,
"model" : model,
"stream" : True,
}
headers = {
"user-agent": "ChatX/39 CFNetwork/1408.0.4 Darwin/22.5.0",
}
response = requests.post("http://supertest.lockchat.app/v1/chat/completions",
json=payload, headers=headers, stream=True)
response.raise_for_status()
for token in response.iter_lines():
if b"The model: `gpt-4` does not exist" in token:
print("error, retrying...")
yield from Lockchat.create_completion(
model = model,
messages = messages,
stream = stream,
temperature = temperature,
**kwargs)
if b"content" in token:
token = json.loads(token.decode("utf-8").split("data: ")[1])
token = token["choices"][0]["delta"].get("content")
if token:
yield (token)


@ -1,165 +0,0 @@
# not using WS anymore
from __future__ import annotations
import json, uuid, hashlib, time, random
from aiohttp import ClientSession
from aiohttp.http import WSMsgType
import asyncio
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, format_prompt
models = {
"samantha": "1e3be7fe89e94a809408b1154a2ee3e1",
"gpt-3.5-turbo": "8077335db7cd47e29f7de486612cc7fd",
"gpt-4": "01c8de4fbfc548df903712b0922a4e01",
}
class Myshell(AsyncGeneratorProvider):
url = "https://app.myshell.ai/chat"
working = False
supports_gpt_35_turbo = True
supports_gpt_4 = True
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
timeout: int = 90,
**kwargs
) -> AsyncResult:
if not model:
bot_id = models["samantha"]
elif model in models:
bot_id = models[model]
else:
raise ValueError(f"Model are not supported: {model}")
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36'
visitor_id = generate_visitor_id(user_agent)
async with ClientSession(
headers={'User-Agent': user_agent}
) as session:
async with session.ws_connect(
"wss://api.myshell.ai/ws/?EIO=4&transport=websocket",
autoping=False,
timeout=timeout,
proxy=proxy
) as wss:
# Send and receive hello message
await wss.receive_str()
message = json.dumps({"token": None, "visitorId": visitor_id})
await wss.send_str(f"40/chat,{message}")
await wss.receive_str()
# Fix "need_verify_captcha" issue
await asyncio.sleep(5)
# Create chat message
text = format_prompt(messages)
chat_data = json.dumps(["text_chat",{
"reqId": str(uuid.uuid4()),
"botUid": bot_id,
"sourceFrom": "myshellWebsite",
"text": text,
**generate_signature(text)
}])
# Send chat message
chat_start = "42/chat,"
chat_message = f"{chat_start}{chat_data}"
await wss.send_str(chat_message)
# Receive messages
async for message in wss:
if message.type != WSMsgType.TEXT:
continue
# Ping back
if message.data == "2":
await wss.send_str("3")
continue
# Is not chat message
if not message.data.startswith(chat_start):
continue
data_type, data = json.loads(message.data[len(chat_start):])
if data_type == "text_stream":
if data["data"]["text"]:
yield data["data"]["text"]
elif data["data"]["isFinal"]:
break
elif data_type in ("message_replied", "need_verify_captcha"):
raise RuntimeError(f"Received unexpected message: {data_type}")
def generate_timestamp() -> str:
return str(
int(
str(int(time.time() * 1000))[:-1]
+ str(
sum(
2 * int(digit)
if idx % 2 == 0
else 3 * int(digit)
for idx, digit in enumerate(str(int(time.time() * 1000))[:-1])
)
% 10
)
)
)
def generate_signature(text: str):
timestamp = generate_timestamp()
version = 'v1.0.0'
secret = '8@VXGK3kKHr!u2gA'
data = f"{version}#{text}#{timestamp}#{secret}"
signature = hashlib.md5(data.encode()).hexdigest()
signature = signature[::-1]
return {
"signature": signature,
"timestamp": timestamp,
"version": version
}
def xor_hash(B: str):
r = []
i = 0
def o(e, t):
o_val = 0
for i in range(len(t)):
o_val |= r[i] << (8 * i)
return e ^ o_val
for e in range(len(B)):
t = ord(B[e])
r.insert(0, 255 & t)
if len(r) >= 4:
i = o(i, r)
r = []
if len(r) > 0:
i = o(i, r)
return hex(i)[2:]
def performance() -> str:
t = int(time.time() * 1000)
e = 0
while t == int(time.time() * 1000):
e += 1
return hex(t)[2:] + hex(e)[2:]
def generate_visitor_id(user_agent: str) -> str:
f = performance()
r = hex(int(random.random() * (16**16)))[2:-2]
d = xor_hash(user_agent)
e = hex(1080 * 1920)[2:]
return f"{f}-{r}-{d}-{e}-{f}"
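
Note: the signing scheme above boils down to a millisecond timestamp with a weighted check digit plus a reversed MD5 of `version#text#timestamp#secret`. A self-contained worked example (the text value is arbitrary; the secret is the one hard-coded above):

```python
import hashlib
import time

base = str(int(time.time() * 1000))[:-1]
check = sum(2 * int(d) if i % 2 == 0 else 3 * int(d) for i, d in enumerate(base)) % 10
timestamp = base + str(check)  # what generate_timestamp() produces

text, version, secret = "Hello", "v1.0.0", "8@VXGK3kKHr!u2gA"
signature = hashlib.md5(f"{version}#{text}#{timestamp}#{secret}".encode()).hexdigest()[::-1]
print({"signature": signature, "timestamp": timestamp, "version": version})
```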


@ -1,66 +0,0 @@
from __future__ import annotations
import json
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import get_random_string
class NoowAi(AsyncGeneratorProvider):
url = "https://noowai.com"
supports_message_history = True
supports_gpt_35_turbo = True
working = False
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
headers = {
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
"Accept": "*/*",
"Accept-Language": "de,en-US;q=0.7,en;q=0.3",
"Accept-Encoding": "gzip, deflate, br",
"Referer": f"{cls.url}/",
"Content-Type": "application/json",
"Origin": cls.url,
"Alt-Used": "noowai.com",
"Connection": "keep-alive",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
"Pragma": "no-cache",
"Cache-Control": "no-cache",
"TE": "trailers"
}
async with ClientSession(headers=headers) as session:
data = {
"botId": "default",
"customId": "d49bc3670c3d858458576d75c8ea0f5d",
"session": "N/A",
"chatId": get_random_string(),
"contextId": 25,
"messages": messages,
"newMessage": messages[-1]["content"],
"stream": True
}
async with session.post(f"{cls.url}/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response:
response.raise_for_status()
async for line in response.content:
if line.startswith(b"data: "):
try:
line = json.loads(line[6:])
assert "type" in line
except:
raise RuntimeError(f"Broken line: {line.decode()}")
if line["type"] == "live":
yield line["data"]
elif line["type"] == "end":
break
elif line["type"] == "error":
raise RuntimeError(line["data"])


@ -1,59 +0,0 @@
from __future__ import annotations
import random, string, json
from aiohttp import ClientSession
from ...typing import Messages, AsyncResult
from ..base_provider import AsyncGeneratorProvider
from ..helper import get_random_string
class Opchatgpts(AsyncGeneratorProvider):
url = "https://opchatgpts.net"
working = False
supports_message_history = True
supports_gpt_35_turbo = True
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None, **kwargs) -> AsyncResult:
headers = {
"User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
"Accept" : "*/*",
"Accept-Language" : "de,en-US;q=0.7,en;q=0.3",
"Origin" : cls.url,
"Alt-Used" : "opchatgpts.net",
"Referer" : f"{cls.url}/chatgpt-free-use/",
"Sec-Fetch-Dest" : "empty",
"Sec-Fetch-Mode" : "cors",
"Sec-Fetch-Site" : "same-origin",
}
async with ClientSession(
headers=headers
) as session:
data = {
"botId": "default",
"chatId": get_random_string(),
"contextId": 28,
"customId": None,
"messages": messages,
"newMessage": messages[-1]["content"],
"session": "N/A",
"stream": True
}
async with session.post(f"{cls.url}/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response:
response.raise_for_status()
async for line in response.content:
if line.startswith(b"data: "):
try:
line = json.loads(line[6:])
assert "type" in line
except:
raise RuntimeError(f"Broken line: {line.decode()}")
if line["type"] == "live":
yield line["data"]
elif line["type"] == "end":
break


@ -1,88 +0,0 @@
from __future__ import annotations
import json
from aiohttp import ClientSession
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider
from ..helper import format_prompt, get_cookies
class OpenAssistant(AsyncGeneratorProvider):
url = "https://open-assistant.io/chat"
needs_auth = True
working = False
model = "OA_SFT_Llama_30B_6"
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
cookies: dict = None,
**kwargs
) -> AsyncResult:
if not cookies:
cookies = get_cookies("open-assistant.io")
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
}
async with ClientSession(
cookies=cookies,
headers=headers
) as session:
async with session.post("https://open-assistant.io/api/chat", proxy=proxy) as response:
chat_id = (await response.json())["id"]
data = {
"chat_id": chat_id,
"content": f"<s>[INST]\n{format_prompt(messages)}\n[/INST]",
"parent_id": None
}
async with session.post("https://open-assistant.io/api/chat/prompter_message", proxy=proxy, json=data) as response:
parent_id = (await response.json())["id"]
data = {
"chat_id": chat_id,
"parent_id": parent_id,
"model_config_name": model if model else cls.model,
"sampling_parameters":{
"top_k": 50,
"top_p": None,
"typical_p": None,
"temperature": 0.35,
"repetition_penalty": 1.1111111111111112,
"max_new_tokens": 1024,
**kwargs
},
"plugins":[]
}
async with session.post("https://open-assistant.io/api/chat/assistant_message", proxy=proxy, json=data) as response:
data = await response.json()
if "id" in data:
message_id = data["id"]
elif "message" in data:
raise RuntimeError(data["message"])
else:
response.raise_for_status()
params = {
'chat_id': chat_id,
'message_id': message_id,
}
async with session.post("https://open-assistant.io/api/chat/events", proxy=proxy, params=params) as response:
start = "data: "
async for line in response.content:
line = line.decode("utf-8")
if line and line.startswith(start):
line = json.loads(line[len(start):])
if line["event_type"] == "token":
yield line["text"]
params = {
'chat_id': chat_id,
}
async with session.delete("https://open-assistant.io/api/chat", proxy=proxy, params=params) as response:
response.raise_for_status()


@ -1,140 +0,0 @@
from __future__ import annotations
import re
import json
from urllib import parse
from datetime import datetime
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider
from ...requests import StreamSession
class Phind(AsyncGeneratorProvider):
url = "https://www.phind.com"
working = False
lockdown = True
supports_stream = True
supports_message_history = True
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
timeout: int = 120,
creative_mode: bool = False,
**kwargs
) -> AsyncResult:
headers = {
"Accept": "*/*",
"Origin": cls.url,
"Referer": f"{cls.url}/search",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
}
async with StreamSession(
headers=headers,
impersonate="chrome",
proxies={"https": proxy},
timeout=timeout
) as session:
url = "https://www.phind.com/search?home=true"
async with session.get(url) as response:
text = await response.text()
match = re.search(r'<script id="__NEXT_DATA__" type="application/json">(?P<json>[\S\s]+?)</script>', text)
data = json.loads(match.group("json"))
challenge_seeds = data["props"]["pageProps"]["challengeSeeds"]
prompt = messages[-1]["content"]
data = {
"question": prompt,
"question_history": [
message["content"] for message in messages[:-1] if message["role"] == "user"
],
"answer_history": [
message["content"] for message in messages if message["role"] == "assistant"
],
"webResults": [],
"options": {
"date": datetime.now().strftime("%d.%m.%Y"),
"language": "en-US",
"detailed": True,
"anonUserId": "",
"answerModel": "GPT-4" if model.startswith("gpt-4") else "Phind-34B",
"creativeMode": creative_mode,
"customLinks": []
},
"context": "\n".join([message["content"] for message in messages if message["role"] == "system"]),
}
data["challenge"] = generate_challenge(data, **challenge_seeds)
async with session.post(f"https://https.api.phind.com/infer/", headers=headers, json=data) as response:
new_line = False
async for line in response.iter_lines():
if line.startswith(b"data: "):
chunk = line[6:]
if chunk.startswith(b'<PHIND_DONE/>'):
break
if chunk.startswith(b'<PHIND_BACKEND_ERROR>'):
raise RuntimeError(f"Response: {chunk.decode()}")
if chunk.startswith(b'<PHIND_WEBRESULTS>') or chunk.startswith(b'<PHIND_FOLLOWUP>'):
pass
elif chunk.startswith(b"<PHIND_METADATA>") or chunk.startswith(b"<PHIND_INDICATOR>"):
pass
elif chunk.startswith(b"<PHIND_SPAN_BEGIN>") or chunk.startswith(b"<PHIND_SPAN_END>"):
pass
elif chunk:
yield chunk.decode()
elif new_line:
yield "\n"
new_line = False
else:
new_line = True
def deterministic_stringify(obj):
def handle_value(value):
if isinstance(value, (dict, list)):
if isinstance(value, list):
return '[' + ','.join(sorted(map(handle_value, value))) + ']'
else: # It's a dict
return '{' + deterministic_stringify(value) + '}'
elif isinstance(value, bool):
return 'true' if value else 'false'
elif isinstance(value, (int, float)):
return format(value, '.8f').rstrip('0').rstrip('.')
elif isinstance(value, str):
return f'"{value}"'
else:
return 'null'
items = sorted(obj.items(), key=lambda x: x[0])
return ','.join([f'{k}:{handle_value(v)}' for k, v in items if handle_value(v) is not None])
def prng_general(seed, multiplier, addend, modulus):
a = seed * multiplier + addend
if a < 0:
return ((a%modulus)-modulus)/modulus
else:
return a%modulus/modulus
def generate_challenge_seed(l):
I = deterministic_stringify(l)
d = parse.quote(I, safe='')
return simple_hash(d)
def simple_hash(s):
d = 0
for char in s:
if len(char) > 1 or ord(char) >= 256:
continue
d = ((d << 5) - d + ord(char[0])) & 0xFFFFFFFF
if d > 0x7FFFFFFF: # 2147483647
d -= 0x100000000 # Subtract 2**32
return d
def generate_challenge(obj, **kwargs):
return prng_general(
seed=generate_challenge_seed(obj),
**kwargs
)
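
Note: `simple_hash` above is the classic `h = h * 31 + ord(c)` string hash with 32-bit signed wrap-around, and its result is fed into `prng_general` with multiplier/addend/modulus seeds scraped from the page. A standalone check of the hash itself (the expected value is easy to verify by hand):

```python
def simple_hash(s: str) -> int:
    d = 0
    for char in s:
        if len(char) > 1 or ord(char) >= 256:
            continue
        d = ((d << 5) - d + ord(char)) & 0xFFFFFFFF  # (d << 5) - d == d * 31
        if d > 0x7FFFFFFF:
            d -= 0x100000000  # emulate 32-bit signed overflow
    return d

assert simple_hash("hello") == 99162322
```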


@ -1,61 +0,0 @@
from __future__ import annotations
import uuid
import requests
from ...typing import Any, CreateResult
from ..base_provider import AbstractProvider
class V50(AbstractProvider):
url = 'https://p5.v50.ltd'
supports_gpt_35_turbo = True
supports_stream = False
needs_auth = False
working = False
@staticmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
stream: bool, **kwargs: Any) -> CreateResult:
conversation = (
"\n".join(
f"{message['role']}: {message['content']}" for message in messages
)
+ "\nassistant: "
)
payload = {
"prompt" : conversation,
"options" : {},
"systemMessage" : ".",
"temperature" : kwargs.get("temperature", 0.4),
"top_p" : kwargs.get("top_p", 0.4),
"model" : model,
"user" : str(uuid.uuid4())
}
headers = {
'authority' : 'p5.v50.ltd',
'accept' : 'application/json, text/plain, */*',
'accept-language' : 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
'content-type' : 'application/json',
'origin' : 'https://p5.v50.ltd',
'referer' : 'https://p5.v50.ltd/',
'sec-ch-ua-platform': '"Windows"',
'sec-fetch-dest' : 'empty',
'sec-fetch-mode' : 'cors',
'sec-fetch-site' : 'same-origin',
'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36'
}
response = requests.post(
"https://p5.v50.ltd/api/chat-process",
json=payload,
headers=headers,
proxies=kwargs.get('proxy', {}),
)
if "https://fk1.v50.ltd" not in response.text:
yield response.text


@ -1,392 +0,0 @@
from __future__ import annotations
import json, base64, requests, random, uuid
try:
import execjs
has_requirements = True
except ImportError:
has_requirements = False
from ...typing import Messages, TypedDict, CreateResult, Any
from ..base_provider import AbstractProvider
from ...errors import MissingRequirementsError
class Vercel(AbstractProvider):
url = 'https://sdk.vercel.ai'
working = False
supports_message_history = True
supports_gpt_35_turbo = True
supports_stream = True
@staticmethod
def create_completion(
model: str,
messages: Messages,
stream: bool,
proxy: str = None,
**kwargs
) -> CreateResult:
if not has_requirements:
raise MissingRequirementsError('Install "PyExecJS" package')
if not model:
model = "gpt-3.5-turbo"
elif model not in model_info:
raise ValueError(f"Vercel does not support {model}")
headers = {
'authority': 'sdk.vercel.ai',
'accept': '*/*',
'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
'cache-control': 'no-cache',
'content-type': 'application/json',
'custom-encoding': get_anti_bot_token(),
'origin': 'https://sdk.vercel.ai',
'pragma': 'no-cache',
'referer': 'https://sdk.vercel.ai/',
'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'user-agent': f'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.{random.randint(99, 999)}.{random.randint(99, 999)} Safari/537.36',
}
json_data = {
'model' : model_info[model]['id'],
'messages' : messages,
'playgroundId': str(uuid.uuid4()),
'chatIndex' : 0,
**model_info[model]['default_params'],
**kwargs
}
max_retries = kwargs.get('max_retries', 20)
for _ in range(max_retries):
response = requests.post('https://chat.vercel.ai/api/chat',
headers=headers, json=json_data, stream=True, proxies={"https": proxy})
try:
response.raise_for_status()
except:
continue
for token in response.iter_content(chunk_size=None):
yield token.decode()
break
def get_anti_bot_token() -> str:
headers = {
'authority': 'sdk.vercel.ai',
'accept': '*/*',
'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
'cache-control': 'no-cache',
'pragma': 'no-cache',
'referer': 'https://sdk.vercel.ai/',
'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'user-agent': f'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.{random.randint(99, 999)}.{random.randint(99, 999)} Safari/537.36',
}
response = requests.get('https://sdk.vercel.ai/openai.jpeg',
headers=headers).text
raw_data = json.loads(base64.b64decode(response,
validate=True))
js_script = '''const globalThis={marker:"mark"};String.prototype.fontcolor=function(){return `<font>${this}</font>`};
return (%s)(%s)''' % (raw_data['c'], raw_data['a'])
raw_token = json.dumps({'r': execjs.compile(js_script).call(''), 't': raw_data['t']},
separators = (",", ":"))
return base64.b64encode(raw_token.encode('utf-16le')).decode()
class ModelInfo(TypedDict):
id: str
default_params: dict[str, Any]
model_info: dict[str, ModelInfo] = {
# 'claude-instant-v1': {
# 'id': 'anthropic:claude-instant-v1',
# 'default_params': {
# 'temperature': 1,
# 'maximumLength': 1024,
# 'topP': 1,
# 'topK': 1,
# 'presencePenalty': 1,
# 'frequencyPenalty': 1,
# 'stopSequences': ['\n\nHuman:'],
# },
# },
# 'claude-v1': {
# 'id': 'anthropic:claude-v1',
# 'default_params': {
# 'temperature': 1,
# 'maximumLength': 1024,
# 'topP': 1,
# 'topK': 1,
# 'presencePenalty': 1,
# 'frequencyPenalty': 1,
# 'stopSequences': ['\n\nHuman:'],
# },
# },
# 'claude-v2': {
# 'id': 'anthropic:claude-v2',
# 'default_params': {
# 'temperature': 1,
# 'maximumLength': 1024,
# 'topP': 1,
# 'topK': 1,
# 'presencePenalty': 1,
# 'frequencyPenalty': 1,
# 'stopSequences': ['\n\nHuman:'],
# },
# },
'replicate/llama70b-v2-chat': {
'id': 'replicate:replicate/llama-2-70b-chat',
'default_params': {
'temperature': 0.75,
'maximumLength': 3000,
'topP': 1,
'repetitionPenalty': 1,
},
},
'a16z-infra/llama7b-v2-chat': {
'id': 'replicate:a16z-infra/llama7b-v2-chat',
'default_params': {
'temperature': 0.75,
'maximumLength': 3000,
'topP': 1,
'repetitionPenalty': 1,
},
},
'a16z-infra/llama13b-v2-chat': {
'id': 'replicate:a16z-infra/llama13b-v2-chat',
'default_params': {
'temperature': 0.75,
'maximumLength': 3000,
'topP': 1,
'repetitionPenalty': 1,
},
},
'replicate/llama-2-70b-chat': {
'id': 'replicate:replicate/llama-2-70b-chat',
'default_params': {
'temperature': 0.75,
'maximumLength': 3000,
'topP': 1,
'repetitionPenalty': 1,
},
},
'bigscience/bloom': {
'id': 'huggingface:bigscience/bloom',
'default_params': {
'temperature': 0.5,
'maximumLength': 1024,
'topP': 0.95,
'topK': 4,
'repetitionPenalty': 1.03,
},
},
'google/flan-t5-xxl': {
'id': 'huggingface:google/flan-t5-xxl',
'default_params': {
'temperature': 0.5,
'maximumLength': 1024,
'topP': 0.95,
'topK': 4,
'repetitionPenalty': 1.03,
},
},
'EleutherAI/gpt-neox-20b': {
'id': 'huggingface:EleutherAI/gpt-neox-20b',
'default_params': {
'temperature': 0.5,
'maximumLength': 1024,
'topP': 0.95,
'topK': 4,
'repetitionPenalty': 1.03,
'stopSequences': [],
},
},
'OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5': {
'id': 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5',
'default_params': {
'maximumLength': 1024,
'typicalP': 0.2,
'repetitionPenalty': 1,
},
},
'OpenAssistant/oasst-sft-1-pythia-12b': {
'id': 'huggingface:OpenAssistant/oasst-sft-1-pythia-12b',
'default_params': {
'maximumLength': 1024,
'typicalP': 0.2,
'repetitionPenalty': 1,
},
},
'bigcode/santacoder': {
'id': 'huggingface:bigcode/santacoder',
'default_params': {
'temperature': 0.5,
'maximumLength': 1024,
'topP': 0.95,
'topK': 4,
'repetitionPenalty': 1.03,
},
},
'command-light-nightly': {
'id': 'cohere:command-light-nightly',
'default_params': {
'temperature': 0.9,
'maximumLength': 1024,
'topP': 1,
'topK': 0,
'presencePenalty': 0,
'frequencyPenalty': 0,
'stopSequences': [],
},
},
'command-nightly': {
'id': 'cohere:command-nightly',
'default_params': {
'temperature': 0.9,
'maximumLength': 1024,
'topP': 1,
'topK': 0,
'presencePenalty': 0,
'frequencyPenalty': 0,
'stopSequences': [],
},
},
# 'gpt-4': {
# 'id': 'openai:gpt-4',
# 'default_params': {
# 'temperature': 0.7,
# 'maximumLength': 8192,
# 'topP': 1,
# 'presencePenalty': 0,
# 'frequencyPenalty': 0,
# 'stopSequences': [],
# },
# },
# 'gpt-4-0613': {
# 'id': 'openai:gpt-4-0613',
# 'default_params': {
# 'temperature': 0.7,
# 'maximumLength': 8192,
# 'topP': 1,
# 'presencePenalty': 0,
# 'frequencyPenalty': 0,
# 'stopSequences': [],
# },
# },
'code-davinci-002': {
'id': 'openai:code-davinci-002',
'default_params': {
'temperature': 0.5,
'maximumLength': 1024,
'topP': 1,
'presencePenalty': 0,
'frequencyPenalty': 0,
'stopSequences': [],
},
},
'gpt-3.5-turbo': {
'id': 'openai:gpt-3.5-turbo',
'default_params': {
'temperature': 0.7,
'maximumLength': 4096,
'topP': 1,
'topK': 1,
'presencePenalty': 1,
'frequencyPenalty': 1,
'stopSequences': [],
},
},
'gpt-3.5-turbo-16k': {
'id': 'openai:gpt-3.5-turbo-16k',
'default_params': {
'temperature': 0.7,
'maximumLength': 16280,
'topP': 1,
'topK': 1,
'presencePenalty': 1,
'frequencyPenalty': 1,
'stopSequences': [],
},
},
'gpt-3.5-turbo-16k-0613': {
'id': 'openai:gpt-3.5-turbo-16k-0613',
'default_params': {
'temperature': 0.7,
'maximumLength': 16280,
'topP': 1,
'topK': 1,
'presencePenalty': 1,
'frequencyPenalty': 1,
'stopSequences': [],
},
},
'text-ada-001': {
'id': 'openai:text-ada-001',
'default_params': {
'temperature': 0.5,
'maximumLength': 1024,
'topP': 1,
'presencePenalty': 0,
'frequencyPenalty': 0,
'stopSequences': [],
},
},
'text-babbage-001': {
'id': 'openai:text-babbage-001',
'default_params': {
'temperature': 0.5,
'maximumLength': 1024,
'topP': 1,
'presencePenalty': 0,
'frequencyPenalty': 0,
'stopSequences': [],
},
},
'text-curie-001': {
'id': 'openai:text-curie-001',
'default_params': {
'temperature': 0.5,
'maximumLength': 1024,
'topP': 1,
'presencePenalty': 0,
'frequencyPenalty': 0,
'stopSequences': [],
},
},
'text-davinci-002': {
'id': 'openai:text-davinci-002',
'default_params': {
'temperature': 0.5,
'maximumLength': 1024,
'topP': 1,
'presencePenalty': 0,
'frequencyPenalty': 0,
'stopSequences': [],
},
},
'text-davinci-003': {
'id': 'openai:text-davinci-003',
'default_params': {
'temperature': 0.5,
'maximumLength': 4097,
'topP': 1,
'presencePenalty': 0,
'frequencyPenalty': 0,
'stopSequences': [],
},
},
}
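
Note: the `custom-encoding` header built by `get_anti_bot_token` above ends with a small serialization step — the result of the evaluated script and the server-issued token are JSON-encoded and base64'd as UTF-16LE. That last step in isolation (placeholder values, since the real ones come from `openai.jpeg`):

```python
import base64
import json

raw_token = json.dumps({'r': 'script-result-placeholder', 't': 'token-placeholder'},
                       separators=(',', ':'))
custom_encoding = base64.b64encode(raw_token.encode('utf-16le')).decode()
print(custom_encoding)  # value sent as the "custom-encoding" request header
```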


@ -1,55 +0,0 @@
from __future__ import annotations
import json
from aiohttp import ClientSession
from ..base_provider import AsyncGeneratorProvider
from ...typing import AsyncResult, Messages
class Vitalentum(AsyncGeneratorProvider):
url = "https://app.vitalentum.io"
supports_gpt_35_turbo = True
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
"Accept": "text/event-stream",
"Accept-language": "de,en-US;q=0.7,en;q=0.3",
"Origin": cls.url,
"Referer": f"{cls.url}/",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
}
conversation = json.dumps({"history": [{
"speaker": "human" if message["role"] == "user" else "bot",
"text": message["content"],
} for message in messages]})
data = {
"conversation": conversation,
"temperature": 0.7,
**kwargs
}
async with ClientSession(
headers=headers
) as session:
async with session.post(f"{cls.url}/api/converse-edge", json=data, proxy=proxy) as response:
response.raise_for_status()
async for line in response.content:
line = line.decode()
if line.startswith("data: "):
if line.startswith("data: [DONE]"):
break
line = json.loads(line[6:-1])
content = line["choices"][0]["delta"].get("content")
if content:
yield content


@ -1,91 +0,0 @@
from __future__ import annotations
import json
import requests
from ..base_provider import AbstractProvider
from ...typing import Messages, CreateResult
class VoiGpt(AbstractProvider):
"""
VoiGpt - A provider for VoiGpt.com
**Note** : to use this provider you have to get your csrf token/cookie from the voigpt.com website
Args:
model: The model to use
messages: The messages to send
stream: Whether to stream the response
proxy: The proxy to use
access_token: The access token to use
**kwargs: Additional keyword arguments
Returns:
A CreateResult object
"""
url = "https://voigpt.com"
working = False
supports_gpt_35_turbo = True
supports_message_history = True
supports_stream = False
_access_token: str = None
@classmethod
def create_completion(
cls,
model: str,
messages: Messages,
stream: bool,
proxy: str = None,
access_token: str = None,
**kwargs
) -> CreateResult:
if not model:
model = "gpt-3.5-turbo"
if not access_token:
access_token = cls._access_token
if not access_token:
headers = {
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
"accept-language": "de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US;q=0.6",
"sec-ch-ua": "\"Google Chrome\";v=\"119\", \"Chromium\";v=\"119\", \"Not?A_Brand\";v=\"24\"",
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": "\"Linux\"",
"sec-fetch-dest": "document",
"sec-fetch-mode": "navigate",
"sec-fetch-site": "none",
"sec-fetch-user": "?1",
"upgrade-insecure-requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36",
}
req_response = requests.get(cls.url, headers=headers)
access_token = cls._access_token = req_response.cookies.get("csrftoken")
headers = {
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US;q=0.6",
"Cookie": f"csrftoken={access_token};",
"Origin": "https://voigpt.com",
"Referer": "https://voigpt.com/",
"Sec-Ch-Ua": "'Google Chrome';v='119', 'Chromium';v='119', 'Not?A_Brand';v='24'",
"Sec-Ch-Ua-Mobile": "?0",
"Sec-Ch-Ua-Platform": "'Windows'",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36",
"X-Csrftoken": access_token,
}
payload = {
"messages": messages,
}
request_url = f"{cls.url}/generate_response/"
req_response = requests.post(request_url, headers=headers, json=payload)
try:
response = json.loads(req_response.text)
yield response["response"]
except:
raise RuntimeError(f"Response: {req_response.text}")


@ -1,65 +0,0 @@
from __future__ import annotations
import random, string, time
from aiohttp import ClientSession
from ..base_provider import AsyncProvider
class Wewordle(AsyncProvider):
url = "https://wewordle.org"
working = False
supports_gpt_35_turbo = True
@classmethod
async def create_async(
cls,
model: str,
messages: list[dict[str, str]],
proxy: str = None,
**kwargs
) -> str:
headers = {
"accept" : "*/*",
"pragma" : "no-cache",
"Content-Type" : "application/json",
"Connection" : "keep-alive"
}
_user_id = "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=16))
_app_id = "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=31))
_request_date = time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime())
data = {
"user" : _user_id,
"messages" : messages,
"subscriber": {
"originalPurchaseDate" : None,
"originalApplicationVersion" : None,
"allPurchaseDatesMillis" : {},
"entitlements" : {"active": {}, "all": {}},
"allPurchaseDates" : {},
"allExpirationDatesMillis" : {},
"allExpirationDates" : {},
"originalAppUserId" : f"$RCAnonymousID:{_app_id}",
"latestExpirationDate" : None,
"requestDate" : _request_date,
"latestExpirationDateMillis" : None,
"nonSubscriptionTransactions" : [],
"originalPurchaseDateMillis" : None,
"managementURL" : None,
"allPurchasedProductIdentifiers": [],
"firstSeen" : _request_date,
"activeSubscriptions" : [],
}
}
async with ClientSession(
headers=headers
) as session:
async with session.post(f"{cls.url}/gptapi/v1/android/turbo", proxy=proxy, json=data) as response:
response.raise_for_status()
content = (await response.json())["message"]["content"]
if content:
return content


@ -1,57 +0,0 @@
from __future__ import annotations
import random
import requests
from ...typing import Any, CreateResult
from ..base_provider import AbstractProvider, format_prompt
class Wuguokai(AbstractProvider):
url = 'https://chat.wuguokai.xyz'
supports_gpt_35_turbo = True
working = False
@staticmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
stream: bool,
**kwargs: Any,
) -> CreateResult:
headers = {
'authority': 'ai-api.wuguokai.xyz',
'accept': 'application/json, text/plain, */*',
'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
'content-type': 'application/json',
'origin': 'https://chat.wuguokai.xyz',
'referer': 'https://chat.wuguokai.xyz/',
'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-site',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
}
data ={
"prompt": format_prompt(messages),
"options": {},
"userId": f"#/chat/{random.randint(1,99999999)}",
"usingContext": True
}
response = requests.post(
"https://ai-api20.wuguokai.xyz/api/chat-process",
headers=headers,
timeout=3,
json=data,
proxies=kwargs.get('proxy', {}),
)
_split = response.text.split("> 若回答失败请重试或多刷新几次界面后重试")
if response.status_code != 200:
raise Exception(f"Error: {response.status_code} {response.reason}")
if len(_split) > 1:
yield _split[1].strip()
else:
yield _split[0].strip()

@@ -1,58 +0,0 @@
from __future__ import annotations
import json
from ...requests import StreamSession
from ..base_provider import AsyncGeneratorProvider
from ...typing import AsyncResult, Messages
class Ylokh(AsyncGeneratorProvider):
url = "https://chat.ylokh.xyz"
working = False
supports_message_history = True
supports_gpt_35_turbo = True
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
stream: bool = True,
proxy: str = None,
timeout: int = 120,
**kwargs
) -> AsyncResult:
model = model if model else "gpt-3.5-turbo"
headers = {"Origin": cls.url, "Referer": f"{cls.url}/"}
data = {
"messages": messages,
"model": model,
"temperature": 1,
"presence_penalty": 0,
"top_p": 1,
"frequency_penalty": 0,
"allow_fallback": True,
"stream": stream,
**kwargs
}
async with StreamSession(
headers=headers,
proxies={"https": proxy},
timeout=timeout
) as session:
async with session.post("https://chatapi.ylokh.xyz/v1/chat/completions", json=data) as response:
response.raise_for_status()
if stream:
async for line in response.iter_lines():
line = line.decode()
if line.startswith("data: "):
if line.startswith("data: [DONE]"):
break
line = json.loads(line[6:])
content = line["choices"][0]["delta"].get("content")
if content:
yield content
else:
chat = await response.json()
yield chat["choices"][0]["message"].get("content")

@@ -1,61 +0,0 @@
from __future__ import annotations
import random
from ...requests import StreamSession
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, format_prompt
class Yqcloud(AsyncGeneratorProvider):
url = "https://chat9.yqcloud.top/"
working = False
supports_gpt_35_turbo = True
@staticmethod
async def create_async_generator(
model: str,
messages: Messages,
proxy: str = None,
timeout: int = 120,
**kwargs,
) -> AsyncResult:
async with StreamSession(
headers=_create_header(), proxies={"https": proxy}, timeout=timeout
) as session:
payload = _create_payload(messages, **kwargs)
async with session.post("https://api.aichatos.cloud/api/generateStream", json=payload) as response:
response.raise_for_status()
async for chunk in response.iter_content():
if chunk:
chunk = chunk.decode()
if "sorry, 您的ip已由于触发防滥用检测而被封禁" in chunk:
raise RuntimeError("IP address is blocked by abuse detection.")
yield chunk
def _create_header():
return {
"accept" : "application/json, text/plain, */*",
"content-type" : "application/json",
"origin" : "https://chat9.yqcloud.top",
"referer" : "https://chat9.yqcloud.top/"
}
def _create_payload(
messages: Messages,
system_message: str = "",
user_id: int = None,
**kwargs
):
if not user_id:
user_id = random.randint(1690000544336, 2093025544336)
return {
"prompt": format_prompt(messages),
"network": True,
"system": system_message,
"withoutContext": False,
"stream": True,
"userId": f"#/chat/{user_id}"
}

@@ -1,35 +0,0 @@
from .AiService import AiService
from .CodeLinkAva import CodeLinkAva
from .DfeHub import DfeHub
from .EasyChat import EasyChat
from .Forefront import Forefront
from .GetGpt import GetGpt
from .Lockchat import Lockchat
from .Wewordle import Wewordle
from .Equing import Equing
from .Wuguokai import Wuguokai
from .V50 import V50
from .FastGpt import FastGpt
from .Aivvm import Aivvm
from .Vitalentum import Vitalentum
from .H2o import H2o
from .Myshell import Myshell
from .Acytoo import Acytoo
from .Aibn import Aibn
from .Ails import Ails
from .ChatgptDuo import ChatgptDuo
from .Cromicle import Cromicle
from .Opchatgpts import Opchatgpts
from .Yqcloud import Yqcloud
from .Aichat import Aichat
from .Berlin import Berlin
from .Phind import Phind
from .AiAsk import AiAsk
from ..AiChatOnline import AiChatOnline
from .ChatAnywhere import ChatAnywhere
from .FakeGpt import FakeGpt
from .GeekGpt import GeekGpt
from .GPTalk import GPTalk
from .Hashnode import Hashnode
from .Ylokh import Ylokh
from .OpenAssistant import OpenAssistant

@@ -54,6 +54,7 @@ class Gemini(AsyncGeneratorProvider):
     url = "https://gemini.google.com"
     needs_auth = True
     working = True
+    default_model = 'gemini'
     image_models = ["gemini"]
     default_vision_model = "gemini"
     _cookies: Cookies = None

@@ -8,7 +8,7 @@ from ...typing import AsyncResult, Messages
 class OpenRouter(Openai):
     label = "OpenRouter"
     url = "https://openrouter.ai"
-    working = True
+    working = False
     default_model = "mistralai/mistral-7b-instruct:free"

     @classmethod

@@ -11,7 +11,7 @@ from ...image import to_data_uri
 class Openai(AsyncGeneratorProvider, ProviderModelMixin):
     label = "OpenAI API"
-    url = "https://openai.com"
+    url = "https://platform.openai.com"
     working = True
     needs_auth = True
     supports_message_history = True

@@ -61,9 +61,11 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
     default_model = None
     default_vision_model = "gpt-4o"
     models = [ "auto", "gpt-4o-mini", "gpt-4o", "gpt-4", "gpt-4-gizmo"]
     model_aliases = {
-        "gpt-4-turbo-preview": "gpt-4",
-        "dall-e": "gpt-4",
+        #"gpt-4-turbo": "gpt-4",
+        #"gpt-4": "gpt-4-gizmo",
+        #"dalle": "gpt-4",
     }
     _api_key: str = None
     _headers: dict = None

@@ -15,7 +15,6 @@ class PerplexityApi(Openai):
         "llama-3-sonar-large-32k-online",
         "llama-3-8b-instruct",
         "llama-3-70b-instruct",
-        "mixtral-8x7b-instruct"
     ]

     @classmethod

@@ -7,5 +7,5 @@ from .Poe import Poe
 from .Openai import Openai
 from .Groq import Groq
 from .OpenRouter import OpenRouter
-from .OpenaiAccount import OpenaiAccount
+#from .OpenaiAccount import OpenaiAccount
 from .PerplexityApi import PerplexityApi

@@ -1,80 +0,0 @@
from __future__ import annotations
import time
import os
try:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
except ImportError:
pass
from ...typing import CreateResult, Messages
from ..base_provider import AbstractProvider
from ..helper import format_prompt
from ...webdriver import WebDriver, WebDriverSession, element_send_text
class Bard(AbstractProvider):
url = "https://bard.google.com"
working = False
needs_auth = True
webdriver = True
@classmethod
def create_completion(
cls,
model: str,
messages: Messages,
stream: bool,
proxy: str = None,
webdriver: WebDriver = None,
user_data_dir: str = None,
headless: bool = True,
**kwargs
) -> CreateResult:
prompt = format_prompt(messages)
session = WebDriverSession(webdriver, user_data_dir, headless, proxy=proxy)
with session as driver:
try:
driver.get(f"{cls.url}/chat")
wait = WebDriverWait(driver, 10 if headless else 240)
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.ql-editor.textarea")))
except:
# Reopen browser for login
if not webdriver:
driver = session.reopen()
driver.get(f"{cls.url}/chat")
login_url = os.environ.get("G4F_LOGIN_URL")
if login_url:
yield f"Please login: [Google Bard]({login_url})\n\n"
wait = WebDriverWait(driver, 240)
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.ql-editor.textarea")))
else:
raise RuntimeError("Prompt textarea not found. You may not be logged in.")
# Add hook in XMLHttpRequest
script = """
const _http_request_open = XMLHttpRequest.prototype.open;
window._message = "";
XMLHttpRequest.prototype.open = function(method, url) {
if (url.includes("/assistant.lamda.BardFrontendService/StreamGenerate")) {
this.addEventListener("load", (event) => {
window._message = JSON.parse(JSON.parse(this.responseText.split("\\n")[3])[0][2])[4][0][1][0];
});
}
return _http_request_open.call(this, method, url);
}
"""
driver.execute_script(script)
element_send_text(driver.find_element(By.CSS_SELECTOR, "div.ql-editor.textarea"), prompt)
while True:
chunk = driver.execute_script("return window._message;")
if chunk:
yield chunk
return
else:
time.sleep(0.1)
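The deleted Bard provider works by injecting a hook into `XMLHttpRequest`, stashing the interesting response text in a page-global variable, and polling that variable from Python. The generic shape of that trick, sketched with plain Selenium (URL, filter string, and variable name are illustrative, not taken from any real service):

```python
import time
from selenium import webdriver

driver = webdriver.Chrome()
driver.get("https://example.com/chat")

# Patch XMLHttpRequest so a matching response is copied into window._message.
driver.execute_script("""
window._message = "";
const _open = XMLHttpRequest.prototype.open;
XMLHttpRequest.prototype.open = function(method, url) {
    if (url.includes("/StreamGenerate")) {
        this.addEventListener("load", () => { window._message = this.responseText; });
    }
    return _open.call(this, method, url);
};
""")

# ... trigger the request in the page, e.g. by sending a prompt ...

while not (message := driver.execute_script("return window._message;")):
    time.sleep(0.1)  # poll until the hook has captured a response
print(message)
driver.quit()
```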

@@ -9,7 +9,7 @@ from ...webdriver import WebDriver, WebDriverSession, bypass_cloudflare

 class MyShell(AbstractProvider):
     url = "https://app.myshell.ai/chat"
-    working = True
+    working = False
     supports_gpt_35_turbo = True
     supports_stream = True

@@ -16,7 +16,7 @@ from ...webdriver import WebDriver, WebDriverSession, element_send_text

 class PerplexityAi(AbstractProvider):
     url = "https://www.perplexity.ai"
-    working = True
+    working = False
     supports_gpt_35_turbo = True
     supports_stream = True

@@ -8,7 +8,7 @@ from ...webdriver import WebDriver, WebDriverSession

 class TalkAi(AbstractProvider):
     url = "https://talkai.info"
-    working = True
+    working = False
     supports_gpt_35_turbo = True
     supports_stream = True

@@ -2,4 +2,3 @@ from .MyShell import MyShell
 from .PerplexityAi import PerplexityAi
 from .Phind import Phind
 from .TalkAi import TalkAi
-from .Bard import Bard

@@ -1,66 +0,0 @@
from __future__ import annotations
from urllib.parse import unquote
from ...typing import AsyncResult, Messages
from ..base_provider import AbstractProvider
from ...webdriver import WebDriver
from ...requests import Session, get_session_from_browser
class AiChatting(AbstractProvider):
url = "https://www.aichatting.net"
supports_gpt_35_turbo = True
_session: Session = None
@classmethod
def create_completion(
cls,
model: str,
messages: Messages,
stream: bool,
proxy: str = None,
timeout: int = 120,
webdriver: WebDriver = None,
**kwargs
) -> AsyncResult:
if not cls._session:
cls._session = get_session_from_browser(cls.url, webdriver, proxy, timeout)
visitorId = unquote(cls._session.cookies.get("aichatting.website.visitorId"))
headers = {
"accept": "application/json, text/plain, */*",
"lang": "en",
"source": "web"
}
data = {
"roleId": 0,
}
try:
response = cls._session.post("https://aga-api.aichatting.net/aigc/chat/record/conversation/create", json=data, headers=headers)
response.raise_for_status()
conversation_id = response.json()["data"]["conversationId"]
except Exception as e:
cls.reset()
raise e
headers = {
"authority": "aga-api.aichatting.net",
"accept": "text/event-stream,application/json, text/event-stream",
"lang": "en",
"source": "web",
"vtoken": visitorId,
}
data = {
"spaceHandle": True,
"roleId": 0,
"messages": messages,
"conversationId": conversation_id,
}
response = cls._session.post("https://aga-api.aichatting.net/aigc/chat/v2/stream", json=data, headers=headers, stream=True)
response.raise_for_status()
for chunk in response.iter_lines():
if chunk.startswith(b"data:"):
yield chunk[5:].decode().replace("-=- --", " ").replace("-=-n--", "\n").replace("--@DONE@--", "")
@classmethod
def reset(cls):
cls._session = None

@@ -1,68 +0,0 @@
from __future__ import annotations
import re
from aiohttp import ClientSession
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider
from ..helper import format_prompt
class ChatAiGpt(AsyncGeneratorProvider):
url = "https://chataigpt.org"
supports_gpt_35_turbo = True
_nonce = None
_post_id = None
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
headers = {
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
"Accept": "*/*",
"Accept-Language": "de,en-US;q=0.7,en;q=0.3",
"Accept-Encoding": "gzip, deflate, br",
"Origin": cls.url,
"Alt-Used": cls.url,
"Connection": "keep-alive",
"Referer": cls.url,
"Pragma": "no-cache",
"Cache-Control": "no-cache",
"TE": "trailers",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
}
async with ClientSession(headers=headers) as session:
if not cls._nonce:
async with session.get(f"{cls.url}/", proxy=proxy) as response:
response.raise_for_status()
response = await response.text()
result = re.search(
r'data-nonce=(.*?) data-post-id=([0-9]+)', response
)
if result:
cls._nonce, cls._post_id = result.group(1), result.group(2)
else:
raise RuntimeError("No nonce found")
prompt = format_prompt(messages)
data = {
"_wpnonce": cls._nonce,
"post_id": cls._post_id,
"url": cls.url,
"action": "wpaicg_chat_shortcode_message",
"message": prompt,
"bot_id": 0
}
async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, proxy=proxy) as response:
response.raise_for_status()
async for chunk in response.content:
if chunk:
yield chunk.decode()
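The provider above drives a WordPress AI-chat plugin: it scrapes `data-nonce` and `data-post-id` from the landing page and replays them against `wp-admin/admin-ajax.php` with the `wpaicg_chat_shortcode_message` action. A condensed synchronous sketch of that flow, with `example.com` as a placeholder for a site running such a plugin:

```python
import re
import requests

base = "https://example.com"
html = requests.get(base).text
match = re.search(r'data-nonce=(.*?) data-post-id=([0-9]+)', html)
if not match:
    raise RuntimeError("No nonce found")

nonce, post_id = match.group(1), match.group(2)
reply = requests.post(f"{base}/wp-admin/admin-ajax.php", data={
    "_wpnonce": nonce,
    "post_id": post_id,
    "url": base,
    "action": "wpaicg_chat_shortcode_message",
    "message": "Hello",
    "bot_id": 0,
})
print(reply.text)
```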

@@ -1,44 +0,0 @@
from __future__ import annotations
import json
from ...requests import StreamSession
from ...typing import AsyncGenerator
from ..base_provider import AsyncGeneratorProvider, format_prompt
class Komo(AsyncGeneratorProvider):
url = "https://komo.ai/api/ask"
supports_gpt_35_turbo = True
@classmethod
async def create_async_generator(
cls,
model: str,
messages: list[dict[str, str]],
**kwargs
) -> AsyncGenerator:
async with StreamSession(impersonate="chrome107") as session:
prompt = format_prompt(messages)
data = {
"query": prompt,
"FLAG_URLEXTRACT": "false",
"token": "",
"FLAG_MODELA": "1",
}
headers = {
'authority': 'komo.ai',
'accept': 'text/event-stream',
'cache-control': 'no-cache',
'referer': 'https://komo.ai/',
}
async with session.get(cls.url, params=data, headers=headers) as response:
response.raise_for_status()
next = False
async for line in response.iter_lines():
if line == b"event: line":
next = True
elif next and line.startswith(b"data: "):
yield json.loads(line[6:])
next = False

@@ -1,97 +0,0 @@
from __future__ import annotations
import random, json
from datetime import datetime
from ...requests import StreamSession
from ...typing import AsyncGenerator
from ..base_provider import AsyncGeneratorProvider
class MikuChat(AsyncGeneratorProvider):
url = "https://ai.okmiku.com"
supports_gpt_35_turbo = True
@classmethod
async def create_async_generator(
cls,
model: str,
messages: list[dict[str, str]],
**kwargs
) -> AsyncGenerator:
if not model:
model = "gpt-3.5-turbo"
headers = {
"authority": "api.catgpt.cc",
"accept": "application/json",
"origin": cls.url,
"referer": f"{cls.url}/chat/",
'x-app-version': 'undefined',
'x-date': get_datetime(),
'x-fingerprint': get_fingerprint(),
'x-platform': 'web'
}
async with StreamSession(headers=headers, impersonate="chrome107") as session:
data = {
"model": model,
"top_p": 0.8,
"temperature": 0.5,
"presence_penalty": 1,
"frequency_penalty": 0,
"max_tokens": 2000,
"stream": True,
"messages": messages,
}
async with session.post("https://api.catgpt.cc/ai/v1/chat/completions", json=data) as response:
print(await response.text())
response.raise_for_status()
async for line in response.iter_lines():
if line.startswith(b"data: "):
line = json.loads(line[6:])
chunk = line["choices"][0]["delta"].get("content")
if chunk:
yield chunk
def k(e: str, t: int):
a = len(e) & 3
s = len(e) - a
i = t
c = 3432918353
o = 461845907
n = 0
r = 0
while n < s:
r = (ord(e[n]) & 255) | ((ord(e[n + 1]) & 255) << 8) | ((ord(e[n + 2]) & 255) << 16) | ((ord(e[n + 3]) & 255) << 24)
n += 4
r = (r & 65535) * c + (((r >> 16) * c & 65535) << 16) & 4294967295
r = (r << 15) | (r >> 17)
r = (r & 65535) * o + (((r >> 16) * o & 65535) << 16) & 4294967295
i ^= r
i = (i << 13) | (i >> 19)
l = (i & 65535) * 5 + (((i >> 16) * 5 & 65535) << 16) & 4294967295
i = (l & 65535) + 27492 + (((l >> 16) + 58964 & 65535) << 16)
if a == 3:
r ^= (ord(e[n + 2]) & 255) << 16
elif a == 2:
r ^= (ord(e[n + 1]) & 255) << 8
elif a == 1:
r ^= ord(e[n]) & 255
r = (r & 65535) * c + (((r >> 16) * c & 65535) << 16) & 4294967295
r = (r << 15) | (r >> 17)
r = (r & 65535) * o + (((r >> 16) * o & 65535) << 16) & 4294967295
i ^= r
i ^= len(e)
i ^= i >> 16
i = (i & 65535) * 2246822507 + (((i >> 16) * 2246822507 & 65535) << 16) & 4294967295
i ^= i >> 13
i = (i & 65535) * 3266489909 + (((i >> 16) * 3266489909 & 65535) << 16) & 4294967295
i ^= i >> 16
return i & 0xFFFFFFFF
def get_fingerprint() -> str:
return str(k(str(int(random.random() * 100000)), 256))
def get_datetime() -> str:
return datetime.now().strftime("%Y-%m-%d %H:%M:%S")

@@ -1,4 +0,0 @@
from .MikuChat import MikuChat
from .Komo import Komo
from .ChatAiGpt import ChatAiGpt
from .AiChatting import AiChatting

@@ -91,7 +91,6 @@ body {
     background: var(--colour-1);
     color: var(--colour-3);
     height: 100vh;
-    max-width: 1600px;
     margin: auto;
 }

@@ -4,21 +4,20 @@ from dataclasses import dataclass

 from .Provider import IterListProvider, ProviderType
 from .Provider import (
-    AiChatOnline,
     Airforce,
     Allyfy,
     Bing,
     Binjie,
     Bixin123,
     Blackbox,
-    ChatGot,
-    Chatgpt4Online,
     ChatGpt,
     Chatgpt4o,
+    Chatgpt4Online,
+    ChatGptEs,
     ChatgptFree,
-    CodeNews,
     DDG,
     DeepInfra,
+    DeepInfraChat,
     DeepInfraImage,
     Free2GPT,
     FreeChatgpt,
@@ -31,6 +30,7 @@ from .Provider import (
     HuggingFace,
     Koala,
     Liaobots,
+    LiteIcoding,
     MagickPen,
     MetaAI,
     Nexra,
@@ -41,9 +41,7 @@ from .Provider import (
     Reka,
     Replicate,
     ReplicateHome,
-    Snova,
     TeachAnything,
-    TwitterBio,
     Upstage,
     You,
 )
@@ -76,7 +74,6 @@ default = Model(
         FreeChatgpt,
         HuggingChat,
         Pizzagpt,
-        ChatgptFree,
         ReplicateHome,
         Upstage,
         Blackbox,
@@ -84,6 +81,8 @@ default = Model(
         Binjie,
         Free2GPT,
         MagickPen,
+        DeepInfraChat,
+        LiteIcoding,
     ])
 )
@@ -106,7 +105,7 @@ gpt_35_turbo = Model(
     name = 'gpt-3.5-turbo',
     base_provider = 'OpenAI',
     best_provider = IterListProvider([
-        Allyfy, TwitterBio, Nexra, Bixin123, CodeNews, Airforce,
+        Allyfy, Nexra, Bixin123, Airforce,
     ])
 )
@@ -115,7 +114,7 @@ gpt_4o = Model(
     name = 'gpt-4o',
     base_provider = 'OpenAI',
     best_provider = IterListProvider([
-        Liaobots, Chatgpt4o, Airforce,
+        Liaobots, Airforce, Chatgpt4o, ChatGptEs,
         OpenaiChat
     ])
 )
@@ -124,8 +123,8 @@ gpt_4o_mini = Model(
     name = 'gpt-4o-mini',
     base_provider = 'OpenAI',
     best_provider = IterListProvider([
-        DDG, Liaobots, You, FreeNetfly, Pizzagpt, ChatgptFree, AiChatOnline, CodeNews, MagickPen, Airforce,
-        OpenaiChat, Koala, ChatGpt
+        DDG, ChatGptEs, You, FreeNetfly, Pizzagpt, LiteIcoding, MagickPen, Liaobots, Airforce, ChatgptFree, Koala,
+        OpenaiChat, ChatGpt
     ])
 )
@@ -141,8 +140,26 @@ gpt_4 = Model(
     name = 'gpt-4',
     base_provider = 'OpenAI',
     best_provider = IterListProvider([
-        Chatgpt4Online, Nexra, Binjie, Airforce, Bing,
-        gpt_4_turbo.best_provider, gpt_4o.best_provider, gpt_4o_mini.best_provider
+        Nexra, Binjie, Airforce,
+        gpt_4_turbo.best_provider, gpt_4o.best_provider, gpt_4o_mini.best_provider,
+        Chatgpt4Online, Bing, OpenaiChat,
+    ])
+)
+
+# o1
+o1 = Model(
+    name = 'o1',
+    base_provider = 'OpenAI',
+    best_provider = IterListProvider([
+        Liaobots
+    ])
+)
+
+o1_mini = Model(
+    name = 'o1-mini',
+    base_provider = 'OpenAI',
+    best_provider = IterListProvider([
+        Liaobots
     ])
 )
@@ -161,40 +178,55 @@ meta = Model(
     best_provider = MetaAI
 )

+# llama 2
 llama_2_13b = Model(
     name = "llama-2-13b",
-    base_provider = "Meta",
+    base_provider = "Meta Llama",
     best_provider = IterListProvider([Airforce])
 )

+# llama 3
 llama_3_8b = Model(
     name = "llama-3-8b",
-    base_provider = "Meta",
+    base_provider = "Meta Llama",
     best_provider = IterListProvider([Airforce, DeepInfra, Replicate])
 )

 llama_3_70b = Model(
     name = "llama-3-70b",
-    base_provider = "Meta",
+    base_provider = "Meta Llama",
     best_provider = IterListProvider([ReplicateHome, Airforce, DeepInfra, Replicate])
 )

+llama_3 = Model(
+    name = "llama-3",
+    base_provider = "Meta Llama",
+    best_provider = IterListProvider([llama_3_8b.best_provider, llama_3_70b.best_provider])
+)
+
+# llama 3.1
 llama_3_1_8b = Model(
     name = "llama-3.1-8b",
-    base_provider = "Meta",
-    best_provider = IterListProvider([Blackbox, Airforce, PerplexityLabs])
+    base_provider = "Meta Llama",
+    best_provider = IterListProvider([Blackbox, DeepInfraChat, Airforce, PerplexityLabs])
 )

 llama_3_1_70b = Model(
     name = "llama-3.1-70b",
-    base_provider = "Meta",
-    best_provider = IterListProvider([DDG, HuggingChat, FreeGpt, Blackbox, TeachAnything, Free2GPT, Airforce, HuggingFace, PerplexityLabs])
+    base_provider = "Meta Llama",
+    best_provider = IterListProvider([DDG, HuggingChat, Blackbox, FreeGpt, TeachAnything, Free2GPT, DeepInfraChat, Airforce, HuggingFace, PerplexityLabs])
 )

 llama_3_1_405b = Model(
     name = "llama-3.1-405b",
-    base_provider = "Meta",
-    best_provider = IterListProvider([Blackbox, Airforce])
+    base_provider = "Meta Llama",
+    best_provider = IterListProvider([Blackbox, DeepInfraChat, Airforce])
+)
+
+llama_3_1 = Model(
+    name = "llama-3.1",
+    base_provider = "Meta Llama",
+    best_provider = IterListProvider([llama_3_1_8b.best_provider, llama_3_1_70b.best_provider, llama_3_1_405b.best_provider,])
 )
@@ -202,19 +234,19 @@ llama_3_1_405b = Model(
 mistral_7b = Model(
     name = "mistral-7b",
     base_provider = "Mistral",
-    best_provider = IterListProvider([HuggingChat, Airforce, HuggingFace, DeepInfra])
+    best_provider = IterListProvider([HuggingChat, DeepInfraChat, Airforce, HuggingFace, DeepInfra])
 )

 mixtral_8x7b = Model(
     name = "mixtral-8x7b",
     base_provider = "Mistral",
-    best_provider = IterListProvider([HuggingChat, DDG, ReplicateHome, TwitterBio, Airforce, DeepInfra, HuggingFace])
+    best_provider = IterListProvider([HuggingChat, DDG, ReplicateHome, DeepInfraChat, Airforce, DeepInfra, HuggingFace])
 )

 mixtral_8x22b = Model(
     name = "mixtral-8x22b",
     base_provider = "Mistral",
-    best_provider = IterListProvider([Airforce])
+    best_provider = IterListProvider([DeepInfraChat, Airforce])
 )
@@ -225,13 +257,6 @@ mixtral_8x7b_dpo = Model(
     best_provider = IterListProvider([HuggingChat, Airforce, HuggingFace])
 )

-yi_34b = Model(
-    name = 'yi-34b',
-    base_provider = 'NousResearch',
-    best_provider = IterListProvider([Airforce])
-)
-
 ### Microsoft ###
 phi_3_mini_4k = Model(
     name = "phi-3-mini-4k",
@@ -239,38 +264,37 @@ phi_3_mini_4k = Model(
     best_provider = IterListProvider([HuggingChat, HuggingFace])
 )

+phi_3_medium_4k = Model(
+    name = "phi-3-medium-4k",
+    base_provider = "Microsoft",
+    best_provider = IterListProvider([DeepInfraChat])
+)
+
-### Google ###
+### Google DeepMind ###
 # gemini
 gemini_pro = Model(
     name = 'gemini-pro',
-    base_provider = 'Google',
-    best_provider = IterListProvider([GeminiPro, ChatGot, Liaobots, Airforce])
+    base_provider = 'Google DeepMind',
+    best_provider = IterListProvider([GeminiPro, LiteIcoding, Blackbox, Liaobots, Airforce])
 )

 gemini_flash = Model(
     name = 'gemini-flash',
-    base_provider = 'Google',
+    base_provider = 'Google DeepMind',
     best_provider = IterListProvider([Blackbox, Liaobots, Airforce])
 )

 gemini = Model(
     name = 'gemini',
-    base_provider = 'Google',
+    base_provider = 'Google DeepMind',
     best_provider = IterListProvider([
         Gemini,
         gemini_flash.best_provider, gemini_pro.best_provider
     ])
 )

 # gemma
-gemma_2b = Model(
-    name = 'gemma-2b',
-    base_provider = 'Google',
-    best_provider = IterListProvider([ReplicateHome, Airforce])
-)
-
 gemma_2b_9b = Model(
     name = 'gemma-2b-9b',
     base_provider = 'Google',
@@ -280,9 +304,19 @@ gemma_2b_9b = Model(
 gemma_2b_27b = Model(
     name = 'gemma-2b-27b',
     base_provider = 'Google',
-    best_provider = IterListProvider([Airforce])
+    best_provider = IterListProvider([DeepInfraChat, Airforce])
 )

+gemma_2b = Model(
+    name = 'gemma-2b',
+    base_provider = 'Google',
+    best_provider = IterListProvider([
+        ReplicateHome, Airforce,
+        gemma_2b_9b.best_provider, gemma_2b_27b.best_provider,
+    ])
+)
+
 ### Anthropic ###
 claude_2 = Model(
     name = 'claude-2',
@@ -302,6 +336,7 @@ claude_2_1 = Model(
     best_provider = IterListProvider([Liaobots])
 )

+# claude 3
 claude_3_opus = Model(
     name = 'claude-3-opus',
     base_provider = 'Anthropic',
@@ -314,18 +349,37 @@ claude_3_sonnet = Model(
     best_provider = IterListProvider([Liaobots])
 )

-claude_3_5_sonnet = Model(
-    name = 'claude-3-5-sonnet',
-    base_provider = 'Anthropic',
-    best_provider = IterListProvider([Liaobots])
-)
-
 claude_3_haiku = Model(
     name = 'claude-3-haiku',
     base_provider = 'Anthropic',
     best_provider = IterListProvider([DDG, Liaobots])
 )

+claude_3 = Model(
+    name = 'claude-3',
+    base_provider = 'Anthropic',
+    best_provider = IterListProvider([
+        claude_3_opus.best_provider, claude_3_sonnet.best_provider, claude_3_haiku.best_provider
+    ])
+)
+
+# claude 3.5
+claude_3_5_sonnet = Model(
+    name = 'claude-3.5-sonnet',
+    base_provider = 'Anthropic',
+    best_provider = IterListProvider([Blackbox, Liaobots])
+)
+
+claude_3_5 = Model(
+    name = 'claude-3.5',
+    base_provider = 'Anthropic',
+    best_provider = IterListProvider([
+        LiteIcoding,
+        claude_3_5_sonnet.best_provider
+    ])
+)

 ### Reka AI ###
 reka_core = Model(
@@ -335,11 +389,11 @@ reka_core = Model(
 )

-### Blackbox ###
+### Blackbox AI ###
 blackbox = Model(
     name = 'blackbox',
-    base_provider = 'Blackbox',
-    best_provider = Blackbox
+    base_provider = 'Blackbox AI',
+    best_provider = IterListProvider([Blackbox])
 )
@@ -366,6 +420,7 @@ sparkdesk_v1_1 = Model(
     best_provider = IterListProvider([FreeChatgpt, Airforce])
 )

 ### Qwen ###
 qwen_1_5_14b = Model(
     name = 'qwen-1.5-14b',
@@ -388,7 +443,7 @@ qwen_1_5_110b = Model(
 qwen_2_72b = Model(
     name = 'qwen-2-72b',
     base_provider = 'Qwen',
-    best_provider = IterListProvider([Airforce])
+    best_provider = IterListProvider([DeepInfraChat, Airforce])
 )

 qwen_turbo = Model(
@@ -397,6 +452,14 @@ qwen_turbo = Model(
     best_provider = IterListProvider([Bixin123])
 )

+qwen = Model(
+    name = 'qwen',
+    base_provider = 'Qwen',
+    best_provider = IterListProvider([
+        qwen_1_5_14b.best_provider, qwen_1_5_72b.best_provider, qwen_1_5_110b.best_provider, qwen_2_72b.best_provider, qwen_turbo.best_provider
+    ])
+)
+
 ### Zhipu AI ###
 glm_3_6b = Model(
@@ -415,11 +478,11 @@ glm_4 = Model(
     name = 'glm-4',
     base_provider = 'Zhipu AI',
     best_provider = IterListProvider([
-        CodeNews,
         glm_3_6b.best_provider, glm_4_9b.best_provider
     ])
 )

 ### 01-ai ###
 yi_1_5_9b = Model(
     name = 'yi-1.5-9b',
@@ -427,6 +490,13 @@ yi_1_5_9b = Model(
     best_provider = IterListProvider([FreeChatgpt])
 )

+yi_34b = Model(
+    name = 'yi-34b',
+    base_provider = '01-ai',
+    best_provider = IterListProvider([Airforce])
+)
+
 ### Upstage ###
 solar_1_mini = Model(
     name = 'solar-1-mini',
@@ -440,47 +510,38 @@ solar_10_7b = Model(
     best_provider = Airforce
 )

+solar_pro = Model(
+    name = 'solar-pro',
+    base_provider = 'Upstage',
+    best_provider = Upstage
+)
+
-### Pi ###
+### Inflection ###
 pi = Model(
     name = 'pi',
-    base_provider = 'inflection',
+    base_provider = 'Inflection',
     best_provider = Pi
 )

-### SambaNova ###
-samba_coe_v0_1 = Model(
-    name = 'samba-coe-v0.1',
-    base_provider = 'SambaNova',
-    best_provider = Snova
-)
-
-### Trong-Hieu Nguyen-Mau ###
-v1olet_merged_7b = Model(
-    name = 'v1olet-merged-7b',
-    base_provider = 'Trong-Hieu Nguyen-Mau',
-    best_provider = Snova
-)
-
-### Macadeliccc ###
-westlake_7b_v2 = Model(
-    name = 'westlake-7b-v2',
-    base_provider = 'Macadeliccc',
-    best_provider = Snova
-)
-
 ### DeepSeek ###
 deepseek = Model(
     name = 'deepseek',
     base_provider = 'DeepSeek',
-    best_provider = IterListProvider([CodeNews, Airforce])
+    best_provider = IterListProvider([Airforce])
 )

 ### WizardLM ###
+wizardlm_2_7b = Model(
+    name = 'wizardlm-2-7b',
+    base_provider = 'WizardLM',
+    best_provider = IterListProvider([DeepInfraChat])
+)
+
 wizardlm_2_8x22b = Model(
     name = 'wizardlm-2-8x22b',
     base_provider = 'WizardLM',
-    best_provider = Airforce
+    best_provider = IterListProvider([DeepInfraChat, Airforce])
 )

 ### Together ###
@@ -490,6 +551,7 @@ sh_n_7b = Model(
     best_provider = Airforce
 )

 ### Yorickvp ###
 llava_13b = Model(
     name = 'llava-13b',
@@ -497,6 +559,62 @@ llava_13b = Model(
     best_provider = ReplicateHome
 )

+### OpenBMB ###
+minicpm_llama_3_v2_5 = Model(
+    name = 'minicpm-llama-3-v2.5',
+    base_provider = 'OpenBMB',
+    best_provider = DeepInfraChat
+)
+
+### Lzlv ###
+lzlv_70b = Model(
+    name = 'lzlv-70b',
+    base_provider = 'Lzlv',
+    best_provider = DeepInfraChat
+)
+
+### OpenChat ###
+openchat_3_6_8b = Model(
+    name = 'openchat-3.6-8b',
+    base_provider = 'OpenChat',
+    best_provider = DeepInfraChat
+)
+
+### Phind ###
+phind_codellama_34b_v2 = Model(
+    name = 'phind-codellama-34b-v2',
+    base_provider = 'Phind',
+    best_provider = DeepInfraChat
+)
+
+### Cognitive Computations ###
+dolphin_2_9_1_llama_3_70b = Model(
+    name = 'dolphin-2.9.1-llama-3-70b',
+    base_provider = 'Cognitive Computations',
+    best_provider = DeepInfraChat
+)
+
+### x.ai ###
+grok_2 = Model(
+    name = 'grok-2',
+    base_provider = 'x.ai',
+    best_provider = Liaobots
+)
+
+grok_2_mini = Model(
+    name = 'grok-2-mini',
+    base_provider = 'x.ai',
+    best_provider = Liaobots
+)
+
 #############
 ### Image ###
 #############
@@ -516,10 +634,11 @@ sd_3 = Model(
 )

 ### Playground ###
 playground_v2_5 = Model(
     name = 'playground-v2.5',
-    base_provider = 'Stability AI',
+    base_provider = 'Playground AI',
     best_provider = IterListProvider([ReplicateHome])
 )
@@ -528,7 +647,7 @@ playground_v2_5 = Model(
 flux = Model(
     name = 'flux',
     base_provider = 'Flux AI',
-    best_provider = IterListProvider([Airforce])
+    best_provider = IterListProvider([Airforce, Blackbox])
 )
@@ -567,6 +686,13 @@ flux_pixel = Model(
 )

+flux_4o = Model(
+    name = 'flux-4o',
+    base_provider = 'Flux AI',
+    best_provider = IterListProvider([Airforce])
+)
+
 flux_schnell = Model(
     name = 'flux-schnell',
     base_provider = 'Flux AI',
@@ -574,19 +700,29 @@ flux_schnell = Model(
 )

 ### ###
-dalle = Model(
-    name = 'dalle',
-    base_provider = '',
-    best_provider = IterListProvider([Nexra])
-)
-
 dalle_2 = Model(
     name = 'dalle-2',
     base_provider = '',
     best_provider = IterListProvider([Nexra])
 )

+dalle_3 = Model(
+    name = 'dalle-3',
+    base_provider = '',
+    best_provider = IterListProvider([Airforce])
+)
+
+dalle = Model(
+    name = 'dalle',
+    base_provider = '',
+    best_provider = IterListProvider([
+        Nexra,
+        dalle_2.best_provider, dalle_3.best_provider,
+    ])
+)

 dalle_mini = Model(
@@ -596,6 +732,7 @@ dalle_mini = Model(
 )

+### ###
 emi = Model(
     name = 'emi',
     base_provider = '',
@@ -631,10 +768,14 @@ class ModelUtils:
         'gpt-3.5-turbo': gpt_35_turbo,

         # gpt-4
-        'gpt-4o' : gpt_4o,
-        'gpt-4o-mini' : gpt_4o_mini,
-        'gpt-4' : gpt_4,
-        'gpt-4-turbo' : gpt_4_turbo,
+        'gpt-4o': gpt_4o,
+        'gpt-4o-mini': gpt_4o_mini,
+        'gpt-4': gpt_4,
+        'gpt-4-turbo': gpt_4_turbo,
+
+        # o1
+        'o1': o1,
+        'o1-mini': o1_mini,

         ### Meta ###
@@ -644,10 +785,12 @@ class ModelUtils:
         'llama-2-13b': llama_2_13b,

         # llama-3
+        'llama-3': llama_3,
         'llama-3-8b': llama_3_8b,
         'llama-3-70b': llama_3_70b,

         # llama-3.1
+        'llama-3.1': llama_3_1,
         'llama-3.1-8b': llama_3_1_8b,
         'llama-3.1-70b': llama_3_1_70b,
         'llama-3.1-405b': llama_3_1_405b,
@@ -667,6 +810,7 @@ class ModelUtils:
         ### Microsoft ###
         'phi-3-mini-4k': phi_3_mini_4k,
+        'phi_3_medium-4k': phi_3_medium_4k,

         ### Google ###
@@ -686,17 +830,22 @@ class ModelUtils:
         'claude-2.0': claude_2_0,
         'claude-2.1': claude_2_1,

+        # claude 3
+        'claude-3': claude_3,
         'claude-3-opus': claude_3_opus,
         'claude-3-sonnet': claude_3_sonnet,
         'claude-3-haiku': claude_3_haiku,
-        'claude-3-5-sonnet': claude_3_5_sonnet,
+
+        # claude 3.5
+        'claude-3.5': claude_3_5,
+        'claude-3.5-sonnet': claude_3_5_sonnet,

         ### Reka AI ###
         'reka-core': reka_core,

-        ### Blackbox ###
+        ### Blackbox AI ###
         'blackbox': blackbox,
@@ -717,6 +866,7 @@ class ModelUtils:
         ### Qwen ###
+        'qwen': qwen,
         'qwen-1.5-14b': qwen_1_5_14b,
         'qwen-1.5-72b': qwen_1_5_72b,
         'qwen-1.5-110b': qwen_1_5_110b,
@@ -737,24 +887,12 @@ class ModelUtils:
         ### Upstage ###
         'solar-1-mini': solar_1_mini,
         'solar-10-7b': solar_10_7b,
+        'solar-pro': solar_pro,

-        ### Pi ###
+        ### Inflection ###
         'pi': pi,

-        ### SambaNova ###
-        'samba-coe-v0.1': samba_coe_v0_1,
-
-        ### Trong-Hieu Nguyen-Mau ###
-        'v1olet-merged-7b': v1olet_merged_7b,
-
-        ### Macadeliccc ###
-        'westlake-7b-v2': westlake_7b_v2,
-
         ### DeepSeek ###
         'deepseek': deepseek,
@@ -767,6 +905,36 @@ class ModelUtils:
         'llava-13b': llava_13b,

+        ### WizardLM ###
+        'wizardlm-2-7b': wizardlm_2_7b,
+        'wizardlm-2-8x22b': wizardlm_2_8x22b,
+
+        ### OpenBMB ###
+        'minicpm-llama-3-v2.5': minicpm_llama_3_v2_5,
+
+        ### Lzlv ###
+        'lzlv-70b': lzlv_70b,
+
+        ### OpenChat ###
+        'openchat-3.6-8b': openchat_3_6_8b,
+
+        ### Phind ###
+        'phind-codellama-34b-v2': phind_codellama_34b_v2,
+
+        ### Cognitive Computations ###
+        'dolphin-2.9.1-llama-3-70b': dolphin_2_9_1_llama_3_70b,
+
+        ### x.ai ###
+        'grok-2': grok_2,
+        'grok-2-mini': grok_2_mini,
+
         #############
         ### Image ###
@@ -788,12 +956,14 @@ class ModelUtils:
         'flux-3d': flux_3d,
         'flux-disney': flux_disney,
         'flux-pixel': flux_pixel,
+        'flux-4o': flux_4o,
         'flux-schnell': flux_schnell,

         ### ###
         'dalle': dalle,
         'dalle-2': dalle_2,
+        'dalle-3': dalle_3,
         'dalle-mini': dalle_mini,
         'emi': emi,
         'any-dark': any_dark,
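These registry edits only matter at call time: the string a caller passes as `model` is looked up in `ModelUtils`, and the resulting `Model`'s `best_provider` (usually an `IterListProvider`) is tried provider by provider until one answers. A hedged usage sketch against the public API, assuming this revision of gpt4free is installed:

```python
import g4f

# "llama-3.1-70b" resolves to the llama_3_1_70b entry above; IterListProvider
# then falls through DDG, HuggingChat, Blackbox, ... until one succeeds.
response = g4f.ChatCompletion.create(
    model="llama-3.1-70b",
    messages=[{"role": "user", "content": "Summarize the provider changes in one line."}],
)
print(response)
```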