Merge branch 'main' into feature/docker-setup

commit f81e618958
Author: Ryan Jordan
Committed by: GitHub, 2023-09-06 00:39:57 +00:00
GPG Key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
46 changed files with 2020 additions and 1360 deletions

README.md

@@ -178,28 +178,102 @@
for message in response:
    print(message)
```

##### Providers:
```py
from g4f.Provider import (
    Acytoo,
    Aichat,
    Ails,
    Bard,
    Bing,
    ChatgptAi,
    ChatgptLogin,
    DeepAi,
    EasyChat,
    Equing,
    GetGpt,
    H2o,
    HuggingChat,
    Opchatgpts,
    OpenAssistant,
    OpenaiChat,
    Raycast,
    Theb,
    Vercel,
    Wewordle,
    Wuguokai,
    You,
    Yqcloud
)

# Usage:
response = g4f.ChatCompletion.create(..., provider=ProviderName)
```
##### Needs cookies:

Many providers need cookies to work.
Bing requires a session in which you have passed the captcha,
and other providers require you to log in to your account.
If you run the g4f package locally, cookies from your browsers
are read with `get_cookies`. Otherwise, you have to pass them
via the `cookies` parameter:
```py
import g4f
from g4f.Provider import (
    Bard,
    Bing,
    H2o,
    HuggingChat,
    OpenAssistant,
    OpenaiChat,
    You,
)

# Usage:
response = g4f.ChatCompletion.create(
    model=g4f.models.default,
    messages=[{"role": "user", "content": "Hello"}],
    provider=Bard,
    #cookies=g4f.get_cookies(".google.com"),
    cookies={"cookie_name": "value", "cookie_name2": "value2"},
    auth=True
)
```
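
If cookie auto-detection is available in your environment, you can also call `get_cookies` yourself to check what will be sent before creating a completion. A minimal sketch, assuming `g4f.get_cookies(domain)` returns a plain dict as in the commented line above:

```py
import g4f

# Read cookies for a domain from locally installed browsers
# (assumes the g4f.get_cookies(domain) -> dict helper used above).
cookies = g4f.get_cookies(".bing.com")
print(list(cookies.keys()))  # inspect which cookies were found
```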
##### Async support:

Run providers asynchronously to improve speed and performance.
The total execution time then corresponds to the execution time of the slowest provider.
```py
import g4f, asyncio

async def run_async():
    _providers = [
        g4f.Provider.Bard,
        g4f.Provider.Bing,
        g4f.Provider.H2o,
        g4f.Provider.HuggingChat,
        g4f.Provider.Liaobots,
        g4f.Provider.OpenAssistant,
        g4f.Provider.OpenaiChat,
        g4f.Provider.You,
        g4f.Provider.Yqcloud,
    ]
    responses = [
        provider.create_async(
            model=None,
            messages=[{"role": "user", "content": "Hello"}],
        )
        for provider in _providers
    ]
    responses = await asyncio.gather(*responses)
    for idx, provider in enumerate(_providers):
        print(f"{provider.__name__}:", responses[idx])

asyncio.run(run_async())
```
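
Note that `asyncio.gather` raises on the first failing provider and discards the other results. A variation on the example above, assuming the same `create_async` interface, keeps whatever succeeded:

```py
import g4f, asyncio

async def run_async_tolerant():
    _providers = [g4f.Provider.Bing, g4f.Provider.You, g4f.Provider.Yqcloud]
    tasks = [
        provider.create_async(
            model=None,
            messages=[{"role": "user", "content": "Hello"}],
        )
        for provider in _providers
    ]
    # return_exceptions=True collects errors instead of raising on the first one
    results = await asyncio.gather(*tasks, return_exceptions=True)
    for provider, result in zip(_providers, results):
        if isinstance(result, Exception):
            print(f"{provider.__name__} failed: {result}")
        else:
            print(f"{provider.__name__}:", result)

asyncio.run(run_async_tolerant())
```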
### interference openai-proxy api (use with openai python package)

get requirements:
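
The install and launch steps are abbreviated in this hunk. Once the interference app is running, the pre-1.0 `openai` Python package can be pointed at it by overriding its base URL; a sketch, where the address `http://localhost:1337` is an assumption that depends on how you start the proxy:

```py
import openai

# Route the openai package through the local interference proxy
# (assumed address; adjust to wherever the proxy listens).
openai.api_key = ""
openai.api_base = "http://localhost:1337"

response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
)
print(response.choices[0].message.content)
```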
@@ -247,32 +321,40 @@ if __name__ == "__main__":

### gpt-3.5 / gpt-4

| Website | Provider | gpt-3.5 | gpt-4 | Streaming | Status | Auth |
| ------- | -------- | ------- | ----- | --------- | ------ | ---- |
| [chat.acytoo.com](https://chat.acytoo.com/) | g4f.provider.Acytoo | ✔️ | ❌ | ❌ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
| [chat-gpt.org](https://chat-gpt.org/chat) | g4f.provider.Aichat | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [ai.ls](https://ai.ls) | g4f.provider.Ails | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
| [bard.google.com](https://bard.google.com) | g4f.provider.Bard | ❌ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔️ |
| [bing.com](https://bing.com/chat) | g4f.provider.Bing | ❌ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔️ |
| [chatgpt.ai](https://chatgpt.ai/gpt-4/) | g4f.provider.ChatgptAi | ❌ | ✔️ | ❌ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
| [opchatgpts.net](https://opchatgpts.net) | g4f.provider.ChatgptLogin | ✔️ | ❌ | ❌ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
| [deepai.org](https://deepai.org) | g4f.provider.DeepAi | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [free.easychat.work](https://free.easychat.work) | g4f.provider.EasyChat | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
| [next.eqing.tech](https://next.eqing.tech/) | g4f.provider.Equing | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
| [chat.getgpt.world](https://chat.getgpt.world/) | g4f.provider.GetGpt | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [gpt-gm.h2o.ai](https://gpt-gm.h2o.ai) | g4f.provider.H2o | ❌ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [huggingface.co](https://huggingface.co/chat/) | g4f.provider.HuggingChat | ❌ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔️ |
| [liaobots.com](https://liaobots.com) | g4f.provider.Liaobots | ✔️ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [opchatgpts.net](https://opchatgpts.net) | g4f.provider.Opchatgpts | ✔️ | ❌ | ❌ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
| [open-assistant.io](https://open-assistant.io/chat) | g4f.provider.OpenAssistant | ❌ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔️ |
| [chat.openai.com](https://chat.openai.com) | g4f.provider.OpenaiChat | ✔️ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔️ |
| [raycast.com](https://raycast.com) | g4f.provider.Raycast | ✔️ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔️ |
| [theb.ai](https://theb.ai) | g4f.provider.Theb | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔️ |
| [play.vercel.ai](https://play.vercel.ai) | g4f.provider.Vercel | ✔️ | ❌ | ❌ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
| [wewordle.org](https://wewordle.org/) | g4f.provider.Wewordle | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [chat.wuguokai.xyz](https://chat.wuguokai.xyz) | g4f.provider.Wuguokai | ✔️ | ❌ | ❌ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
| [you.com](https://you.com) | g4f.provider.You | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [chat9.yqcloud.top](https://chat9.yqcloud.top/) | g4f.provider.Yqcloud | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [www.aitianhu.com](https://www.aitianhu.com/) | g4f.provider.AItianhu | ✔️ | ❌ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
| [aiservice.vercel.app](https://aiservice.vercel.app/) | g4f.provider.AiService | ✔️ | ❌ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
| [chat.dfehub.com](https://chat.dfehub.com/) | g4f.provider.DfeHub | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
| [chat9.fastgpt.me](https://chat9.fastgpt.me/) | g4f.provider.FastGpt | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
| [forefront.com](https://forefront.com) | g4f.provider.Forefront | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
| [supertest.lockchat.app](http://supertest.lockchat.app) | g4f.provider.Lockchat | ✔️ | ✔️ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
| [p5.v50.ltd](https://p5.v50.ltd) | g4f.provider.V50 | ✔️ | ❌ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
### Other Models
@@ -360,6 +442,20 @@ if __name__ == "__main__":

<td><a href="https://github.com/HexyeDEV/Telegram-Chatbot-Gpt4Free/issues"><img alt="Issues" src="https://img.shields.io/github/issues/HexyeDEV/Telegram-Chatbot-Gpt4Free?style=flat-square&labelColor=343b41"/></a></td>
<td><a href="https://github.com/HexyeDEV/Telegram-Chatbot-Gpt4Free/pulls"><img alt="Pull Requests" src="https://img.shields.io/github/issues-pr/HexyeDEV/Telegram-Chatbot-Gpt4Free?style=flat-square&labelColor=343b41"/></a></td>
</tr>
<tr>
<td><a href="https://github.com/Lin-jun-xiang/action-translate-readme"><b>Action Translate Readme</b></a></td>
<td><a href="https://github.com/Lin-jun-xiang/action-translate-readme/stargazers"><img alt="Stars" src="https://img.shields.io/github/stars/Lin-jun-xiang/action-translate-readme?style=flat-square&labelColor=343b41"/></a></td>
<td><a href="https://github.com/Lin-jun-xiang/action-translate-readme/network/members"><img alt="Forks" src="https://img.shields.io/github/forks/Lin-jun-xiang/action-translate-readme?style=flat-square&labelColor=343b41"/></a></td>
<td><a href="https://github.com/Lin-jun-xiang/action-translate-readme/issues"><img alt="Issues" src="https://img.shields.io/github/issues/Lin-jun-xiang/action-translate-readme?style=flat-square&labelColor=343b41"/></a></td>
<td><a href="https://github.com/Lin-jun-xiang/action-translate-readme/pulls"><img alt="Pull Requests" src="https://img.shields.io/github/issues-pr/Lin-jun-xiang/action-translate-readme?style=flat-square&labelColor=343b41"/></a></td>
</tr>
<tr>
<td><a href="https://github.com/Lin-jun-xiang/docGPT-streamlit"><b>Langchain Document GPT</b></a></td>
<td><a href="https://github.com/Lin-jun-xiang/docGPT-streamlit/stargazers"><img alt="Stars" src="https://img.shields.io/github/stars/Lin-jun-xiang/docGPT-streamlit?style=flat-square&labelColor=343b41"/></a></td>
<td><a href="https://github.com/Lin-jun-xiang/docGPT-streamlit/network/members"><img alt="Forks" src="https://img.shields.io/github/forks/Lin-jun-xiang/docGPT-streamlit?style=flat-square&labelColor=343b41"/></a></td>
<td><a href="https://github.com/Lin-jun-xiang/docGPT-streamlit/issues"><img alt="Issues" src="https://img.shields.io/github/issues/Lin-jun-xiang/docGPT-streamlit?style=flat-square&labelColor=343b41"/></a></td>
<td><a href="https://github.com/Lin-jun-xiang/docGPT-streamlit/pulls"><img alt="Pull Requests" src="https://img.shields.io/github/issues-pr/Lin-jun-xiang/docGPT-streamlit?style=flat-square&labelColor=343b41"/></a></td>
</tr>
</tbody>
</table>

g4f/Provider/AItianhu.py

@@ -1,3 +1,5 @@
from __future__ import annotations

import json
import requests

@@ -7,7 +9,7 @@ from .base_provider import BaseProvider

class AItianhu(BaseProvider):
    url = "https://www.aitianhu.com/"
    working = False
    supports_gpt_35_turbo = True

@@ -15,13 +17,10 @@ class AItianhu(BaseProvider):
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:

        base = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
        base += "\nassistant: "

        headers = {
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"

g4f/Provider/Acytoo.py

@@ -1,3 +1,5 @@
from __future__ import annotations

import time
import requests

@@ -7,42 +9,42 @@ from .base_provider import BaseProvider

class Acytoo(BaseProvider):
    url = 'https://chat.acytoo.com/'
    working = True
    supports_gpt_35_turbo = True

    @classmethod
    def create_completion(
        cls,
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:

        response = requests.post(f'{cls.url}api/completions',
            headers=_create_header(), json=_create_payload(messages, kwargs.get('temperature', 0.5)))

        response.raise_for_status()
        response.encoding = 'utf-8'

        yield response.text


def _create_header():
    return {
        'accept': '*/*',
        'content-type': 'application/json',
    }


def _create_payload(messages: list[dict[str, str]], temperature):
    payload_messages = [
        message | {'createdAt': int(time.time()) * 1000} for message in messages
    ]

    return {
        'key'         : '',
        'model'       : 'gpt-3.5-turbo',
        'messages'    : payload_messages,
        'temperature' : temperature,
        'password'    : ''
    }
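
In normal use a provider like this is driven through the high-level API rather than called directly; a minimal sketch, assuming the package layout shown in the README section above:

```py
import g4f

# Route the request through Acytoo explicitly; provider kwargs
# such as temperature are forwarded via **kwargs.
response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
    provider=g4f.Provider.Acytoo,
    temperature=0.5,
)
print(response)
```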

g4f/Provider/AiService.py

@@ -1,3 +1,5 @@
from __future__ import annotations

import requests

from ..typing import Any, CreateResult

@@ -5,7 +7,7 @@ from .base_provider import BaseProvider

class AiService(BaseProvider):
    url = "https://aiservice.vercel.app/"
    working = False
    supports_gpt_35_turbo = True

@@ -16,10 +18,8 @@ class AiService(BaseProvider):
        stream: bool,
        **kwargs: Any,
    ) -> CreateResult:
        base = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
        base += "\nassistant: "

        headers = {
            "accept": "*/*",

g4f/Provider/Aichat.py

@@ -1,3 +1,5 @@
from __future__ import annotations

import requests

from ..typing import Any, CreateResult

@@ -5,22 +7,18 @@ from .base_provider import BaseProvider

class Aichat(BaseProvider):
    url = "https://chat-gpt.org/chat"
    working = True
    supports_gpt_35_turbo = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:

        chat = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
        chat += "\nassistant: "

        headers = {
            "authority": "chat-gpt.org",

g4f/Provider/Ails.py

@@ -1,3 +1,5 @@
from __future__ import annotations

import hashlib
import json
import time

@@ -11,18 +13,17 @@ from .base_provider import BaseProvider

class Ails(BaseProvider):
    url: str = "https://ai.ls"
    working = True
    supports_stream = True
    supports_gpt_35_turbo = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:
        headers = {
            "authority": "api.caipacity.com",
            "accept": "*/*",

@@ -72,6 +73,8 @@ class Ails(BaseProvider):
            if b"content" in token:
                completion_chunk = json.loads(token.decode().replace("data: ", ""))
                token = completion_chunk["choices"][0]["delta"].get("content")
                if "ai.ls" in token.lower() or "ai.ci" in token.lower():
                    raise Exception("Response Error: " + token)
                if token != None:
                    yield token

g4f/Provider/Bard.py

@@ -1,51 +1,33 @@
from __future__ import annotations

import json
import random
import re

from aiohttp import ClientSession

from .base_provider import AsyncProvider, format_prompt, get_cookies


class Bard(AsyncProvider):
    url = "https://bard.google.com"
    needs_auth = True
    working = True

    @classmethod
    async def create_async(
        cls,
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        cookies: dict = None,
        **kwargs
    ) -> str:
        prompt = format_prompt(messages)
        if proxy and "://" not in proxy:
            proxy = f"http://{proxy}"
        if not cookies:
            cookies = get_cookies(".google.com")

        headers = {
            'authority': 'bard.google.com',

@@ -62,10 +44,11 @@ class Bard(BaseProvider):
        ) as session:
            async with session.get(cls.url, proxy=proxy) as response:
                text = await response.text()

            match = re.search(r'SNlM0e\":\"(.*?)\"', text)
            if not match:
                raise RuntimeError("No snlm0e value.")
            snlm0e = match.group(1)

            params = {
                'bl': 'boq_assistant-bard-web-server_20230326.21_p0',
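
Since `Bard` is now an `AsyncProvider`, its `create_async` classmethod can also be awaited directly; a sketch, assuming valid `.google.com` cookies are available to `get_cookies`:

```py
import asyncio
import g4f

async def main():
    # create_async returns the full response text once generation finishes
    answer = await g4f.Provider.Bard.create_async(
        model=None,
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(answer)

asyncio.run(main())
```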

g4f/Provider/Bing.py

@@ -1,294 +1,94 @@
from __future__ import annotations

import asyncio
import json
import os
import random

import aiohttp
from aiohttp import ClientSession

from ..typing import Any, AsyncGenerator, CreateResult, Union
from .base_provider import AsyncGeneratorProvider, get_cookies


class Bing(AsyncGeneratorProvider):
    url = "https://bing.com/chat"
    needs_auth = True
    working = True
    supports_gpt_4 = True
    supports_stream = True

    @staticmethod
    def create_async_generator(
        model: str,
        messages: list[dict[str, str]],
        cookies: dict = None, **kwargs) -> AsyncGenerator:
        if not cookies:
            cookies = get_cookies(".bing.com")
        if len(messages) < 2:
            prompt = messages[0]["content"]
            context = None
        else:
            prompt = messages[-1]["content"]
            context = create_context(messages[:-1])

        if cookies and "SRCHD" in cookies:
            #TODO: Will implement proper cookie retrieval later and use a try-except mechanism in 'stream_generate' instead of defaulting the cookie value like this
            cookies_dict = {
                'SRCHD'         : cookies["SRCHD"],
                'PPLState'      : '1',
                'KievRPSSecAuth': '',
                'SUID'          : '',
                'SRCHUSR'       : '',
                'SRCHHPGUSR'    : '',
            }
            return stream_generate(prompt, context, cookies_dict)


def create_context(messages: list[dict[str, str]]):
    context = "".join(f"[{message['role']}](#message)\n{message['content']}\n\n" for message in messages)
    return context


class Conversation():
    def __init__(self, conversationId: str, clientId: str, conversationSignature: str) -> None:
        self.conversationId = conversationId
        self.clientId = clientId
        self.conversationSignature = conversationSignature


async def create_conversation(session: ClientSession) -> Conversation:
    url = 'https://www.bing.com/turing/conversation/create'
    async with await session.get(url) as response:
        response = await response.json()
        conversationId = response.get('conversationId')
        clientId = response.get('clientId')
        conversationSignature = response.get('conversationSignature')

    if not conversationId or not clientId or not conversationSignature:
        raise Exception('Failed to create conversation.')

    return Conversation(conversationId, clientId, conversationSignature)


async def list_conversations(session: ClientSession) -> list:
    url = "https://www.bing.com/turing/conversation/chats"
    async with session.get(url) as response:
        response = await response.json()
        return response["chats"]


async def delete_conversation(session: ClientSession, conversation: Conversation) -> list:
    url = "https://sydney.bing.com/sydney/DeleteSingleConversation"
    json = {
        "conversationId": conversation.conversationId,
        "conversationSignature": conversation.conversationSignature,
        "participant": {"id": conversation.clientId},
        "source": "cib",
        "optionsSets": ["autosave"]
    }
    async with session.post(url, json=json) as response:
        response = await response.json()
        return response["result"]["value"] == "Success"


class Defaults:
    delimiter = "\x1e"

@@ -309,9 +109,6 @@ class Defaults:
    ]

    sliceIds = [
        "winmuid3tf",
        "osbsdusgreccf",
        "ttstmout",

@@ -349,6 +146,149 @@ class Defaults:
        ],
    }

    headers = {
        'accept': '*/*',
        'accept-language': 'en-US,en;q=0.9',
        'cache-control': 'max-age=0',
        'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
        'sec-ch-ua-arch': '"x86"',
        'sec-ch-ua-bitness': '"64"',
        'sec-ch-ua-full-version': '"110.0.1587.69"',
        'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-model': '""',
        'sec-ch-ua-platform': '"Windows"',
        'sec-ch-ua-platform-version': '"15.0.0"',
        'sec-fetch-dest': 'document',
        'sec-fetch-mode': 'navigate',
        'sec-fetch-site': 'none',
        'sec-fetch-user': '?1',
        'upgrade-insecure-requests': '1',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69',
        'x-edge-shopping-flag': '1',
        'x-forwarded-for': ip_address,
    }

    optionsSets = {
        "optionsSets": [
            'saharasugg',
            'enablenewsfc',
            'clgalileo',
            'gencontentv3',
            "nlu_direct_response_filter",
            "deepleo",
            "disable_emoji_spoken_text",
            "responsible_ai_policy_235",
            "enablemm",
            "h3precise"
            "dtappid",
            "cricinfo",
            "cricinfov2",
            "dv3sugg",
            "nojbfedge"
        ]
    }


def format_message(msg: dict) -> str:
    return json.dumps(msg, ensure_ascii=False) + Defaults.delimiter


def create_message(conversation: Conversation, prompt: str, context: str = None) -> str:
    struct = {
        'arguments': [
            {
                **Defaults.optionsSets,
                'source': 'cib',
                'allowedMessageTypes': Defaults.allowedMessageTypes,
                'sliceIds': Defaults.sliceIds,
                'traceId': os.urandom(16).hex(),
                'isStartOfSession': True,
                'message': Defaults.location | {
                    'author': 'user',
                    'inputMethod': 'Keyboard',
                    'text': prompt,
                    'messageType': 'Chat'
                },
                'conversationSignature': conversation.conversationSignature,
                'participant': {
                    'id': conversation.clientId
                },
                'conversationId': conversation.conversationId
            }
        ],
        'invocationId': '0',
        'target': 'chat',
        'type': 4
    }

    if context:
        struct['arguments'][0]['previousMessages'] = [{
            "author": "user",
            "description": context,
            "contextType": "WebPage",
            "messageType": "Context",
            "messageId": "discover-web--page-ping-mriduna-----"
        }]

    return format_message(struct)


async def stream_generate(
    prompt: str,
    context: str = None,
    cookies: dict = None
):
    async with ClientSession(
        timeout=aiohttp.ClientTimeout(total=900),
        cookies=cookies,
        headers=Defaults.headers,
    ) as session:
        conversation = await create_conversation(session)
        try:
            async with session.ws_connect(
                'wss://sydney.bing.com/sydney/ChatHub',
                autoping=False,
            ) as wss:

                await wss.send_str(format_message({'protocol': 'json', 'version': 1}))
                msg = await wss.receive(timeout=900)

                await wss.send_str(create_message(conversation, prompt, context))

                response_txt = ''
                result_text = ''
                returned_text = ''
                final = False

                while not final:
                    msg = await wss.receive(timeout=900)
                    objects = msg.data.split(Defaults.delimiter)
                    for obj in objects:
                        if obj is None or not obj:
                            continue

                        response = json.loads(obj)
                        if response.get('type') == 1 and response['arguments'][0].get('messages'):
                            message = response['arguments'][0]['messages'][0]
                            if (message['contentOrigin'] != 'Apology'):
                                response_txt = result_text + \
                                    message['adaptiveCards'][0]['body'][0].get('text', '')

                                if message.get('messageType'):
                                    inline_txt = message['adaptiveCards'][0]['body'][0]['inlines'][0].get('text')
                                    response_txt += inline_txt + '\n'
                                    result_text += inline_txt + '\n'

                            if response_txt.startswith(returned_text):
                                new = response_txt[len(returned_text):]
                                if new != "\n":
                                    yield new
                                    returned_text = response_txt
                        elif response.get('type') == 2:
                            result = response['item']['result']
                            if result.get('error'):
                                raise Exception(f"{result['value']}: {result['message']}")
                            final = True
                            break
        finally:
            await delete_conversation(session, conversation)
def run(generator: AsyncGenerator[Union[Any, str], Any]):
    loop = asyncio.get_event_loop()

@@ -360,3 +300,4 @@ def run(generator: AsyncGenerator[Union[Any, str], Any]):
        except StopAsyncIteration:
            break
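
`stream_generate` is an async generator, so the streamed tokens can be consumed with `async for`; a minimal sketch, assuming valid `.bing.com` cookies:

```py
import asyncio

async def main():
    # Tokens arrive incrementally as Bing streams the answer
    # (uses stream_generate and get_cookies from the module above).
    async for token in stream_generate("Hello", cookies=get_cookies(".bing.com")):
        print(token, end="", flush=True)

asyncio.run(main())
```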

g4f/Provider/ChatgptAi.py

@@ -1,3 +1,5 @@
from __future__ import annotations

import re
import requests

@@ -7,55 +9,51 @@ from .base_provider import BaseProvider

class ChatgptAi(BaseProvider):
    url: str = "https://chatgpt.ai/gpt-4/"
    working = True
    supports_gpt_4 = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:

        chat = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
        chat += "\nassistant: "

        response = requests.get("https://chatgpt.ai/")
        nonce, post_id, _, bot_id = re.findall(
            r'data-nonce="(.*)"\n data-post-id="(.*)"\n data-url="(.*)"\n data-bot-id="(.*)"\n data-width',
            response.text)[0]

        headers = {
            "authority"          : "chatgpt.ai",
            "accept"             : "*/*",
            "accept-language"    : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
            "cache-control"      : "no-cache",
            "origin"             : "https://chatgpt.ai",
            "pragma"             : "no-cache",
            "referer"            : "https://chatgpt.ai/gpt-4/",
            "sec-ch-ua"          : '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
            "sec-ch-ua-mobile"   : "?0",
            "sec-ch-ua-platform" : '"Windows"',
            "sec-fetch-dest"     : "empty",
            "sec-fetch-mode"     : "cors",
            "sec-fetch-site"     : "same-origin",
            "user-agent"         : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
        }
        data = {
            "_wpnonce" : nonce,
            "post_id"  : post_id,
            "url"      : "https://chatgpt.ai/gpt-4",
            "action"   : "wpaicg_chat_shortcode_message",
            "message"  : chat,
            "bot_id"   : bot_id,
        }

        response = requests.post(
            "https://chatgpt.ai/wp-admin/admin-ajax.php", headers=headers, data=data)
        response.raise_for_status()
        yield response.json()["data"]

g4f/Provider/ChatgptLogin.py

@@ -1,3 +1,5 @@
from __future__ import annotations

import base64
import os
import re

@@ -9,61 +11,58 @@ from .base_provider import BaseProvider

class ChatgptLogin(BaseProvider):
    url = "https://opchatgpts.net"
    supports_gpt_35_turbo = True
    working = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:

        headers = {
            "authority"          : "chatgptlogin.ac",
            "accept"             : "*/*",
            "accept-language"    : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
            "content-type"       : "application/json",
            "origin"             : "https://opchatgpts.net",
            "referer"            : "https://opchatgpts.net/chatgpt-free-use/",
            "sec-ch-ua"          : '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
            "sec-ch-ua-mobile"   : "?0",
            "sec-ch-ua-platform" : '"Windows"',
            "sec-fetch-dest"     : "empty",
            "sec-fetch-mode"     : "cors",
            "sec-fetch-site"     : "same-origin",
            "user-agent"         : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
            "x-wp-nonce"         : _get_nonce(),
        }
        conversation = _transform(messages)

        json_data = {
            "env"            : "chatbot",
            "session"        : "N/A",
            "prompt"         : "Converse as if you were an AI assistant. Be friendly, creative.",
            "context"        : "Converse as if you were an AI assistant. Be friendly, creative.",
            "messages"       : conversation,
            "newMessage"     : messages[-1]["content"],
            "userName"       : '<div class="mwai-name-text">User:</div>',
            "aiName"         : '<div class="mwai-name-text">AI:</div>',
            "model"          : "gpt-3.5-turbo",
            "temperature"    : kwargs.get("temperature", 0.8),
            "maxTokens"      : 1024,
            "maxResults"     : 1,
            "apiKey"         : "",
            "service"        : "openai",
            "embeddingsIndex": "",
            "stop"           : "",
            "clientId"       : os.urandom(6).hex()
        }

        response = requests.post("https://opchatgpts.net/wp-json/ai-chatbot/v1/chat",
            headers=headers, json=json_data)
        response.raise_for_status()
        yield response.json()["reply"]

@@ -81,24 +80,21 @@ class ChatgptLogin(BaseProvider):

def _get_nonce() -> str:
    res = requests.get("https://opchatgpts.net/chatgpt-free-use/",
        headers = {
            "Referer"   : "https://opchatgpts.net/chatgpt-free-use/",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"})

    result = re.search(
        r'class="mwai-chat mwai-chatgpt">.*<span>Send</span></button></div></div></div> <script defer src="(.*?)">',
        res.text)
    if result is None:
        return ""

    src = result.group(1)
    decoded_string = base64.b64decode(src.split(",")[-1]).decode("utf-8")
    result = re.search(r"let restNonce = '(.*?)';", decoded_string)

    return "" if result is None else result.group(1)

@@ -106,11 +102,11 @@ def _get_nonce() -> str:

def _transform(messages: list[dict[str, str]]) -> list[dict[str, Any]]:
    return [
        {
            "id"     : os.urandom(6).hex(),
            "role"   : message["role"],
            "content": message["content"],
            "who"    : "AI: " if message["role"] == "assistant" else "User: ",
            "html"   : _html_encode(message["content"]),
        }
        for message in messages
    ]

@@ -118,14 +114,14 @@ def _transform(messages: list[dict[str, str]]) -> list[dict[str, Any]]:

def _html_encode(string: str) -> str:
    table = {
        '"' : "&quot;",
        "'" : "&#39;",
        "&" : "&amp;",
        ">" : "&gt;",
        "<" : "&lt;",
        "\n": "<br>",
        "\t": "&nbsp;&nbsp;&nbsp;&nbsp;",
        " " : "&nbsp;",
    }

    for key in table:

g4f/Provider/DeepAi.py

@@ -1,3 +1,5 @@
from __future__ import annotations

import json
import js2py

@@ -8,19 +10,17 @@ from .base_provider import BaseProvider

class DeepAi(BaseProvider):
    url: str = "https://deepai.org"
    working = True
    supports_stream = True
    supports_gpt_35_turbo = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:

        token_js = """
var agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36'
var a, b, c, d, e, h, f, l, g, k, m, n, r, x, C, E, N, F, T, O, P, w, D, G, Q, R, W, I, aa, fa, na, oa, ha, ba, X, ia, ja, ka, J, la, K, L, ca, S, U, M, ma, B, da, V, Y;

@@ -54,7 +54,9 @@ f = function () {
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36",
        }

        response = requests.post("https://api.deepai.org/make_me_a_pizza",
            headers=headers, data=payload, stream=True)

        for chunk in response.iter_content(chunk_size=None):
            response.raise_for_status()
            yield chunk.decode()

g4f/Provider/DfeHub.py

@@ -1,3 +1,5 @@
from __future__ import annotations

import json
import re
import time

@@ -9,48 +11,45 @@ from .base_provider import BaseProvider

class DfeHub(BaseProvider):
    url = "https://chat.dfehub.com/"
    supports_stream = True
    supports_gpt_35_turbo = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:

        headers = {
            "authority"         : "chat.dfehub.com",
            "accept"            : "*/*",
            "accept-language"   : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
            "content-type"      : "application/json",
            "origin"            : "https://chat.dfehub.com",
            "referer"           : "https://chat.dfehub.com/",
            "sec-ch-ua"         : '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
            "sec-ch-ua-mobile"  : "?0",
            "sec-ch-ua-platform": '"macOS"',
            "sec-fetch-dest"    : "empty",
            "sec-fetch-mode"    : "cors",
            "sec-fetch-site"    : "same-origin",
            "user-agent"        : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
            "x-requested-with"  : "XMLHttpRequest",
        }

        json_data = {
            "messages"          : messages,
            "model"             : "gpt-3.5-turbo",
            "temperature"       : kwargs.get("temperature", 0.5),
            "presence_penalty"  : kwargs.get("presence_penalty", 0),
            "frequency_penalty" : kwargs.get("frequency_penalty", 0),
            "top_p"             : kwargs.get("top_p", 1),
            "stream"            : True
        }

        response = requests.post("https://chat.dfehub.com/api/openai/v1/chat/completions",
            headers=headers, json=json_data, timeout=3)

        for chunk in response.iter_lines():
            if b"detail" in chunk:

g4f/Provider/EasyChat.py

@@ -1,4 +1,7 @@
from __future__ import annotations

import json
import random

import requests

@@ -7,18 +10,17 @@ from .base_provider import BaseProvider

class EasyChat(BaseProvider):
    url: str = "https://free.easychat.work"
    supports_stream = True
    supports_gpt_35_turbo = True
    working = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:

        active_servers = [
            "https://chat10.fastgpt.me",
            "https://chat9.fastgpt.me",

@@ -28,64 +30,69 @@ class EasyChat(BaseProvider):
            "https://chat4.fastgpt.me",
            "https://gxos1h1ddt.fastgpt.me"
        ]

        server  = active_servers[kwargs.get("active_server", random.randint(0, 5))]
        headers = {
            "authority"         : f"{server}".replace("https://", ""),
            "accept"            : "text/event-stream",
            "accept-language"   : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3,fa=0.2",
            "content-type"      : "application/json",
            "origin"            : f"{server}",
            "referer"           : f"{server}/",
            "x-requested-with"  : "XMLHttpRequest",
            'plugins'           : '0',
            'sec-ch-ua'         : '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
            'sec-ch-ua-mobile'  : '?0',
            'sec-ch-ua-platform': '"Windows"',
            'sec-fetch-dest'    : 'empty',
            'sec-fetch-mode'    : 'cors',
            'sec-fetch-site'    : 'same-origin',
            'user-agent'        : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
            'usesearch'         : 'false',
            'x-requested-with'  : 'XMLHttpRequest'
        }

        json_data = {
            "messages"          : messages,
            "stream"            : stream,
            "model"             : model,
            "temperature"       : kwargs.get("temperature", 0.5),
            "presence_penalty"  : kwargs.get("presence_penalty", 0),
            "frequency_penalty" : kwargs.get("frequency_penalty", 0),
            "top_p"             : kwargs.get("top_p", 1)
        }

        session = requests.Session()
        # init cookies from server
        session.get(f"{server}/")

        response = session.post(f"{server}/api/openai/v1/chat/completions",
            headers=headers, json=json_data, stream=stream)

        if response.status_code == 200:
            if stream == False:
                json_data = response.json()

                if "choices" in json_data:
                    yield json_data["choices"][0]["message"]["content"]
                else:
                    raise Exception("No response from server")
            else:
                for chunk in response.iter_lines():
                    if b"content" in chunk:
                        splitData = chunk.decode().split("data:")

                        if len(splitData) > 1:
                            yield json.loads(splitData[1])["choices"][0]["delta"]["content"]
                        else:
                            continue
        else:
            raise Exception(f"Error {response.status_code} from server : {response.reason}")

    @classmethod
    @property

g4f/Provider/Equing.py

@@ -1,58 +1,62 @@
from __future__ import annotations

import json
from abc import ABC, abstractmethod

import requests

from ..typing import Any, CreateResult


class Equing(ABC):
    url: str = 'https://next.eqing.tech/'
    working = True
    needs_auth = False
    supports_stream = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = False

    @staticmethod
    @abstractmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:

        headers = {
            'authority'         : 'next.eqing.tech',
            'accept'            : 'text/event-stream',
            'accept-language'   : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
            'cache-control'     : 'no-cache',
            'content-type'      : 'application/json',
            'origin'            : 'https://next.eqing.tech',
            'plugins'           : '0',
            'pragma'            : 'no-cache',
            'referer'           : 'https://next.eqing.tech/',
            'sec-ch-ua'         : '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
            'sec-ch-ua-mobile'  : '?0',
            'sec-ch-ua-platform': '"macOS"',
            'sec-fetch-dest'    : 'empty',
            'sec-fetch-mode'    : 'cors',
            'sec-fetch-site'    : 'same-origin',
            'user-agent'        : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
            'usesearch'         : 'false',
            'x-requested-with'  : 'XMLHttpRequest'
        }

        json_data = {
            'messages'          : messages,
            'stream'            : stream,
            'model'             : model,
            'temperature'       : kwargs.get('temperature', 0.5),
            'presence_penalty'  : kwargs.get('presence_penalty', 0),
            'frequency_penalty' : kwargs.get('frequency_penalty', 0),
            'top_p'             : kwargs.get('top_p', 1),
        }

        response = requests.post('https://next.eqing.tech/api/openai/v1/chat/completions',
            headers=headers, json=json_data, stream=stream)

        if not stream:
            yield response.json()["choices"][0]["message"]["content"]
            return

g4f/Provider/FastGpt.py
@@ -1,55 +1,58 @@
from __future__ import annotations

import json
import random
from abc import ABC, abstractmethod

import requests

from ..typing import Any, CreateResult


class FastGpt(ABC):
    url: str              = 'https://chat9.fastgpt.me/'
    working               = False
    needs_auth            = False
    supports_stream       = True
    supports_gpt_35_turbo = True
    supports_gpt_4        = False

    @staticmethod
    @abstractmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:

        headers = {
            'authority'          : 'chat9.fastgpt.me',
            'accept'             : 'text/event-stream',
            'accept-language'    : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
            'cache-control'      : 'no-cache',
            'content-type'       : 'application/json',
            'origin'             : 'https://chat9.fastgpt.me',
            'plugins'            : '0',
            'pragma'             : 'no-cache',
            'referer'            : 'https://chat9.fastgpt.me/',
            'sec-ch-ua'          : '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
            'sec-ch-ua-mobile'   : '?0',
            'sec-ch-ua-platform' : '"macOS"',
            'sec-fetch-dest'     : 'empty',
            'sec-fetch-mode'     : 'cors',
            'sec-fetch-site'     : 'same-origin',
            'user-agent'         : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
            'usesearch'          : 'false',
            'x-requested-with'   : 'XMLHttpRequest',
        }

        json_data = {
            'messages'          : messages,
            'stream'            : stream,
            'model'             : model,
            'temperature'       : kwargs.get('temperature', 0.5),
            'presence_penalty'  : kwargs.get('presence_penalty', 0),
            'frequency_penalty' : kwargs.get('frequency_penalty', 0),
            'top_p'             : kwargs.get('top_p', 1),
        }

        subdomain = random.choice([
@@ -58,7 +61,7 @@ class FastGpt(ABC):
        ])

        response = requests.post(f'https://{subdomain}.fastgpt.me/api/openai/v1/chat/completions',
            headers=headers, json=json_data, stream=stream)

        for line in response.iter_lines():
            if line:

g4f/Provider/Forefront.py
@@ -1,3 +1,5 @@
from __future__ import annotations

import json

import requests
@@ -7,34 +9,31 @@ from .base_provider import BaseProvider

class Forefront(BaseProvider):
    url                   = "https://forefront.com"
    supports_stream       = True
    supports_gpt_35_turbo = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:

        json_data = {
            "text"          : messages[-1]["content"],
            "action"        : "noauth",
            "id"            : "",
            "parentId"      : "",
            "workspaceId"   : "",
            "messagePersona": "607e41fe-95be-497e-8e97-010a59b2e2c0",
            "model"         : "gpt-4",
            "messages"      : messages[:-1] if len(messages) > 1 else [],
            "internetMode"  : "auto",
        }

        response = requests.post("https://streaming.tenant-forefront-default.knative.chi.coreweave.com/free-chat",
            json=json_data, stream=True)

        response.raise_for_status()
        for token in response.iter_lines():
            if b"delta" in token:

g4f/Provider/GetGpt.py
@@ -1,3 +1,5 @@
from __future__ import annotations

import json
import os
import uuid
@@ -10,78 +12,77 @@ from .base_provider import BaseProvider

class GetGpt(BaseProvider):
    url                   = 'https://chat.getgpt.world/'
    supports_stream       = True
    working               = True
    supports_gpt_35_turbo = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:

        headers = {
            'Content-Type' : 'application/json',
            'Referer'      : 'https://chat.getgpt.world/',
            'user-agent'   : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
        }

        data = json.dumps(
            {
                'messages'          : messages,
                'frequency_penalty' : kwargs.get('frequency_penalty', 0),
                'max_tokens'        : kwargs.get('max_tokens', 4000),
                'model'             : 'gpt-3.5-turbo',
                'presence_penalty'  : kwargs.get('presence_penalty', 0),
                'temperature'       : kwargs.get('temperature', 1),
                'top_p'             : kwargs.get('top_p', 1),
                'stream'            : True,
                'uuid'              : str(uuid.uuid4())
            }
        )

        res = requests.post('https://chat.getgpt.world/api/chat/stream',
            headers=headers, json={'signature': _encrypt(data)}, stream=True)

        res.raise_for_status()
        for line in res.iter_lines():
            if b'content' in line:
                line_json = json.loads(line.decode('utf-8').split('data: ')[1])
                yield (line_json['choices'][0]['delta']['content'])

    @classmethod
    @property
    def params(cls):
        params = [
            ('model', 'str'),
            ('messages', 'list[dict[str, str]]'),
            ('stream', 'bool'),
            ('temperature', 'float'),
            ('presence_penalty', 'int'),
            ('frequency_penalty', 'int'),
            ('top_p', 'int'),
            ('max_tokens', 'int'),
        ]
        param = ', '.join([': '.join(p) for p in params])
        return f'g4f.provider.{cls.__name__} supports: ({param})'


def _encrypt(e: str):
    t = os.urandom(8).hex().encode('utf-8')
    n = os.urandom(8).hex().encode('utf-8')
    r = e.encode('utf-8')

    cipher     = AES.new(t, AES.MODE_CBC, n)
    ciphertext = cipher.encrypt(_pad_data(r))

    return ciphertext.hex() + t.decode('utf-8') + n.decode('utf-8')


def _pad_data(data: bytes) -> bytes:
    block_size   = AES.block_size
    padding_size = block_size - len(data) % block_size
    padding      = bytes([padding_size] * padding_size)

    return data + padding
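The `signature` field above is AES-CBC with a random key and IV that are appended, in the clear, to the hex ciphertext. For reference, a minimal sketch of the inverse operation, assuming PyCryptodome's `AES` (the same module `_encrypt` uses); `_decrypt` is a hypothetical helper, not part of the commit:

```py
from Crypto.Cipher import AES  # assumes pycryptodome, as in GetGpt

def _decrypt(payload: str) -> str:
    # _encrypt emits: ciphertext.hex() + key (16 chars) + iv (16 chars)
    ciphertext, key, iv = payload[:-32], payload[-32:-16], payload[-16:]
    cipher = AES.new(key.encode(), AES.MODE_CBC, iv.encode())
    padded = cipher.decrypt(bytes.fromhex(ciphertext))
    return padded[:-padded[-1]].decode()  # strip the PKCS#7 padding added by _pad_data
```

Since the key and IV travel with the ciphertext, the signature is obfuscation rather than encryption.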

g4f/Provider/H2o.py
@@ -1,86 +1,88 @@
from __future__ import annotations

import json
import uuid

from aiohttp import ClientSession

from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider, format_prompt


class H2o(AsyncGeneratorProvider):
    url = "https://gpt-gm.h2o.ai"
    working = True
    supports_stream = True
    model = "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1"

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        **kwargs
    ) -> AsyncGenerator:
        model = model if model else cls.model
        headers = {"Referer": "https://gpt-gm.h2o.ai/"}

        async with ClientSession(
            headers=headers
        ) as session:
            data = {
                "ethicsModalAccepted": "true",
                "shareConversationsWithModelAuthors": "true",
                "ethicsModalAcceptedAt": "",
                "activeModel": model,
                "searchEnabled": "true",
            }
            async with session.post(
                "https://gpt-gm.h2o.ai/settings",
                proxy=proxy,
                data=data
            ) as response:
                response.raise_for_status()

            async with session.post(
                "https://gpt-gm.h2o.ai/conversation",
                proxy=proxy,
                json={"model": model},
            ) as response:
                response.raise_for_status()
                conversationId = (await response.json())["conversationId"]

            data = {
                "inputs": format_prompt(messages),
                "parameters": {
                    "temperature": 0.4,
                    "truncate": 2048,
                    "max_new_tokens": 1024,
                    "do_sample": True,
                    "repetition_penalty": 1.2,
                    "return_full_text": False,
                    **kwargs
                },
                "stream": True,
                "options": {
                    "id": str(uuid.uuid4()),
                    "response_id": str(uuid.uuid4()),
                    "is_retry": False,
                    "use_cache": False,
                    "web_search_id": "",
                },
            }
            async with session.post(
                f"https://gpt-gm.h2o.ai/conversation/{conversationId}",
                proxy=proxy,
                json=data
            ) as response:
                start = "data:"
                async for line in response.content:
                    line = line.decode("utf-8")
                    if line and line.startswith(start):
                        line = json.loads(line[len(start):-1])
                        if not line["token"]["special"]:
                            yield line["token"]["text"]

    @classmethod
    @property
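A minimal sketch of consuming the new async-generator interface directly, assuming the `g4f.Provider` import path this commit exposes:

```py
import asyncio
import g4f

async def main():
    # model=None falls back to H2o.model (the Falcon-40B checkpoint above)
    async for token in g4f.Provider.H2o.create_async_generator(
        model=None,
        messages=[{"role": "user", "content": "Hello"}],
    ):
        print(token, end="", flush=True)

asyncio.run(main())
```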

g4f/Provider/HuggingChat.py (new file)
@@ -0,0 +1,110 @@
from __future__ import annotations

import json

from aiohttp import ClientSession

from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies


class HuggingChat(AsyncGeneratorProvider):
    url = "https://huggingface.co/chat/"
    needs_auth = True
    working = True
    model = "OpenAssistant/oasst-sft-6-llama-30b-xor"

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        stream: bool = True,
        proxy: str = None,
        cookies: dict = None,
        **kwargs
    ) -> AsyncGenerator:
        if not cookies:
            cookies = get_cookies(".huggingface.co")
        model = model if model else cls.model
        if proxy and "://" not in proxy:
            proxy = f"http://{proxy}"
        headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
        }
        async with ClientSession(
            cookies=cookies,
            headers=headers
        ) as session:
            async with session.post("https://huggingface.co/chat/conversation", proxy=proxy, json={"model": model}) as response:
                conversation_id = (await response.json())["conversationId"]

            send = {
                "inputs": format_prompt(messages),
                "parameters": {
                    "temperature": 0.2,
                    "truncate": 1000,
                    "max_new_tokens": 1024,
                    "stop": ["</s>"],
                    "top_p": 0.95,
                    "repetition_penalty": 1.2,
                    "top_k": 50,
                    "return_full_text": False,
                    **kwargs
                },
                "stream": stream,
                "options": {
                    "id": "9e9b8bc4-6604-40c6-994e-8eb78fa32e37",
                    "response_id": "04ce2602-3bea-45e8-8efc-cef00680376a",
                    "is_retry": False,
                    "use_cache": False,
                    "web_search_id": ""
                }
            }
            start = "data:"
            first = True
            async with session.post(f"https://huggingface.co/chat/conversation/{conversation_id}", proxy=proxy, json=send) as response:
                async for line in response.content:
                    line = line.decode("utf-8")
                    if not line:
                        continue
                    if not stream:
                        try:
                            data = json.loads(line)
                        except json.decoder.JSONDecodeError:
                            raise RuntimeError(f"No json: {line}")
                        if "error" in data:
                            raise RuntimeError(data["error"])
                        elif isinstance(data, list):
                            yield data[0]["generated_text"]
                        else:
                            raise RuntimeError(f"Response: {line}")
                    elif line.startswith(start):
                        line = json.loads(line[len(start):-1])
                        if not line:
                            continue
                        if "token" not in line:
                            raise RuntimeError(f"Response: {line}")
                        if not line["token"]["special"]:
                            if first:
                                yield line["token"]["text"].lstrip()
                                first = False
                            else:
                                yield line["token"]["text"]

            async with session.delete(f"https://huggingface.co/chat/conversation/{conversation_id}", proxy=proxy) as response:
                response.raise_for_status()

    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("proxy", "str"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"

g4f/Provider/Liaobots.py
@@ -1,64 +1,81 @@
from __future__ import annotations

import json
import uuid

from aiohttp import ClientSession

from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider

models = {
    "gpt-4": {
        "id": "gpt-4",
        "name": "GPT-4",
        "maxLength": 24000,
        "tokenLimit": 8000,
    },
    "gpt-3.5-turbo": {
        "id": "gpt-3.5-turbo",
        "name": "GPT-3.5",
        "maxLength": 12000,
        "tokenLimit": 4000,
    },
    "gpt-3.5-turbo-16k": {
        "id": "gpt-3.5-turbo-16k",
        "name": "GPT-3.5-16k",
        "maxLength": 48000,
        "tokenLimit": 16000,
    },
}


class Liaobots(AsyncGeneratorProvider):
    url = "https://liaobots.com"
    working = True
    supports_stream = True
    needs_auth = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = True
    _auth_code = None

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        auth: str = None,
        proxy: str = None,
        **kwargs
    ) -> AsyncGenerator:
        if proxy and "://" not in proxy:
            proxy = f"http://{proxy}"
        headers = {
            "authority": "liaobots.com",
            "content-type": "application/json",
            "origin": "https://liaobots.com",
            "referer": "https://liaobots.com/",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
        }
        async with ClientSession(
            headers=headers
        ) as session:
            model = model if model in models else "gpt-3.5-turbo"
            auth_code = auth if isinstance(auth, str) else cls._auth_code
            if not auth_code:
                async with session.post("https://liaobots.com/api/user", proxy=proxy, json={"authcode": ""}) as response:
                    response.raise_for_status()
                    auth_code = cls._auth_code = json.loads(await response.text())["authCode"]
            data = {
                "conversationId": str(uuid.uuid4()),
                "model": models[model],
                "messages": messages,
                "key": "",
                "prompt": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
            }
            async with session.post("https://liaobots.com/api/chat", proxy=proxy, json=data, headers={"x-auth-code": auth_code}) as response:
                response.raise_for_status()
                async for line in response.content:
                    yield line.decode("utf-8")

    @classmethod
    @property
@@ -67,6 +84,7 @@ class Liaobots(BaseProvider):
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("proxy", "str"),
            ("auth", "str"),
        ]
        param = ", ".join([": ".join(p) for p in params])

g4f/Provider/Lockchat.py
@@ -1,3 +1,5 @@
from __future__ import annotations

import json

import requests
@@ -7,46 +9,42 @@ from .base_provider import BaseProvider

class Lockchat(BaseProvider):
    url: str              = "http://supertest.lockchat.app"
    supports_stream       = True
    supports_gpt_35_turbo = True
    supports_gpt_4        = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:

        temperature = float(kwargs.get("temperature", 0.7))
        payload = {
            "temperature": temperature,
            "messages"   : messages,
            "model"      : model,
            "stream"     : True,
        }

        headers = {
            "user-agent": "ChatX/39 CFNetwork/1408.0.4 Darwin/22.5.0",
        }
        response = requests.post("http://supertest.lockchat.app/v1/chat/completions",
            json=payload, headers=headers, stream=True)

        response.raise_for_status()
        for token in response.iter_lines():
            if b"The model: `gpt-4` does not exist" in token:
                print("error, retrying...")
                # yield from, so the retried completion's tokens are not discarded
                yield from Lockchat.create_completion(
                    model       = model,
                    messages    = messages,
                    stream      = stream,
                    temperature = temperature,
                    **kwargs)
                return

            if b"content" in token:
                token = json.loads(token.decode("utf-8").split("data: ")[1])
                token = token["choices"][0]["delta"].get("content")

g4f/Provider/Opchatgpts.py
@@ -1,3 +1,5 @@
from __future__ import annotations

import requests

from ..typing import Any, CreateResult
@@ -5,33 +7,30 @@ from .base_provider import BaseProvider

class Opchatgpts(BaseProvider):
    url                   = "https://opchatgpts.net"
    working               = True
    supports_gpt_35_turbo = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:

        temperature   = kwargs.get("temperature", 0.8)
        max_tokens    = kwargs.get("max_tokens", 1024)
        system_prompt = kwargs.get(
            "system_prompt",
            "Converse as if you were an AI assistant. Be friendly, creative.")

        payload = _create_payload(
            messages      = messages,
            temperature   = temperature,
            max_tokens    = max_tokens,
            system_prompt = system_prompt)

        response = requests.post("https://opchatgpts.net/wp-json/ai-chatbot/v1/chat", json=payload)
        response.raise_for_status()
        yield response.json()["reply"]
@@ -39,24 +38,23 @@ class Opchatgpts(BaseProvider):

def _create_payload(
    messages: list[dict[str, str]],
    temperature: float,
    max_tokens: int, system_prompt: str) -> dict:

    return {
        "env"             : "chatbot",
        "session"         : "N/A",
        "prompt"          : "\n",
        "context"         : system_prompt,
        "messages"        : messages,
        "newMessage"      : messages[::-1][0]["content"],
        "userName"        : '<div class="mwai-name-text">User:</div>',
        "aiName"          : '<div class="mwai-name-text">AI:</div>',
        "model"           : "gpt-3.5-turbo",
        "temperature"     : temperature,
        "maxTokens"       : max_tokens,
        "maxResults"      : 1,
        "apiKey"          : "",
        "service"         : "openai",
        "embeddingsIndex" : "",
        "stop"            : "",
    }

g4f/Provider/OpenAssistant.py (new file)
@@ -0,0 +1,102 @@
from __future__ import annotations

import json

from aiohttp import ClientSession

from ..typing import Any, AsyncGenerator
from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies


class OpenAssistant(AsyncGeneratorProvider):
    url = "https://open-assistant.io/chat"
    needs_auth = True
    working = True
    model = "OA_SFT_Llama_30B_6"

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        cookies: dict = None,
        **kwargs: Any
    ) -> AsyncGenerator:
        if proxy and "://" not in proxy:
            proxy = f"http://{proxy}"
        if not cookies:
            cookies = get_cookies("open-assistant.io")

        headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
        }
        async with ClientSession(
            cookies=cookies,
            headers=headers
        ) as session:
            async with session.post("https://open-assistant.io/api/chat", proxy=proxy) as response:
                chat_id = (await response.json())["id"]

            data = {
                "chat_id": chat_id,
                "content": f"<s>[INST]\n{format_prompt(messages)}\n[/INST]",
                "parent_id": None
            }
            async with session.post("https://open-assistant.io/api/chat/prompter_message", proxy=proxy, json=data) as response:
                parent_id = (await response.json())["id"]

            data = {
                "chat_id": chat_id,
                "parent_id": parent_id,
                "model_config_name": model if model else cls.model,
                "sampling_parameters": {
                    "top_k": 50,
                    "top_p": None,
                    "typical_p": None,
                    "temperature": 0.35,
                    "repetition_penalty": 1.1111111111111112,
                    "max_new_tokens": 1024,
                    **kwargs
                },
                "plugins": []
            }
            async with session.post("https://open-assistant.io/api/chat/assistant_message", proxy=proxy, json=data) as response:
                data = await response.json()
                if "id" in data:
                    message_id = data["id"]
                elif "message" in data:
                    raise RuntimeError(data["message"])
                else:
                    response.raise_for_status()

            params = {
                'chat_id': chat_id,
                'message_id': message_id,
            }
            async with session.post("https://open-assistant.io/api/chat/events", proxy=proxy, params=params) as response:
                start = "data: "
                async for line in response.content:
                    line = line.decode("utf-8")
                    if line and line.startswith(start):
                        line = json.loads(line[len(start):])
                        if line["event_type"] == "token":
                            yield line["text"]

            params = {
                'chat_id': chat_id,
            }
            async with session.delete("https://open-assistant.io/api/chat", proxy=proxy, params=params) as response:
                response.raise_for_status()

    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("proxy", "str"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"

g4f/Provider/OpenaiChat.py (new file)
@@ -0,0 +1,86 @@
from __future__ import annotations

has_module = True
try:
    from revChatGPT.V1 import AsyncChatbot
except ImportError:
    has_module = False

import json

from httpx import AsyncClient

from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies


class OpenaiChat(AsyncGeneratorProvider):
    url = "https://chat.openai.com"
    needs_auth = True
    working = has_module
    supports_gpt_35_turbo = True
    supports_gpt_4 = True
    supports_stream = True
    _access_token = None

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        access_token: str = _access_token,
        cookies: dict = None,
        **kwargs: dict
    ) -> AsyncGenerator:
        config = {"access_token": access_token, "model": model}
        if proxy:
            if "://" not in proxy:
                proxy = f"http://{proxy}"
            config["proxy"] = proxy

        bot = AsyncChatbot(
            config=config
        )

        if not access_token:
            cookies = cookies if cookies else get_cookies("chat.openai.com")
            cls._access_token = await get_access_token(bot.session, cookies)
            bot.set_access_token(cls._access_token)

        returned = None
        async for message in bot.ask(format_prompt(messages)):
            message = message["message"]
            if returned:
                if message.startswith(returned):
                    new = message[len(returned):]
                    if new:
                        yield new
            else:
                yield message
            returned = message

        await bot.delete_conversation(bot.conversation_id)

    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("proxy", "str"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"


async def get_access_token(session: AsyncClient, cookies: dict):
    response = await session.get("https://chat.openai.com/api/auth/session", cookies=cookies)
    response.raise_for_status()
    try:
        return response.json()["accessToken"]
    except json.decoder.JSONDecodeError:
        raise RuntimeError(f"Response: {response.text}")

g4f/Provider/Raycast.py
@@ -1,17 +1,20 @@
from __future__ import annotations

import json

import requests

from ..typing import Any, CreateResult
from .base_provider import BaseProvider


class Raycast(BaseProvider):
    url                   = "https://raycast.com"
    supports_gpt_35_turbo = True
    supports_gpt_4        = True
    supports_stream       = True
    needs_auth            = True
    working               = True

    @staticmethod
    def create_completion(

g4f/Provider/Theb.py
@@ -1,74 +1,75 @@
from __future__ import annotations

import json
import random

import requests

from ..typing import Any, CreateResult
from .base_provider import BaseProvider


class Theb(BaseProvider):
    url                   = "https://theb.ai"
    working               = True
    supports_stream       = True
    supports_gpt_35_turbo = True
    needs_auth            = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:

        conversation = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
        conversation += "\nassistant: "

        auth = kwargs.get("auth", {
            "bearer_token": "free",
            "org_id": "theb",
        })

        bearer_token = auth["bearer_token"]
        org_id       = auth["org_id"]

        headers = {
            'authority'          : 'beta.theb.ai',
            'accept'             : 'text/event-stream',
            'accept-language'    : 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
            'authorization'      : 'Bearer ' + bearer_token,
            'content-type'       : 'application/json',
            'origin'             : 'https://beta.theb.ai',
            'referer'            : 'https://beta.theb.ai/home',
            'sec-ch-ua'          : '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
            'sec-ch-ua-mobile'   : '?0',
            'sec-ch-ua-platform' : '"Windows"',
            'sec-fetch-dest'     : 'empty',
            'sec-fetch-mode'     : 'cors',
            'sec-fetch-site'     : 'same-origin',
            'user-agent'         : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
            'x-ai-model'         : 'ee8d4f29cb7047f78cbe84313ed6ace8',
        }

        req_rand = random.randint(100000000, 9999999999)

        json_data: dict[str, Any] = {
            "text"   : conversation,
            "category": "04f58f64a4aa4191a957b47290fee864",
            "model"  : "ee8d4f29cb7047f78cbe84313ed6ace8",
            "model_params": {
                "system_prompt"     : "You are ChatGPT, a large language model trained by OpenAI, based on the GPT-3.5 architecture.\nKnowledge cutoff: 2021-09\nCurrent date: {{YYYY-MM-DD}}",
                "temperature"       : kwargs.get("temperature", 1),
                "top_p"             : kwargs.get("top_p", 1),
                "frequency_penalty" : kwargs.get("frequency_penalty", 0),
                "presence_penalty"  : kwargs.get("presence_penalty", 0),
                "long_term_memory"  : "auto"
            }
        }

        response = requests.post(f"https://beta.theb.ai/api/conversation?org_id={org_id}&req_rand={req_rand}",
            headers=headers, json=json_data, stream=True)

        response.raise_for_status()
        content = ""
        next_content = ""

g4f/Provider/V50.py
@@ -1,52 +1,57 @@
from __future__ import annotations

import uuid

import requests

from ..typing import Any, CreateResult
from .base_provider import BaseProvider


class V50(BaseProvider):
    url                   = 'https://p5.v50.ltd'
    supports_gpt_35_turbo = True
    supports_stream       = False
    needs_auth            = False
    working               = False

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:

        conversation = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
        conversation += "\nassistant: "

        payload = {
            "prompt"        : conversation,
            "options"       : {},
            "systemMessage" : ".",
            "temperature"   : kwargs.get("temperature", 0.4),
            "top_p"         : kwargs.get("top_p", 0.4),
            "model"         : model,
            "user"          : str(uuid.uuid4())
        }

        headers = {
            'authority'          : 'p5.v50.ltd',
            'accept'             : 'application/json, text/plain, */*',
            'accept-language'    : 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
            'content-type'       : 'application/json',
            'origin'             : 'https://p5.v50.ltd',
            'referer'            : 'https://p5.v50.ltd/',
            'sec-ch-ua-platform' : '"Windows"',
            'sec-fetch-dest'     : 'empty',
            'sec-fetch-mode'     : 'cors',
            'sec-fetch-site'     : 'same-origin',
            'user-agent'         : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36'
        }
        response = requests.post("https://p5.v50.ltd/api/chat-process",
            json=payload, headers=headers, proxies=kwargs['proxy'] if 'proxy' in kwargs else {})

        if "https://fk1.v50.ltd" not in response.text:
            yield response.text

    @classmethod
    @property

g4f/Provider/Vercel.py
@@ -1,3 +1,5 @@
from __future__ import annotations

import base64
import json
import uuid
@@ -10,17 +12,16 @@ from .base_provider import BaseProvider

class Vercel(BaseProvider):
    url                   = "https://play.vercel.ai"
    working               = True
    supports_gpt_35_turbo = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:

        if model in ["gpt-3.5-turbo", "gpt-4"]:
            model = "openai:" + model
        yield _chat(model_id=model, messages=messages)
@@ -29,8 +30,8 @@ class Vercel(BaseProvider):

def _chat(model_id: str, messages: list[dict[str, str]]) -> str:
    session = requests.Session(impersonate="chrome107")

    url     = "https://sdk.vercel.ai/api/generate"
    header  = _create_header(session)
    payload = _create_payload(model_id, messages)

    response = session.post(url=url, headers=header, json=payload)
@@ -44,15 +45,13 @@ def _create_payload(model_id: str, messages: list[dict[str, str]]) -> dict[str,
        "messages": messages,
        "playgroundId": str(uuid.uuid4()),
        "chatIndex": 0,
        "model": model_id} | default_params


def _create_header(session: requests.Session):
    custom_encoding = _get_custom_encoding(session)
    return {"custom-encoding": custom_encoding}


# based on https://github.com/ading2210/vercel-llm-api
def _get_custom_encoding(session: requests.Session):
    url = "https://sdk.vercel.ai/openai.jpeg"

g4f/Provider/Wewordle.py
@@ -1,3 +1,5 @@
from __future__ import annotations

import json
import random
import string
@@ -10,65 +12,62 @@ from .base_provider import BaseProvider

class Wewordle(BaseProvider):
    url                   = "https://wewordle.org/"
    working               = True
    supports_gpt_35_turbo = True

    @classmethod
    def create_completion(
        cls,
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:

        # randomize user id and app id
        _user_id = "".join(
            random.choices(f"{string.ascii_lowercase}{string.digits}", k=16))

        _app_id = "".join(
            random.choices(f"{string.ascii_lowercase}{string.digits}", k=31))

        # make current date with format utc
        _request_date = time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime())
        headers = {
            "accept"       : "*/*",
            "pragma"       : "no-cache",
            "Content-Type" : "application/json",
            "Connection"   : "keep-alive"
            # user agent android client
            # 'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 10; SM-G975F Build/QP1A.190711.020)',
        }
        data: dict[str, Any] = {
            "user"      : _user_id,
            "messages"  : messages,
            "subscriber": {
                "originalPurchaseDate"          : None,
                "originalApplicationVersion"    : None,
                "allPurchaseDatesMillis"        : {},
                "entitlements"                  : {"active": {}, "all": {}},
                "allPurchaseDates"              : {},
                "allExpirationDatesMillis"      : {},
                "allExpirationDates"            : {},
                "originalAppUserId"             : f"$RCAnonymousID:{_app_id}",
                "latestExpirationDate"          : None,
                "requestDate"                   : _request_date,
                "latestExpirationDateMillis"    : None,
                "nonSubscriptionTransactions"   : [],
                "originalPurchaseDateMillis"    : None,
                "managementURL"                 : None,
                "allPurchasedProductIdentifiers": [],
                "firstSeen"                     : _request_date,
                "activeSubscriptions"           : [],
            }
        }

        response = requests.post(f"{cls.url}gptapi/v1/android/turbo",
            headers=headers, data=json.dumps(data))

        response.raise_for_status()
        _json = response.json()
        if "message" in _json:
g4f/Provider/Wuguokai.py (new file)
@@ -0,0 +1,68 @@
from __future__ import annotations

import random

import requests

from ..typing import Any, CreateResult
from .base_provider import BaseProvider


class Wuguokai(BaseProvider):
    url = 'https://chat.wuguokai.xyz'
    supports_gpt_35_turbo = True
    working = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool,
        **kwargs: Any,
    ) -> CreateResult:
        base = ''
        for message in messages:
            base += '%s: %s\n' % (message['role'], message['content'])
        base += 'assistant:'

        headers = {
            'authority': 'ai-api.wuguokai.xyz',
            'accept': 'application/json, text/plain, */*',
            'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
            'content-type': 'application/json',
            'origin': 'https://chat.wuguokai.xyz',
            'referer': 'https://chat.wuguokai.xyz/',
            'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Windows"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-site',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
        }
        data = {
            "prompt": base,
            "options": {},
            "userId": f"#/chat/{random.randint(1, 99999999)}",
            "usingContext": True
        }
        response = requests.post("https://ai-api20.wuguokai.xyz/api/chat-process",
            headers=headers, timeout=3, json=data,
            proxies=kwargs['proxy'] if 'proxy' in kwargs else {})
        # the split marker below translates to: "if the answer fails, please retry or refresh the page a few times"
        _split = response.text.split("> 若回答失败请重试或多刷新几次界面后重试")
        if response.status_code == 200:
            if len(_split) > 1:
                yield _split[1].strip()
            else:
                yield _split[0].strip()
        else:
            raise Exception(f"Error: {response.status_code} {response.reason}")

    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool")
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"

g4f/Provider/You.py
@@ -1,59 +1,40 @@
from __future__ import annotations

import json

from aiohttp import ClientSession

from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies


class You(AsyncGeneratorProvider):
    url = "https://you.com"
    working = True
    supports_gpt_35_turbo = True
    supports_stream = True

    @staticmethod
    async def create_async_generator(
        model: str,
        messages: list[dict[str, str]],
        cookies: dict = None,
        **kwargs,
    ) -> AsyncGenerator:
        if not cookies:
            cookies = get_cookies("you.com")

        headers = {
            "Accept": "text/event-stream",
            "Referer": "https://you.com/search?fromSearchBar=true&tbm=youchat",
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/116.0"
        }
        async with ClientSession(headers=headers, cookies=cookies) as session:
            async with session.get(
                "https://you.com/api/streamingSearch",
                params={"q": format_prompt(messages), "domain": "youchat", "chat": ""},
            ) as response:
                start = 'data: {"youChatToken": '
                async for line in response.content:
                    line = line.decode('utf-8')
                    if line.startswith(start):
                        yield json.loads(line[len(start): -2])

g4f/Provider/Yqcloud.py
@@ -1,45 +1,45 @@
from __future__ import annotations

from aiohttp import ClientSession

from .base_provider import AsyncProvider, format_prompt


class Yqcloud(AsyncProvider):
    url = "https://chat9.yqcloud.top/"
    working = True
    supports_gpt_35_turbo = True

    @staticmethod
    async def create_async(
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        **kwargs,
    ) -> str:
        async with ClientSession(
            headers=_create_header()
        ) as session:
            payload = _create_payload(messages)
            async with session.post("https://api.aichatos.cloud/api/generateStream", proxy=proxy, json=payload) as response:
                response.raise_for_status()
                return await response.text()


def _create_header():
    return {
        "accept"       : "application/json, text/plain, */*",
        "content-type" : "application/json",
        "origin"       : "https://chat9.yqcloud.top",
    }


def _create_payload(messages: list[dict[str, str]]):
    return {
        "prompt": format_prompt(messages),
        "network": True,
        "system": "",
        "withoutContext": False,
        "stream": False,
        "userId": "#/chat/1693025544336"
    }
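Yqcloud implements the non-streaming `AsyncProvider` path, so `create_async` resolves to the whole completion at once. A small usage sketch, assuming the package layout in this commit:

```py
import asyncio
import g4f

async def main():
    text = await g4f.Provider.Yqcloud.create_async(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(text)

asyncio.run(main())
```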

g4f/Provider/__init__.py
@@ -1,59 +1,69 @@
from __future__ import annotations

from .Acytoo import Acytoo
from .Aichat import Aichat
from .Ails import Ails
from .AiService import AiService
from .AItianhu import AItianhu
from .Bard import Bard
from .Bing import Bing
from .ChatgptAi import ChatgptAi
from .ChatgptLogin import ChatgptLogin
from .DeepAi import DeepAi
from .DfeHub import DfeHub
from .EasyChat import EasyChat
from .Forefront import Forefront
from .GetGpt import GetGpt
from .H2o import H2o
from .HuggingChat import HuggingChat
from .Liaobots import Liaobots
from .Lockchat import Lockchat
from .Opchatgpts import Opchatgpts
from .OpenaiChat import OpenaiChat
from .OpenAssistant import OpenAssistant
from .Raycast import Raycast
from .Theb import Theb
from .Vercel import Vercel
from .Wewordle import Wewordle
from .You import You
from .Yqcloud import Yqcloud
from .Equing import Equing
from .FastGpt import FastGpt
from .V50 import V50
from .Wuguokai import Wuguokai

from .base_provider import BaseProvider, AsyncProvider, AsyncGeneratorProvider

__all__ = [
    'BaseProvider',
    'Acytoo',
    'Aichat',
    'Ails',
    'AiService',
    'AItianhu',
    'Bard',
    'Bing',
    'ChatgptAi',
    'ChatgptLogin',
    'DeepAi',
    'DfeHub',
    'EasyChat',
    'Forefront',
    'GetGpt',
    'H2o',
    'HuggingChat',
    'Liaobots',
    'Lockchat',
    'Opchatgpts',
    'Raycast',
    'OpenaiChat',
    'OpenAssistant',
    'Theb',
    'Vercel',
    'Wewordle',
    'You',
    'Yqcloud',
    'Equing',
    'FastGpt',
    'Wuguokai',
    'V50'
]

g4f/Provider/base_provider.py
@@ -1,24 +1,28 @@
from __future__ import annotations
import asyncio
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
from ..typing import Any, CreateResult import browser_cookie3
from ..typing import Any, AsyncGenerator, CreateResult, Union
class BaseProvider(ABC): class BaseProvider(ABC):
url: str url: str
working = False working = False
needs_auth = False needs_auth = False
supports_stream = False supports_stream = False
supports_gpt_35_turbo = False supports_gpt_35_turbo = False
supports_gpt_4 = False supports_gpt_4 = False
@staticmethod @staticmethod
@abstractmethod @abstractmethod
def create_completion( def create_completion(
model: str, model: str,
messages: list[dict[str, str]], messages: list[dict[str, str]],
stream: bool, stream: bool, **kwargs: Any) -> CreateResult:
**kwargs: Any,
) -> CreateResult:
raise NotImplementedError() raise NotImplementedError()
@classmethod @classmethod
@ -30,4 +34,89 @@ class BaseProvider(ABC):
("stream", "bool"), ("stream", "bool"),
] ]
param = ", ".join([": ".join(p) for p in params]) param = ", ".join([": ".join(p) for p in params])
return f"g4f.provider.{cls.__name__} supports: ({param})" return f"g4f.provider.{cls.__name__} supports: ({param})"
_cookies = {}
def get_cookies(cookie_domain: str) -> dict:
if cookie_domain not in _cookies:
_cookies[cookie_domain] = {}
for cookie in browser_cookie3.load(cookie_domain):
_cookies[cookie_domain][cookie.name] = cookie.value
return _cookies[cookie_domain]


def format_prompt(messages: list[dict[str, str]], add_special_tokens=False):
    if add_special_tokens or len(messages) > 1:
        formatted = "\n".join(
            ["%s: %s" % ((message["role"]).capitalize(), message["content"]) for message in messages]
        )
        return f"{formatted}\nAssistant:"
    else:
        return messages.pop()["content"]
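
# Illustration (not part of the diff): with a multi-message history,
# format_prompt returns a role-prefixed transcript primed for the assistant:
#
#     format_prompt([
#         {"role": "user", "content": "Hi"},
#         {"role": "assistant", "content": "Hello!"},
#     ])
#     # -> "User: Hi\nAssistant: Hello!\nAssistant:"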


class AsyncProvider(BaseProvider):
    @classmethod
    def create_completion(
        cls,
        model: str,
        messages: list[dict[str, str]],
        stream: bool = False, **kwargs: Any) -> CreateResult:
        yield asyncio.run(cls.create_async(model, messages, **kwargs))

    @staticmethod
    @abstractmethod
    async def create_async(
        model: str,
        messages: list[dict[str, str]], **kwargs: Any) -> str:
        raise NotImplementedError()


class AsyncGeneratorProvider(AsyncProvider):
    @classmethod
    def create_completion(
        cls,
        model: str,
        messages: list[dict[str, str]],
        stream: bool = True,
        **kwargs
    ) -> CreateResult:
        yield from run_generator(cls.create_async_generator(model, messages, stream=stream, **kwargs))

    @classmethod
    async def create_async(
        cls,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> str:
        chunks = [chunk async for chunk in cls.create_async_generator(model, messages, stream=False, **kwargs)]
        if chunks:
            return "".join(chunks)

    @staticmethod
    @abstractmethod
    def create_async_generator(
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> AsyncGenerator:
        raise NotImplementedError()


def run_generator(generator: AsyncGenerator[Union[Any, str], Any]):
    loop = asyncio.new_event_loop()
    gen = generator.__aiter__()

    while True:
        try:
            yield loop.run_until_complete(gen.__anext__())
        except StopAsyncIteration:
            break
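
To see how the three base classes fit together, here is a minimal, purely hypothetical provider built on `AsyncGeneratorProvider`; the name `EchoProvider` and its behavior are invented for illustration:

```py
from typing import Any, AsyncGenerator

class EchoProvider(AsyncGeneratorProvider):
    # Hypothetical provider: streams the last user message back word by word.
    url = "https://example.invalid"
    working = True
    supports_stream = True

    @staticmethod
    async def create_async_generator(
        model: str,
        messages: list[dict[str, str]],
        **kwargs: Any
    ) -> AsyncGenerator:
        for word in messages[-1]["content"].split():
            yield word + " "

# The inherited entry points then both work:
#   "".join(EchoProvider.create_completion(None, [{"role": "user", "content": "hi there"}], stream=False))
#   await EchoProvider.create_async(None, [{"role": "user", "content": "hi there"}])
```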
@@ -1,45 +1,43 @@
from __future__ import annotations

from . import models
from .Provider import BaseProvider
from .typing import Any, CreateResult, Union

logging = False


class ChatCompletion:
    @staticmethod
    def create(
        model    : Union[models.Model, str],
        messages : list[dict[str, str]],
        provider : Union[type[BaseProvider], None] = None,
        stream   : bool = False,
        auth     : Union[str, None] = None, **kwargs: Any) -> Union[CreateResult, str]:

        if isinstance(model, str):
            try:
                model = models.ModelUtils.convert[model]
            except KeyError:
                raise Exception(f'The model: {model} does not exist')

        provider = model.best_provider if provider is None else provider

        if not provider.working:
            raise Exception(f'{provider.__name__} is not working')

        if provider.needs_auth and not auth:
            raise Exception(
                f'ValueError: {provider.__name__} requires authentication (use auth=\'cookie or token or jwt ...\' param)')

        if provider.needs_auth:
            kwargs['auth'] = auth

        if not provider.supports_stream and stream:
            raise Exception(
                f'ValueError: {provider.__name__} does not support "stream" argument')

        if logging:
            print(f'Using {provider.__name__} provider')

        result = provider.create_completion(model.name, messages, stream, **kwargs)
        return result if stream else ''.join(result)
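
End to end, the dispatcher above can be exercised like this (a sketch; when no provider is passed, the call falls back to `model.best_provider`):

```py
import g4f

g4f.logging = True  # prints which provider handles the call

# Non-streaming: create() joins the provider's generator into one string.
text = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
)
print(text)
```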
@@ -1,225 +1,207 @@
from __future__ import annotations

from dataclasses import dataclass
from .Provider import Bard, BaseProvider, GetGpt, H2o, Liaobots, Vercel, Equing


@dataclass
class Model:
    name: str
    base_provider: str
    best_provider: type[BaseProvider]


# Config for HuggingChat, OpenAssistant
# Works for Liaobots, H2o, OpenaiChat, Yqcloud, You
default = Model(
    name="",
    base_provider="huggingface",
    best_provider=H2o,
)

# GPT-3.5 / GPT-4
gpt_35_turbo = Model(
    name          = 'gpt-3.5-turbo',
    base_provider = 'openai',
    best_provider = GetGpt)

gpt_4 = Model(
    name          = 'gpt-4',
    base_provider = 'openai',
    best_provider = Liaobots)

# Bard
palm = Model(
    name          = 'palm',
    base_provider = 'google',
    best_provider = Bard)

# H2o
falcon_7b = Model(
    name          = 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3',
    base_provider = 'huggingface',
    best_provider = H2o)

falcon_40b = Model(
    name          = 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1',
    base_provider = 'huggingface',
    best_provider = H2o)

llama_13b = Model(
    name          = 'h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b',
    base_provider = 'huggingface',
    best_provider = H2o)

# Vercel
claude_instant_v1 = Model(
    name          = 'anthropic:claude-instant-v1',
    base_provider = 'anthropic',
    best_provider = Vercel)

claude_v1 = Model(
    name          = 'anthropic:claude-v1',
    base_provider = 'anthropic',
    best_provider = Vercel)

claude_v2 = Model(
    name          = 'anthropic:claude-v2',
    base_provider = 'anthropic',
    best_provider = Vercel)

command_light_nightly = Model(
    name          = 'cohere:command-light-nightly',
    base_provider = 'cohere',
    best_provider = Vercel)

command_nightly = Model(
    name          = 'cohere:command-nightly',
    base_provider = 'cohere',
    best_provider = Vercel)

gpt_neox_20b = Model(
    name          = 'huggingface:EleutherAI/gpt-neox-20b',
    base_provider = 'huggingface',
    best_provider = Vercel)

oasst_sft_1_pythia_12b = Model(
    name          = 'huggingface:OpenAssistant/oasst-sft-1-pythia-12b',
    base_provider = 'huggingface',
    best_provider = Vercel)

oasst_sft_4_pythia_12b_epoch_35 = Model(
    name          = 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5',
    base_provider = 'huggingface',
    best_provider = Vercel)

santacoder = Model(
    name          = 'huggingface:bigcode/santacoder',
    base_provider = 'huggingface',
    best_provider = Vercel)

bloom = Model(
    name          = 'huggingface:bigscience/bloom',
    base_provider = 'huggingface',
    best_provider = Vercel)

flan_t5_xxl = Model(
    name          = 'huggingface:google/flan-t5-xxl',
    base_provider = 'huggingface',
    best_provider = Vercel)

code_davinci_002 = Model(
    name          = 'openai:code-davinci-002',
    base_provider = 'openai',
    best_provider = Vercel)

gpt_35_turbo_16k = Model(
    name          = 'openai:gpt-3.5-turbo-16k',
    base_provider = 'openai',
    best_provider = Vercel)

gpt_35_turbo_16k_0613 = Model(
    name          = 'openai:gpt-3.5-turbo-16k-0613',
    base_provider = 'openai',
    best_provider = Equing)

gpt_4_0613 = Model(
    name          = 'openai:gpt-4-0613',
    base_provider = 'openai',
    best_provider = Vercel)

text_ada_001 = Model(
    name          = 'openai:text-ada-001',
    base_provider = 'openai',
    best_provider = Vercel)

text_babbage_001 = Model(
    name          = 'openai:text-babbage-001',
    base_provider = 'openai',
    best_provider = Vercel)

text_curie_001 = Model(
    name          = 'openai:text-curie-001',
    base_provider = 'openai',
    best_provider = Vercel)

text_davinci_002 = Model(
    name          = 'openai:text-davinci-002',
    base_provider = 'openai',
    best_provider = Vercel)

text_davinci_003 = Model(
    name          = 'openai:text-davinci-003',
    base_provider = 'openai',
    best_provider = Vercel)

llama13b_v2_chat = Model(
    name          = 'replicate:a16z-infra/llama13b-v2-chat',
    base_provider = 'replicate',
    best_provider = Vercel)

llama7b_v2_chat = Model(
    name          = 'replicate:a16z-infra/llama7b-v2-chat',
    base_provider = 'replicate',
    best_provider = Vercel)


class ModelUtils:
    convert: dict[str, Model] = {
        # GPT-3.5 / GPT-4
        'gpt-3.5-turbo' : gpt_35_turbo,
        'gpt-4'         : gpt_4,

        # Bard
        'palm2'       : palm,
        'palm'        : palm,
        'google'      : palm,
        'google-bard' : palm,
        'google-palm' : palm,
        'bard'        : palm,

        # H2o
        'falcon-40b' : falcon_40b,
        'falcon-7b'  : falcon_7b,
        'llama-13b'  : llama_13b,

        # Vercel
        'claude-instant-v1' : claude_instant_v1,
        'claude-v1'         : claude_v1,
        'claude-v2'         : claude_v2,
        'command-nightly'   : command_nightly,
        'gpt-neox-20b'      : gpt_neox_20b,
        'santacoder'        : santacoder,
        'bloom'             : bloom,
        'flan-t5-xxl'       : flan_t5_xxl,
        'code-davinci-002'  : code_davinci_002,
        'gpt-3.5-turbo-16k' : gpt_35_turbo_16k,
        'gpt-4-0613'        : gpt_4_0613,
        'text-ada-001'      : text_ada_001,
        'text-babbage-001'  : text_babbage_001,
        'text-curie-001'    : text_curie_001,
        'text-davinci-002'  : text_davinci_002,
        'text-davinci-003'  : text_davinci_003,
        'llama13b-v2-chat'  : llama13b_v2_chat,
        'llama7b-v2-chat'   : llama7b_v2_chat,

        'oasst-sft-1-pythia-12b'           : oasst_sft_1_pythia_12b,
        'oasst-sft-4-pythia-12b-epoch-3.5' : oasst_sft_4_pythia_12b_epoch_35,
        'command-light-nightly'            : command_light_nightly,
        'gpt-3.5-turbo-16k-0613'           : gpt_35_turbo_16k_0613,
    }
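
For reference, this is the same alias lookup `ChatCompletion.create` performs when it receives a string model name (a sketch):

```py
from g4f import models

model = models.ModelUtils.convert["gpt-3.5-turbo"]
print(model.name, model.base_provider, model.best_provider.__name__)
# -> gpt-3.5-turbo openai GetGpt
```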
@@ -1,15 +1,14 @@
from typing import Any, AsyncGenerator, Generator, NewType, Tuple, TypedDict, Union

SHA256 = NewType('sha_256_hash', str)
CreateResult = Generator[str, None, None]

__all__ = [
    'Any',
    'AsyncGenerator',
    'Generator',
    'Tuple',
    'TypedDict',
    'SHA256',
    'CreateResult',
]
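
`CreateResult` is simply an alias for a string generator, which is why non-streaming callers can `''.join()` a provider's output. A tiny sketch:

```py
from g4f.typing import CreateResult

def fake_completion() -> CreateResult:
    # Any str generator satisfies the alias.
    yield "Hello, "
    yield "world!"

print("".join(fake_completion()))  # Hello, world!
```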
@@ -12,6 +12,7 @@ from g4f import ChatCompletion
app = Flask(__name__)
CORS(app)


@app.route("/chat/completions", methods=["POST"])
def chat_completions():
    model = request.get_json().get("model", "gpt-3.5-turbo")

@@ -87,5 +88,9 @@ def chat_completions():

    return app.response_class(streaming(), mimetype="text/event-stream")


def main():
    app.run(host="0.0.0.0", port=1337, debug=True)


if __name__ == "__main__":
    main()
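
The new `main()` wrapper is what the `g4f` console script in setup.py (below) points at; launching the proxy by hand is equivalent (a sketch, assuming the repository root is on `sys.path`):

```py
from interference.app import main

main()  # serves the OpenAI-compatible API on 0.0.0.0:1337
```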
@@ -8,4 +8,5 @@ websockets
js2py
quickjs
flask
flask-cors
httpx
@@ -11,7 +11,10 @@ with codecs.open(os.path.join(here, "README.md"), encoding="utf-8") as fh:
with open("requirements.txt") as f: with open("requirements.txt") as f:
required = f.read().splitlines() required = f.read().splitlines()
VERSION = '0.0.2.3' with open("interference/requirements.txt") as f:
api_required = f.read().splitlines()
VERSION = "0.0.2.6"
DESCRIPTION = ( DESCRIPTION = (
"The official gpt4free repository | various collection of powerful language models" "The official gpt4free repository | various collection of powerful language models"
) )
@ -26,11 +29,16 @@ setup(
long_description_content_type="text/markdown", long_description_content_type="text/markdown",
long_description=long_description, long_description=long_description,
packages=find_packages(), packages=find_packages(),
data_files=["interference/app.py"],
install_requires=required, install_requires=required,
url='https://github.com/xtekky/gpt4free', # Link to your GitHub repository extras_require={"api": api_required},
entry_points={
"console_scripts": ["g4f=interference.app:main"],
},
url="https://github.com/xtekky/gpt4free", # Link to your GitHub repository
project_urls={ project_urls={
'Source Code': 'https://github.com/xtekky/gpt4free', # GitHub link "Source Code": "https://github.com/xtekky/gpt4free", # GitHub link
'Bug Tracker': 'https://github.com/xtekky/gpt4free/issues', # Link to issue tracker "Bug Tracker": "https://github.com/xtekky/gpt4free/issues", # Link to issue tracker
}, },
keywords=[ keywords=[
"python", "python",

testing/log_time.py (new file)

@@ -0,0 +1,25 @@
from time import time


async def log_time_async(method: callable, **kwargs):
    start = time()
    result = await method(**kwargs)
    secs = f"{round(time() - start, 2)} secs"
    if result:
        return " ".join([result, secs])
    return secs


def log_time_yield(method: callable, **kwargs):
    start = time()
    result = yield from method(**kwargs)
    yield f" {round(time() - start, 2)} secs"


def log_time(method: callable, **kwargs):
    start = time()
    result = method(**kwargs)
    secs = f"{round(time() - start, 2)} secs"
    if result:
        return " ".join([result, secs])
    return secs
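
The helpers append a duration to whatever the wrapped callable returns. A hypothetical use:

```py
from time import sleep

def slow() -> str:
    sleep(0.1)
    return "done"

print(log_time(slow))  # e.g. "done 0.1 secs"
```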
@@ -14,4 +14,4 @@ response = g4f.ChatCompletion.create(
    active_server=5,
)
print(response)
@@ -0,0 +1,96 @@
import sys
from pathlib import Path
import asyncio

sys.path.append(str(Path(__file__).parent.parent))

import g4f
from testing.log_time import log_time, log_time_async, log_time_yield

_providers = [
    g4f.Provider.H2o,
    g4f.Provider.You,
    g4f.Provider.HuggingChat,
    g4f.Provider.OpenAssistant,
    g4f.Provider.Bing,
    g4f.Provider.Bard
]

_instruct = "Hello, tell about you in one sentence."

_example = """
OpenaiChat: Hello! How can I assist you today? 2.0 secs
Bard: Hello! How can I help you today? 3.44 secs
Bing: Hello, this is Bing. How can I help? 😊 4.14 secs
Async Total: 4.25 secs

OpenaiChat: Hello! How can I assist you today? 1.85 secs
Bard: Hello! How can I help you today? 3.38 secs
Bing: Hello, this is Bing. How can I help? 😊 6.14 secs
Stream Total: 11.37 secs

OpenaiChat: Hello! How can I help you today? 3.28 secs
Bard: Hello there! How can I help you today? 3.58 secs
Bing: Hello! How can I help you today? 3.28 secs
No Stream Total: 10.14 secs
"""

print("Bing: ", end="")
for response in log_time_yield(
    g4f.ChatCompletion.create,
    model=g4f.models.gpt_35_turbo,
    messages=[{"role": "user", "content": _instruct}],
    provider=g4f.Provider.Bing,
    #cookies=g4f.get_cookies(".huggingface.co"),
    #stream=True,
    auth=True
):
    print(response, end="")
print()
print()


async def run_async():
    responses = [
        log_time_async(
            provider.create_async,
            model=None,
            messages=[{"role": "user", "content": _instruct}],
        )
        for provider in _providers
    ]
    responses = await asyncio.gather(*responses)
    for idx, provider in enumerate(_providers):
        print(f"{provider.__name__}:", responses[idx])

print("Async Total:", asyncio.run(log_time_async(run_async)))
print()


def run_stream():
    for provider in _providers:
        print(f"{provider.__name__}: ", end="")
        for response in log_time_yield(
            provider.create_completion,
            model=None,
            messages=[{"role": "user", "content": _instruct}],
        ):
            print(response, end="")
        print()

print("Stream Total:", log_time(run_stream))
print()


def create_no_stream():
    for provider in _providers:
        print(f"{provider.__name__}:", end=" ")
        for response in log_time_yield(
            provider.create_completion,
            model=None,
            messages=[{"role": "user", "content": _instruct}],
            stream=False
        ):
            print(response, end="")
        print()

print("No Stream Total:", log_time(create_no_stream))
print()
@@ -1,67 +1,76 @@
import sys
from pathlib import Path
from colorama import Fore

sys.path.append(str(Path(__file__).parent.parent))

from g4f import BaseProvider, models, Provider

logging = False


def main():
    providers = get_providers()
    failed_providers = []

    for _provider in providers:
        if _provider.needs_auth:
            continue
        print("Provider:", _provider.__name__)
        result = test(_provider)
        print("Result:", result)
        if _provider.working and not result:
            failed_providers.append(_provider)
    print()

    if failed_providers:
        print(f"{Fore.RED}Failed providers:\n")
        for _provider in failed_providers:
            print(f"{Fore.RED}{_provider.__name__}")
    else:
        print(f"{Fore.GREEN}All providers are working")


def get_providers() -> list[type[BaseProvider]]:
    provider_names = dir(Provider)
    ignore_names = [
        "base_provider",
        "BaseProvider"
    ]
    provider_names = [
        provider_name
        for provider_name in provider_names
        if not provider_name.startswith("__") and provider_name not in ignore_names
    ]
    return [getattr(Provider, provider_name) for provider_name in sorted(provider_names)]


def create_response(_provider: type[BaseProvider]) -> str:
    if _provider.supports_gpt_35_turbo:
        model = models.gpt_35_turbo.name
    elif _provider.supports_gpt_4:
        model = models.gpt_4.name  # pass the model's name, not the Model object
    elif hasattr(_provider, "model"):
        model = _provider.model
    else:
        model = None
    response = _provider.create_completion(
        model=model,
        messages=[{"role": "user", "content": "Hello"}],
        stream=False,
    )
    return "".join(response)


def test(_provider: type[BaseProvider]) -> bool:
    try:
        response = create_response(_provider)
        assert type(response) is str
        assert len(response) > 0
        return response
    except Exception as e:
        if logging:
            print(e)
        return False
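
On success `test()` returns the (truthy) response text rather than `True`, so callers can branch and log in one step. A hypothetical call from within this module:

```py
from g4f import Provider

result = test(Provider.GetGpt)  # response text, or False on failure
if result:
    print("GetGpt replied:", result)
else:
    print("GetGpt failed")
```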
@@ -30,4 +30,4 @@ def create_content():
if __name__ == "__main__":
    main()
@@ -6,14 +6,35 @@ from urllib.parse import urlparse
sys.path.append(str(Path(__file__).parent.parent))

from g4f import models, Provider
from g4f.Provider.base_provider import BaseProvider, AsyncProvider
from testing.test_providers import test


def print_imports():
    print("##### Providers:")
    print("```py")
    print("from g4f.Provider import (")
    for _provider in get_providers():
        if _provider.working:
            print(f"    {_provider.__name__},")
    print(")")
    print("# Usage:")
    print("response = g4f.ChatCompletion.create(..., provider=ProviderName)")
    print("```")
    print()
    print()


def print_async():
    print("##### Async support:")
    print("```py")
    print("from g4f.Provider import (")
    for _provider in get_providers():
        if issubclass(_provider, AsyncProvider):
            print(f"    {_provider.__name__},")
    print(")")
    print("```")
    print()
    print()


def print_providers():
    lines = [

@@ -21,40 +42,50 @@ def print_providers():

        "| ------ | ------- | ------- | ----- | --------- | ------ | ---- |",
    ]
    providers = get_providers()
    for is_working in (True, False):
        for _provider in providers:
            if is_working != _provider.working:
                continue

            netloc = urlparse(_provider.url).netloc
            website = f"[{netloc}]({_provider.url})"

            provider_name = f"g4f.provider.{_provider.__name__}"

            has_gpt_35 = "✔️" if _provider.supports_gpt_35_turbo else "❌"
            has_gpt_4 = "✔️" if _provider.supports_gpt_4 else "❌"
            stream = "✔️" if _provider.supports_stream else "❌"
            if _provider.working:
                if test(_provider):
                    status = '![Active](https://img.shields.io/badge/Active-brightgreen)'
                else:
                    status = '![Unknown](https://img.shields.io/badge/Unknown-grey)'
            else:
                status = '![Inactive](https://img.shields.io/badge/Inactive-red)'
            auth = "✔️" if _provider.needs_auth else "❌"

            lines.append(
                f"| {website} | {provider_name} | {has_gpt_35} | {has_gpt_4} | {stream} | {status} | {auth} |"
            )
    print("\n".join(lines))


def get_provider_names() -> list[str]:
    provider_names = dir(Provider)
    ignore_names = [
        "base_provider",
        "BaseProvider",
        "AsyncProvider",
        "AsyncGeneratorProvider"
    ]
    return [
        provider_name
        for provider_name in provider_names
        if not provider_name.startswith("__") and provider_name not in ignore_names
    ]


def get_providers() -> list[type[BaseProvider]]:
    return [getattr(Provider, provider_name) for provider_name in get_provider_names()]


def print_models():

@@ -79,6 +110,8 @@ def print_models():

    _models = get_models()
    for model in _models:
        if model.best_provider.__name__ not in provider_urls:
            continue

        split_name = re.split(r":|/", model.name)
        name = split_name[-1]

@@ -100,4 +133,8 @@ def get_models():

if __name__ == "__main__":
    print_imports()
    print_async()
    print_providers()
    print("\n", "-" * 50, "\n")
    print_models()