Merge remote-tracking branch 'origin/main'

# Conflicts:
#	g4f/Provider/Aivvm.py
#	g4f/models.py

Author: Commenter123321
Date:   2023-10-10 14:15:12 +02:00
Commit: 0e4297494d

45 changed files with 566 additions and 376 deletions

--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@
 By using this repository or any code related to it, you agree to the [legal notice](./LEGAL_NOTICE.md). The author is not responsible for any copies, forks, reuploads made by other users, or anything else related to gpt4free. This is the author's only account and repository. To prevent impersonation or irresponsible actions, please comply with the GNU GPL license this Repository uses.
 
-- latest pypi version: ([0.1.5.6](https://pypi.org/project/g4f/0.1.5.6)):
+- latest pypi version: `[0.1.5.7](https://pypi.org/project/g4f/0.1.5.7)`:
 ```sh
 pip install -U g4f
 ```
 
@@ -224,19 +224,15 @@ from g4f.Provider import (
     Bing,
     ChatBase,
     ChatgptAi,
-    ChatgptLogin,
-    CodeLinkAva,
     DeepAi,
     H2o,
     HuggingChat,
-    Opchatgpts,
     OpenAssistant,
     OpenaiChat,
     Raycast,
     Theb,
     Vercel,
     Vitalentum,
-    Wewordle,
     Ylokh,
     You,
     Yqcloud,
@@ -284,19 +280,18 @@ _providers = [
     g4f.Provider.Aichat,
     g4f.Provider.ChatBase,
     g4f.Provider.Bing,
-    g4f.Provider.CodeLinkAva,
     g4f.Provider.DeepAi,
     g4f.Provider.GptGo,
-    g4f.Provider.Wewordle,
     g4f.Provider.You,
     g4f.Provider.Yqcloud,
 ]
 
-async def run_provider(provider: g4f.Provider.AsyncProvider):
+async def run_provider(provider: g4f.Provider.BaseProvider):
     try:
-        response = await provider.create_async(
-            model=g4f.models.default.name,
+        response = await g4f.ChatCompletion.create_async(
+            model=g4f.models.default,
             messages=[{"role": "user", "content": "Hello"}],
+            provider=provider,
         )
         print(f"{provider.__name__}:", response)
     except Exception as e:
@@ -311,6 +306,22 @@ async def run_all():
 
 asyncio.run(run_all())
 ```
 
+##### Proxy Support:
+
+All providers support specifying a proxy in the create function.
+
+```py
+import g4f
+
+response = await g4f.ChatCompletion.create_async(
+    model=g4f.models.default,
+    messages=[{"role": "user", "content": "Hello"}],
+    proxy="http://host:port",
+    # or socks5://user:pass@host:port
+)
+print(f"Result:", response)
+```
 
 ### interference openai-proxy api (use with openai python package)
 
 #### run interference from pypi package:
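
A minimal sketch of the synchronous variant, assuming the `proxy` keyword is forwarded unchanged to the provider via `**kwargs` (as the per-provider diffs below suggest):

```py
import g4f

# Same proxy formats as the async example above apply here.
response = g4f.ChatCompletion.create(
    model=g4f.models.default,
    messages=[{"role": "user", "content": "Hello"}],
    proxy="http://host:port",
)
print(response)
```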
@@ -521,38 +532,50 @@ if __name__ == "__main__":
 
 ## Contribute
 
-to add another provider, its very simple:
+#### Create Provider with AI Tool
+
+Call in your terminal the "create_provider" script:
+```bash
+$ python etc/tool/create_provider.py
+```
+1. Enter your name for the new provider.
+2. Copy&Paste a cURL command from your browser developer tools.
+3. Let the AI create the provider for you.
+4. Customize the provider according to your needs.
+
+#### Create Provider
+
 0. Check out the current [list of potential providers](https://github.com/zukixa/cool-ai-stuff#ai-chat-websites), or find your own provider source!
 1. Create a new file in [g4f/provider](./g4f/provider) with the name of the Provider
 2. Implement a class that extends [BaseProvider](./g4f/provider/base_provider.py).
 ```py
-from .base_provider import BaseProvider
-from ..typing import CreateResult, Any
+from __future__ import annotations
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider
 
-class HogeService(BaseProvider):
-    url = "http://hoge.com"
-    working = True
+class HogeService(AsyncGeneratorProvider):
+    url = "https://chat-gpt.com"
     supports_gpt_35_turbo = True
+    working = True
 
-    @staticmethod
-    def create_completion(
+    @classmethod
+    async def create_async_generator(
+        cls,
         model: str,
-        messages: list[dict[str, str]],
-        stream: bool,
-        **kwargs: Any,
-    ) -> CreateResult:
-        pass
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        yield ""
 ```
-3. Here, you can adjust the settings, for example if the website does support streaming, set `working` to `True`...
-4. Write code to request the provider in `create_completion` and `yield` the response, _even if_ its a one-time response, do not hesitate to look at other providers for inspiration
+3. Here, you can adjust the settings, for example if the website does support streaming, set `supports_stream` to `True`...
+4. Write code to request the provider in `create_async_generator` and `yield` the response, _even if_ it's a one-time response; do not hesitate to look at other providers for inspiration
 5. Add the Provider Name in [g4f/provider/__init__.py](./g4f/provider/__init__.py)
 ```py
-from .base_provider import BaseProvider
 from .HogeService import HogeService
 
 __all__ = [
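
After registration, the provider can be requested explicitly. A short usage sketch (`HogeService` is the hypothetical provider from the skeleton above):

```py
import g4f
from g4f.Provider import HogeService  # hypothetical provider from step 2

response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
    provider=HogeService,
)
print(response)
```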

--- a/etc/tool/create_provider.py
+++ b/etc/tool/create_provider.py
@@ -12,23 +12,13 @@ def read_code(text):
     if match:
         return match.group("code")
 
-def read_result(result):
-    lines = []
-    for line in result.split("\n"):
-        if (line.startswith("```")):
-            break
-        if (line):
-            lines.append(line)
-    explanation = "\n".join(lines) if lines else ""
-    return explanation, read_code(result)
-
 def input_command():
     print("Enter/Paste the cURL command. Ctrl-D or Ctrl-Z ( windows ) to save it.")
     contents = []
     while True:
         try:
             line = input()
-        except:
+        except EOFError:
             break
         contents.append(line)
     return "\n".join(contents)
@@ -41,12 +31,12 @@ from __future__ import annotations
 
 from aiohttp import ClientSession
 
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
 from .helper import format_prompt
 
-class ChatgptDuo(AsyncGeneratorProvider):
+class ChatGpt(AsyncGeneratorProvider):
     url = "https://chat-gpt.com"
     supports_gpt_35_turbo = True
     working = True
@@ -55,9 +45,10 @@ class ChatgptDuo(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
+        proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         headers = {
             "authority": "chat-gpt.com",
             "accept": "application/json",
@@ -65,16 +56,16 @@ class ChatgptDuo(AsyncGeneratorProvider):
             "referer": f"{cls.url}/chat",
         }
         async with ClientSession(headers=headers) as session:
-            prompt = format_prompt(messages),
+            prompt = format_prompt(messages)
             data = {
                 "prompt": prompt,
-                "purpose": "ask",
+                "purpose": "",
             }
-            async with session.post(cls.url + "/api/chat", json=data) as response:
+            async with session.post(f"{cls.url}/api/chat", json=data, proxy=proxy) as response:
                 response.raise_for_status()
-                async for stream in response.content:
-                    if stream:
-                        yield stream.decode()
+                async for chunk in response.content:
+                    if chunk:
+                        yield chunk.decode()
""" """
if not path.isfile(provider_path): if not path.isfile(provider_path):
@ -95,20 +86,25 @@ Replace "hello" with `format_prompt(messages)`.
And replace "gpt-3.5-turbo" with `model`. And replace "gpt-3.5-turbo" with `model`.
""" """
print("Create code...") response = []
response = g4f.ChatCompletion.create( for chunk in g4f.ChatCompletion.create(
model=g4f.models.gpt_35_long, model=g4f.models.gpt_35_long,
messages=[{"role": "user", "content": prompt}], messages=[{"role": "user", "content": prompt}],
auth=True, timeout=300,
timeout=120, stream=True
) ):
print(response) response.append(chunk)
explanation, code = read_result(response) print(chunk, end="", flush=True)
print()
response = "".join(response)
code = read_code(response)
if code: if code:
with open(provider_path, "w") as file: with open(provider_path, "w") as file:
file.write(code) file.write(code)
print("Saved at:", provider_path)
with open(f"g4f/Provider/__init__.py", "a") as file: with open(f"g4f/Provider/__init__.py", "a") as file:
file.write(f"\nfrom .{name} import {name}") file.write(f"\nfrom .{name} import {name}")
else: else:
with open(provider_path, "r") as file: with open(provider_path, "r") as file:
code = file.read() code = file.read()
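
For context: `read_code`, whose tail appears at the top of this diff, extracts the fenced code block from the model's response. A sketch of the assumed implementation (the exact regex is an assumption; only the last two lines appear above):

```py
import re

def read_code(text: str) -> str | None:
    # Assumed pattern: capture the body of a ```py / ```python fenced block.
    if match := re.search(r"```(python|py|)\n(?P<code>[\S\s]+?)\n```", text):
        return match.group("code")
```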

--- a/g4f/Provider/AItianhu.py
+++ b/g4f/Provider/AItianhu.py
@@ -2,7 +2,7 @@ from __future__ import annotations
 
 import json
 
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from ..requests import StreamSession
 from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
 
@@ -16,12 +16,12 @@ class AItianhu(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         proxy: str = None,
         cookies: dict = None,
-        timeout: int = 30,
+        timeout: int = 120,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         if not cookies:
             cookies = get_cookies("www.aitianhu.com")
         data = {

--- a/g4f/Provider/AItianhuSpace.py
+++ b/g4f/Provider/AItianhuSpace.py
@@ -2,7 +2,7 @@ from __future__ import annotations
 
 import random, json
 
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from ..requests import StreamSession
 from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
 
@@ -20,13 +20,13 @@ class AItianhuSpace(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         proxy: str = None,
         domain: str = None,
         cookies: dict = None,
-        timeout: int = 30,
+        timeout: int = 120,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         if not model:
             model = "gpt-3.5-turbo"
         elif not model in domains:

--- a/g4f/Provider/Acytoo.py
+++ b/g4f/Provider/Acytoo.py
@@ -2,7 +2,7 @@ from __future__ import annotations
 
 from aiohttp import ClientSession
 
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
 
@@ -15,11 +15,10 @@ class Acytoo(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         async with ClientSession(
             headers=_create_header()
         ) as session:

--- a/g4f/Provider/AiAsk.py
+++ b/g4f/Provider/AiAsk.py
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 from aiohttp import ClientSession
 
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
 
 class AiAsk(AsyncGeneratorProvider):
@@ -13,9 +13,10 @@ class AiAsk(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
+        proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         headers = {
             "accept": "application/json, text/plain, */*",
             "origin": cls.url,
@@ -33,7 +34,7 @@ class AiAsk(AsyncGeneratorProvider):
         }
         buffer = ""
         rate_limit = "您的免费额度不够使用这个模型啦,请点击右上角登录继续使用!"
-        async with session.post(f"{cls.url}/v1/chat/gpt/", json=data) as response:
+        async with session.post(f"{cls.url}/v1/chat/gpt/", json=data, proxy=proxy) as response:
             response.raise_for_status()
             async for chunk in response.content.iter_any():
                 buffer += chunk.decode()

--- a/g4f/Provider/Aibn.py
+++ b/g4f/Provider/Aibn.py
@@ -3,7 +3,7 @@ from __future__ import annotations
 import time
 import hashlib
 
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from ..requests import StreamSession
 from .base_provider import AsyncGeneratorProvider
 
@@ -17,11 +17,16 @@ class Aibn(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
-        timeout: int = 30,
+        messages: Messages,
+        proxy: str = None,
+        timeout: int = 120,
         **kwargs
-    ) -> AsyncGenerator:
-        async with StreamSession(impersonate="chrome107", timeout=timeout) as session:
+    ) -> AsyncResult:
+        async with StreamSession(
+            impersonate="chrome107",
+            proxies={"https": proxy},
+            timeout=timeout
+        ) as session:
             timestamp = int(time.time())
             data = {
                 "messages": messages,

--- a/g4f/Provider/Aichat.py
+++ b/g4f/Provider/Aichat.py
@@ -2,6 +2,7 @@ from __future__ import annotations
 
 from aiohttp import ClientSession
 
+from ..typing import Messages
 from .base_provider import AsyncProvider, format_prompt
 
@@ -13,7 +14,7 @@ class Aichat(AsyncProvider):
     @staticmethod
     async def create_async(
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         proxy: str = None,
         **kwargs
     ) -> str:

--- a/g4f/Provider/Ails.py
+++ b/g4f/Provider/Ails.py
@@ -7,7 +7,7 @@ import json
 from datetime import datetime
 from aiohttp import ClientSession
 
-from ..typing import SHA256, AsyncGenerator
+from ..typing import SHA256, AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
 
@@ -19,11 +19,11 @@ class Ails(AsyncGeneratorProvider):
     @staticmethod
     async def create_async_generator(
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         stream: bool,
         proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         headers = {
             "authority": "api.caipacity.com",
             "accept": "*/*",

--- a/g4f/Provider/Bing.py
+++ b/g4f/Provider/Bing.py
@@ -7,7 +7,7 @@ import os
 import uuid
 import urllib.parse
 from aiohttp import ClientSession, ClientTimeout
 
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
 
 class Tones():
@@ -32,11 +32,12 @@ class Bing(AsyncGeneratorProvider):
     @staticmethod
     def create_async_generator(
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
+        proxy: str = None,
         cookies: dict = None,
         tone: str = Tones.creative,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         if len(messages) < 2:
             prompt = messages[0]["content"]
             context = None
@@ -46,9 +47,9 @@ class Bing(AsyncGeneratorProvider):
         if not cookies or "SRCHD" not in cookies:
             cookies = default_cookies
-        return stream_generate(prompt, tone, context, cookies)
+        return stream_generate(prompt, tone, context, proxy, cookies)
 
-def create_context(messages: list[dict[str, str]]):
+def create_context(messages: Messages):
     context = "".join(f"[{message['role']}](#message)\n{message['content']}\n\n" for message in messages)
     return context
 
@@ -59,10 +60,10 @@ class Conversation():
         self.clientId = clientId
         self.conversationSignature = conversationSignature
 
-async def create_conversation(session: ClientSession) -> Conversation:
+async def create_conversation(session: ClientSession, proxy: str = None) -> Conversation:
     url = 'https://www.bing.com/turing/conversation/create?bundleVersion=1.1150.3'
-    async with await session.get(url) as response:
+    async with await session.get(url, proxy=proxy) as response:
         data = await response.json()
 
         conversationId = data.get('conversationId')
@@ -80,7 +81,7 @@ async def list_conversations(session: ClientSession) -> list:
         response = await response.json()
         return response["chats"]
 
-async def delete_conversation(session: ClientSession, conversation: Conversation) -> list:
+async def delete_conversation(session: ClientSession, conversation: Conversation, proxy: str = None) -> list:
     url = "https://sydney.bing.com/sydney/DeleteSingleConversation"
     json = {
         "conversationId": conversation.conversationId,
@@ -89,7 +90,7 @@ async def delete_conversation(session: ClientSession, conversation: Conversation
         "source": "cib",
         "optionsSets": ["autosave"]
     }
-    async with session.post(url, json=json) as response:
+    async with session.post(url, json=json, proxy=proxy) as response:
         response = await response.json()
         return response["result"]["value"] == "Success"
@@ -239,20 +240,22 @@ def create_message(conversation: Conversation, prompt: str, tone: str, context:
 async def stream_generate(
     prompt: str,
     tone: str,
-    context: str=None,
-    cookies: dict=None,
+    context: str = None,
+    proxy: str = None,
+    cookies: dict = None
 ):
     async with ClientSession(
         timeout=ClientTimeout(total=900),
         cookies=cookies,
         headers=Defaults.headers,
     ) as session:
-        conversation = await create_conversation(session)
+        conversation = await create_conversation(session, proxy)
         try:
             async with session.ws_connect(
                 f'wss://sydney.bing.com/sydney/ChatHub',
                 autoping=False,
-                params={'sec_access_token': conversation.conversationSignature}
+                params={'sec_access_token': conversation.conversationSignature},
+                proxy=proxy
             ) as wss:
 
                 await wss.send_str(format_message({'protocol': 'json', 'version': 1}))
@@ -297,4 +300,4 @@ async def stream_generate(
                 raise Exception(f"{result['value']}: {result['message']}")
             return
         finally:
-            await delete_conversation(session, conversation)
+            await delete_conversation(session, conversation, proxy)

--- a/g4f/Provider/ChatBase.py
+++ b/g4f/Provider/ChatBase.py
@@ -2,7 +2,7 @@ from __future__ import annotations
 
 from aiohttp import ClientSession
 
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
 
@@ -16,9 +16,10 @@ class ChatBase(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
+        proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         if model == "gpt-4":
             chat_id = "quran---tafseer-saadi-pdf-wbgknt7zn"
         elif model == "gpt-3.5-turbo" or not model:
@@ -44,7 +45,7 @@ class ChatBase(AsyncGeneratorProvider):
             "chatId": chat_id,
             "conversationId": f"kcXpqEnqUie3dnJlsRi_O-{chat_id}"
         }
-        async with session.post("https://www.chatbase.co/api/fe/chat", json=data) as response:
+        async with session.post("https://www.chatbase.co/api/fe/chat", json=data, proxy=proxy) as response:
             response.raise_for_status()
             async for stream in response.content.iter_any():
                 yield stream.decode()

--- a/g4f/Provider/ChatForAi.py
+++ b/g4f/Provider/ChatForAi.py
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from ..requests import StreamSession
 from .base_provider import AsyncGeneratorProvider
 
@@ -14,11 +14,12 @@ class ChatForAi(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
-        timeout: int = 30,
+        messages: Messages,
+        proxy: str = None,
+        timeout: int = 120,
         **kwargs
-    ) -> AsyncGenerator:
-        async with StreamSession(impersonate="chrome107", timeout=timeout) as session:
+    ) -> AsyncResult:
+        async with StreamSession(impersonate="chrome107", proxies={"https": proxy}, timeout=timeout) as session:
             prompt = messages[-1]["content"]
             data = {
                 "conversationId": "temp",

--- a/g4f/Provider/Chatgpt4Online.py
+++ b/g4f/Provider/Chatgpt4Online.py
@@ -3,7 +3,7 @@ from __future__ import annotations
 import json
 from aiohttp import ClientSession
 
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
 
@@ -16,9 +16,10 @@ class Chatgpt4Online(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
+        proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         async with ClientSession() as session:
             data = {
                 "botId": "default",
@@ -30,7 +31,7 @@ class Chatgpt4Online(AsyncGeneratorProvider):
                 "newMessage": messages[-1]["content"],
                 "stream": True
             }
-            async with session.post(cls.url + "/wp-json/mwai-ui/v1/chats/submit", json=data) as response:
+            async with session.post(cls.url + "/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response:
                 response.raise_for_status()
                 async for line in response.content:
                     if line.startswith(b"data: "):

--- a/g4f/Provider/ChatgptAi.py
+++ b/g4f/Provider/ChatgptAi.py
@@ -3,6 +3,7 @@ from __future__ import annotations
 import re
 from aiohttp import ClientSession
 
+from ..typing import Messages
 from .base_provider import AsyncProvider, format_prompt
 
@@ -18,7 +19,7 @@ class ChatgptAi(AsyncProvider):
     async def create_async(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         proxy: str = None,
         **kwargs
     ) -> str:

--- a/g4f/Provider/ChatgptDemo.py
+++ b/g4f/Provider/ChatgptDemo.py
@@ -2,8 +2,8 @@ from __future__ import annotations
 
 import time, json, re
 from aiohttp import ClientSession
 
-from typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
 from .helper import format_prompt
 
@@ -16,10 +16,10 @@ class ChatgptDemo(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         headers = {
             "authority": "chat.chatgptdemo.net",
             "accept-language": "de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US",

--- a/g4f/Provider/ChatgptDuo.py
+++ b/g4f/Provider/ChatgptDuo.py
@@ -1,5 +1,6 @@
 from __future__ import annotations
 
+from ..typing import Messages
 from curl_cffi.requests import AsyncSession
 from .base_provider import AsyncProvider, format_prompt
 
@@ -13,9 +14,9 @@ class ChatgptDuo(AsyncProvider):
     async def create_async(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         proxy: str = None,
-        timeout: int = 30,
+        timeout: int = 120,
         **kwargs
     ) -> str:
         async with AsyncSession(

--- a/g4f/Provider/ChatgptX.py
+++ b/g4f/Provider/ChatgptX.py
@@ -19,6 +19,7 @@ class ChatgptX(AsyncGeneratorProvider):
         cls,
         model: str,
         messages: Messages,
+        proxy: str = None,
         **kwargs
     ) -> AsyncResult:
         headers = {
@@ -32,7 +33,7 @@ class ChatgptX(AsyncGeneratorProvider):
             'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36',
         }
         async with ClientSession(headers=headers) as session:
-            async with session.get(f"{cls.url}/") as response:
+            async with session.get(f"{cls.url}/", proxy=proxy) as response:
                 response = await response.text()
                 result = re.search(r'<meta name="csrf-token" content="(.*?)"', response)
                 if result:
@@ -62,7 +63,7 @@ class ChatgptX(AsyncGeneratorProvider):
                 'x-csrf-token': csrf_token,
                 'x-requested-with': 'XMLHttpRequest'
             }
-            async with session.post(cls.url + '/sendchat', data=data, headers=headers) as response:
+            async with session.post(cls.url + '/sendchat', data=data, headers=headers, proxy=proxy) as response:
                 response.raise_for_status()
                 chat = await response.json()
                 if "response" not in chat or not chat["response"]:
@@ -82,7 +83,7 @@ class ChatgptX(AsyncGeneratorProvider):
                 "conversions_id": chat["conversions_id"],
                 "ass_conversions_id": chat["ass_conversions_id"],
             }
-            async with session.get(f'{cls.url}/chats_stream', params=data, headers=headers) as response:
+            async with session.get(f'{cls.url}/chats_stream', params=data, headers=headers, proxy=proxy) as response:
                 response.raise_for_status()
                 async for line in response.content:
                     if line.startswith(b"data: "):

--- a/g4f/Provider/Cromicle.py
+++ b/g4f/Provider/Cromicle.py
@@ -2,7 +2,7 @@ from __future__ import annotations
 
 from aiohttp import ClientSession
 from hashlib import sha256
-from typing import AsyncGenerator, Dict, List
+from ..typing import AsyncResult, Messages, Dict
 
 from .base_provider import AsyncGeneratorProvider
 from .helper import format_prompt
 
@@ -17,10 +17,10 @@ class Cromicle(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: List[Dict[str, str]],
+        messages: Messages,
         proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator[str, None]:
+    ) -> AsyncResult:
         async with ClientSession(
             headers=_create_header()
         ) as session:

--- a/g4f/Provider/DeepAi.py
+++ b/g4f/Provider/DeepAi.py
@@ -6,22 +6,22 @@ import random
 import hashlib
 from aiohttp import ClientSession
 
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
 
 class DeepAi(AsyncGeneratorProvider):
-    url: str = "https://deepai.org"
+    url = "https://deepai.org"
     working = True
     supports_gpt_35_turbo = True
 
     @staticmethod
     async def create_async_generator(
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         token_js = """
 var agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36'

--- a/g4f/Provider/FreeGpt.py
+++ b/g4f/Provider/FreeGpt.py
@@ -2,7 +2,7 @@ from __future__ import annotations
 
 import time, hashlib, random
 
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from ..requests import StreamSession
 from .base_provider import AsyncGeneratorProvider
 
@@ -20,11 +20,16 @@ class FreeGpt(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
-        timeout: int = 30,
+        messages: Messages,
+        proxy: str = None,
+        timeout: int = 120,
         **kwargs
-    ) -> AsyncGenerator:
-        async with StreamSession(impersonate="chrome107", timeout=timeout) as session:
+    ) -> AsyncResult:
+        async with StreamSession(
+            impersonate="chrome107",
+            timeout=timeout,
+            proxies={"https": proxy}
+        ) as session:
             prompt = messages[-1]["content"]
             timestamp = int(time.time())
             data = {

--- a/g4f/Provider/GPTalk.py
+++ b/g4f/Provider/GPTalk.py
@@ -2,8 +2,8 @@ from __future__ import annotations
 
 import secrets, time, json
 from aiohttp import ClientSession
 
-from typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
 from .helper import format_prompt
 
@@ -18,9 +18,10 @@ class GPTalk(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
+        proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         if not model:
             model = "gpt-3.5-turbo"
         timestamp = int(time.time())
@@ -48,7 +49,7 @@ class GPTalk(AsyncGeneratorProvider):
             "fingerprint": secrets.token_hex(16).zfill(32),
             "platform": "fingerprint"
         }
-        async with session.post(cls.url + "/api/chatgpt/user/login", json=data) as response:
+        async with session.post(cls.url + "/api/chatgpt/user/login", json=data, proxy=proxy) as response:
            response.raise_for_status()
            cls._auth = (await response.json())["data"]
        data = {
@@ -68,11 +69,11 @@ class GPTalk(AsyncGeneratorProvider):
         headers = {
             'authorization': f'Bearer {cls._auth["token"]}',
         }
-        async with session.post(cls.url + "/api/chatgpt/chatapi/text", json=data, headers=headers) as response:
+        async with session.post(cls.url + "/api/chatgpt/chatapi/text", json=data, headers=headers, proxy=proxy) as response:
             response.raise_for_status()
             token = (await response.json())["data"]["token"]
         last_message = ""
-        async with session.get(cls.url + "/api/chatgpt/chatapi/stream", params={"token": token}) as response:
+        async with session.get(cls.url + "/api/chatgpt/chatapi/stream", params={"token": token}, proxy=proxy) as response:
             response.raise_for_status()
             async for line in response.content:
                 if line.startswith(b"data: "):

--- a/g4f/Provider/GptForLove.py
+++ b/g4f/Provider/GptForLove.py
@@ -3,7 +3,7 @@ from __future__ import annotations
 from aiohttp import ClientSession
 import execjs, os, json
 
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
 from .helper import format_prompt
 
@@ -16,9 +16,10 @@ class GptForLove(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
+        proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         if not model:
             model = "gpt-3.5-turbo"
         headers = {
@@ -47,7 +48,7 @@ class GptForLove(AsyncGeneratorProvider):
             "secret": get_secret(),
             **kwargs
         }
-        async with session.post("https://api.gptplus.one/chat-process", json=data) as response:
+        async with session.post("https://api.gptplus.one/chat-process", json=data, proxy=proxy) as response:
             response.raise_for_status()
             async for line in response.content:
                 try:

--- a/g4f/Provider/GptGo.py
+++ b/g4f/Provider/GptGo.py
@@ -18,7 +18,6 @@ class GptGo(AsyncGeneratorProvider):
         model: str,
         messages: list[dict[str, str]],
         proxy: str = None,
-        timeout: int = 30,
         **kwargs
     ) -> AsyncGenerator:
         headers = {
@@ -73,6 +72,7 @@ class GptGo(AsyncGeneratorProvider):
             ("model", "str"),
             ("messages", "list[dict[str, str]]"),
             ("stream", "bool"),
+            ("proxy", "str"),
             ("temperature", "float"),
         ]
         param = ", ".join([": ".join(p) for p in params])

--- a/g4f/Provider/GptGod.py
+++ b/g4f/Provider/GptGod.py
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 import secrets, json
 from aiohttp import ClientSession
-from typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
 from .helper import format_prompt
 
@@ -14,9 +14,10 @@ class GptGod(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
+        proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         headers = {
             "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
             "Accept": "text/event-stream",
@@ -24,7 +25,7 @@ class GptGod(AsyncGeneratorProvider):
             "Accept-Encoding": "gzip, deflate, br",
             "Alt-Used": "gptgod.site",
             "Connection": "keep-alive",
-            "Referer": "https://gptgod.site/",
+            "Referer": f"{cls.url}/",
             "Sec-Fetch-Dest": "empty",
             "Sec-Fetch-Mode": "cors",
             "Sec-Fetch-Site": "same-origin",
@@ -37,7 +38,7 @@ class GptGod(AsyncGeneratorProvider):
             "content": prompt,
             "id": secrets.token_hex(16).zfill(32)
         }
-        async with session.get(f"{cls.url}/api/session/free/gpt3p5", params=data) as response:
+        async with session.get(f"{cls.url}/api/session/free/gpt3p5", params=data, proxy=proxy) as response:
             response.raise_for_status()
             event = None
             async for line in response.content:

--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -4,7 +4,7 @@ import uuid
 
 from aiohttp import ClientSession
 
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
 
 models = {
@@ -39,11 +39,11 @@ class Liaobots(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         auth: str = None,
         proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         model = model if model in models else "gpt-3.5-turbo"
         headers = {
             "authority": "liaobots.com",

--- a/g4f/Provider/Myshell.py
+++ b/g4f/Provider/Myshell.py
@@ -28,6 +28,7 @@ class Myshell(AsyncGeneratorProvider):
         cls,
         model: str,
         messages: list[dict[str, str]],
+        proxy: str = None,
         timeout: int = 90,
         **kwargs
     ) -> AsyncGenerator:
@@ -47,7 +48,8 @@ class Myshell(AsyncGeneratorProvider):
         async with session.ws_connect(
             "wss://api.myshell.ai/ws/?EIO=4&transport=websocket",
             autoping=False,
-            timeout=timeout
+            timeout=timeout,
+            proxy=proxy
         ) as wss:
             # Send and receive hello message
             await wss.receive_str()

--- a/g4f/Provider/Phind.py
+++ b/g4f/Provider/Phind.py
@@ -19,6 +19,7 @@ class Phind(AsyncGeneratorProvider):
         model: str,
         messages: list[dict[str, str]],
         proxy: str = None,
+        timeout: int = 120,
         **kwargs
     ) -> AsyncGenerator:
         chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
@@ -43,7 +44,12 @@ class Phind(AsyncGeneratorProvider):
             "Origin": cls.url,
             "Referer": f"{cls.url}/"
         }
-        async with StreamSession(headers=headers, timeout=(5, 180), proxies={"https": proxy}, impersonate="chrome107") as session:
+        async with StreamSession(
+            headers=headers,
+            timeout=(5, timeout),
+            proxies={"https": proxy},
+            impersonate="chrome107"
+        ) as session:
             async with session.post(f"{cls.url}/api/infer/answer", json=data) as response:
                 response.raise_for_status()
                 new_lines = 0
@@ -71,6 +77,7 @@ class Phind(AsyncGeneratorProvider):
             ("messages", "list[dict[str, str]]"),
             ("stream", "bool"),
             ("proxy", "str"),
+            ("timeout", "int"),
         ]
         param = ", ".join([": ".join(p) for p in params])
         return f"g4f.provider.{cls.__name__} supports: ({param})"

--- a/g4f/Provider/Vercel.py
+++ b/g4f/Provider/Vercel.py
@@ -2,7 +2,7 @@ from __future__ import annotations
 
 import json, base64, requests, execjs, random, uuid
 
-from ..typing import Any, TypedDict, CreateResult
+from ..typing import Messages, TypedDict, CreateResult
 from .base_provider import BaseProvider
 from abc import abstractmethod
 
@@ -17,8 +17,9 @@ class Vercel(BaseProvider):
     @abstractmethod
     def create_completion(
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         stream: bool,
+        proxy: str = None,
         **kwargs
     ) -> CreateResult:
         if not model:
@@ -52,15 +53,18 @@ class Vercel(BaseProvider):
            'model'       : model_info[model]['id'],
            'messages'    : messages,
            'playgroundId': str(uuid.uuid4()),
-           'chatIndex'   : 0} | model_info[model]['default_params']
+           'chatIndex'   : 0,
+           **model_info[model]['default_params'],
+           **kwargs
+       }
 
        max_retries = kwargs.get('max_retries', 20)
        for i in range(max_retries):
            response = requests.post('https://sdk.vercel.ai/api/generate',
-               headers=headers, json=json_data, stream=True)
+               headers=headers, json=json_data, stream=True, proxies={"https": proxy})
            try:
                response.raise_for_status()
-           except:
+           except Exception:
                continue
            for token in response.iter_content(chunk_size=None):
                yield token.decode()

--- a/g4f/Provider/Vitalentum.py
+++ b/g4f/Provider/Vitalentum.py
@@ -42,7 +42,7 @@ class Vitalentum(AsyncGeneratorProvider):
         async with ClientSession(
             headers=headers
         ) as session:
-            async with session.post(cls.url + "/api/converse-edge", json=data, proxy=proxy) as response:
+            async with session.post(f"{cls.url}/api/converse-edge", json=data, proxy=proxy) as response:
                 response.raise_for_status()
                 async for line in response.content:
                     line = line.decode()

--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -38,6 +38,68 @@ from .deprecated import *
 from .needs_auth import *
 from .unfinished import *
 
+class ProviderUtils:
+    convert: dict[str, BaseProvider] = {
+        'AItianhu': AItianhu,
+        'AItianhuSpace': AItianhuSpace,
+        'Acytoo': Acytoo,
+        'AiAsk': AiAsk,
+        'AiService': AiService,
+        'Aibn': Aibn,
+        'Aichat': Aichat,
+        'Ails': Ails,
+        'Aivvm': Aivvm,
+        'AsyncGeneratorProvider': AsyncGeneratorProvider,
+        'AsyncProvider': AsyncProvider,
+        'Bard': Bard,
+        'BaseProvider': BaseProvider,
+        'Bing': Bing,
+        'ChatBase': ChatBase,
+        'ChatForAi': ChatForAi,
+        'Chatgpt4Online': Chatgpt4Online,
+        'ChatgptAi': ChatgptAi,
+        'ChatgptDemo': ChatgptDemo,
+        'ChatgptDuo': ChatgptDuo,
+        'ChatgptLogin': ChatgptLogin,
+        'ChatgptX': ChatgptX,
+        'CodeLinkAva': CodeLinkAva,
+        'Cromicle': Cromicle,
+        'DeepAi': DeepAi,
+        'DfeHub': DfeHub,
+        'EasyChat': EasyChat,
+        'Equing': Equing,
+        'FastGpt': FastGpt,
+        'Forefront': Forefront,
+        'FreeGpt': FreeGpt,
+        'GPTalk': GPTalk,
+        'GetGpt': GetGpt,
+        'GptForLove': GptForLove,
+        'GptGo': GptGo,
+        'GptGod': GptGod,
+        'H2o': H2o,
+        'HuggingChat': HuggingChat,
+        'Komo': Komo,
+        'Liaobots': Liaobots,
+        'Lockchat': Lockchat,
+        'MikuChat': MikuChat,
+        'Myshell': Myshell,
+        'Opchatgpts': Opchatgpts,
+        'OpenAssistant': OpenAssistant,
+        'OpenaiChat': OpenaiChat,
+        'PerplexityAi': PerplexityAi,
+        'Phind': Phind,
+        'Raycast': Raycast,
+        'Theb': Theb,
+        'V50': V50,
+        'Vercel': Vercel,
+        'Vitalentum': Vitalentum,
+        'Wewordle': Wewordle,
+        'Wuguokai': Wuguokai,
+        'Ylokh': Ylokh,
+        'You': You,
+        'Yqcloud': Yqcloud
+    }
+
 __all__ = [
     'BaseProvider',
     'AsyncProvider',
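
The new `ProviderUtils.convert` mapping makes providers addressable by their string names; a minimal lookup sketch (the keys are exactly those added in the dict above):

```py
from g4f.Provider import ProviderUtils

# Resolve a provider class from its name, then inspect its flags.
provider = ProviderUtils.convert['Bing']
print(provider.working, provider.supports_gpt_35_turbo)
```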

--- a/g4f/Provider/needs_auth/Bard.py
+++ b/g4f/Provider/needs_auth/Bard.py
@@ -6,7 +6,9 @@ import re
 
 from aiohttp import ClientSession
 
-from ..base_provider import AsyncProvider, format_prompt, get_cookies
+from ...typing import Messages
+from ..base_provider import AsyncProvider
+from ..helper import format_prompt, get_cookies
 
 class Bard(AsyncProvider):
@@ -19,25 +21,22 @@ class Bard(AsyncProvider):
     async def create_async(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         proxy: str = None,
         cookies: dict = None,
         **kwargs
     ) -> str:
         prompt = format_prompt(messages)
-        if proxy and "://" not in proxy:
-            proxy = f"http://{proxy}"
         if not cookies:
             cookies = get_cookies(".google.com")
 
         headers = {
             'authority': 'bard.google.com',
-            'origin': 'https://bard.google.com',
-            'referer': 'https://bard.google.com/',
+            'origin': cls.url,
+            'referer': f'{cls.url}/',
             'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
             'x-same-domain': '1',
         }
 
         async with ClientSession(
             cookies=cookies,
             headers=headers
@@ -67,7 +66,6 @@ class Bard(AsyncProvider):
             'lamda',
             'BardFrontendService'
         ])
-
         async with session.post(
             f'{cls.url}/_/BardChatUi/data/{intents}/StreamGenerate',
             data=data,

--- a/g4f/Provider/needs_auth/HuggingChat.py
+++ b/g4f/Provider/needs_auth/HuggingChat.py
@@ -4,8 +4,9 @@ import json, uuid
 
 from aiohttp import ClientSession
 
-from ...typing import AsyncGenerator
-from ..base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt, get_cookies
 
 class HuggingChat(AsyncGeneratorProvider):
@@ -18,12 +19,12 @@ class HuggingChat(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         stream: bool = True,
         proxy: str = None,
         cookies: dict = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         model = model if model else cls.model
         if proxy and "://" not in proxy:
             proxy = f"http://{proxy}"

--- a/g4f/Provider/needs_auth/OpenAssistant.py
+++ b/g4f/Provider/needs_auth/OpenAssistant.py
@@ -4,8 +4,9 @@ import json
 
 from aiohttp import ClientSession
 
-from ...typing import Any, AsyncGenerator
-from ..base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt, get_cookies
 
 class OpenAssistant(AsyncGeneratorProvider):
@@ -18,11 +19,11 @@ class OpenAssistant(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         proxy: str = None,
         cookies: dict = None,
-        **kwargs: Any
-    ) -> AsyncGenerator:
+        **kwargs
+    ) -> AsyncResult:
         if not cookies:
             cookies = get_cookies("open-assistant.io")

--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -4,7 +4,7 @@ import uuid, json, time
 
 from ..base_provider import AsyncGeneratorProvider
 from ..helper import get_browser, get_cookies, format_prompt
-from ...typing import AsyncGenerator
+from ...typing import AsyncResult, Messages
 from ...requests import StreamSession
 
 class OpenaiChat(AsyncGeneratorProvider):
@@ -18,13 +18,13 @@ class OpenaiChat(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         proxy: str = None,
+        timeout: int = 120,
         access_token: str = None,
         cookies: dict = None,
-        timeout: int = 30,
-        **kwargs: dict
-    ) -> AsyncGenerator:
+        **kwargs
+    ) -> AsyncResult:
         proxies = {"https": proxy}
         if not access_token:
             access_token = await cls.get_access_token(cookies, proxies)
@@ -32,7 +32,12 @@ class OpenaiChat(AsyncGeneratorProvider):
             "Accept": "text/event-stream",
             "Authorization": f"Bearer {access_token}",
         }
-        async with StreamSession(proxies=proxies, headers=headers, impersonate="chrome107", timeout=timeout) as session:
+        async with StreamSession(
+            proxies=proxies,
+            headers=headers,
+            impersonate="chrome107",
+            timeout=timeout
+        ) as session:
             messages = [
                 {
                     "id": str(uuid.uuid4()),

--- a/g4f/Provider/needs_auth/Raycast.py
+++ b/g4f/Provider/needs_auth/Raycast.py
@@ -4,7 +4,7 @@ import json
 
 import requests
 
-from ...typing import Any, CreateResult
+from ...typing import CreateResult, Messages
 from ..base_provider import BaseProvider
 
@@ -19,9 +19,10 @@ class Raycast(BaseProvider):
     @staticmethod
     def create_completion(
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         stream: bool,
-        **kwargs: Any,
+        proxy: str = None,
+        **kwargs,
     ) -> CreateResult:
         auth = kwargs.get('auth')
         headers = {
@@ -47,7 +48,13 @@ class Raycast(BaseProvider):
             "system_instruction": "markdown",
             "temperature": 0.5
         }
-        response = requests.post("https://backend.raycast.com/api/v1/ai/chat_completions", headers=headers, json=data, stream=True)
+        response = requests.post(
+            "https://backend.raycast.com/api/v1/ai/chat_completions",
+            headers=headers,
+            json=data,
+            stream=True,
+            proxies={"https": proxy}
+        )
         for token in response.iter_lines():
             if b'data: ' not in token:
                 continue

--- a/g4f/Provider/needs_auth/Theb.py
+++ b/g4f/Provider/needs_auth/Theb.py
@@ -2,11 +2,11 @@ from __future__ import annotations
 
 import json
 import random
 import requests
 
-from ...typing import Any, CreateResult
+from ...typing import Any, CreateResult, Messages
 from ..base_provider import BaseProvider
+from ..helper import format_prompt
 
 class Theb(BaseProvider):
@@ -19,12 +19,11 @@ class Theb(BaseProvider):
     @staticmethod
     def create_completion(
         model: str,
-        messages: list[dict[str, str]],
-        stream: bool, **kwargs: Any) -> CreateResult:
-
-        conversation = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
-        conversation += "\nassistant: "
+        messages: Messages,
+        stream: bool,
+        proxy: str = None,
+        **kwargs
+    ) -> CreateResult:
         auth = kwargs.get("auth", {
             "bearer_token":"free",
             "org_id":"theb",
@@ -54,7 +53,7 @@ class Theb(BaseProvider):
         req_rand = random.randint(100000000, 9999999999)
 
         json_data: dict[str, Any] = {
-            "text"        : conversation,
+            "text"        : format_prompt(messages),
             "category"    : "04f58f64a4aa4191a957b47290fee864",
             "model"       : "ee8d4f29cb7047f78cbe84313ed6ace8",
             "model_params": {
@@ -67,8 +66,13 @@ class Theb(BaseProvider):
             }
         }
 
-        response = requests.post(f"https://beta.theb.ai/api/conversation?org_id={org_id}&req_rand={req_rand}",
-            headers=headers, json=json_data, stream=True)
+        response = requests.post(
+            f"https://beta.theb.ai/api/conversation?org_id={org_id}&req_rand={req_rand}",
+            headers=headers,
+            json=json_data,
+            stream=True,
+            proxies={"https": proxy}
+        )
         response.raise_for_status()
         content = ""

View File

@ -2,10 +2,10 @@ from __future__ import annotations
from requests import get from requests import get
from g4f.models import Model, ModelUtils from g4f.models import Model, ModelUtils
from .Provider import BaseProvider from .Provider import BaseProvider
from .typing import CreateResult, Union from .typing import Messages, CreateResult, Union
from .debug import logging from .debug import logging
version = '0.1.5.6' version = '0.1.5.7'
version_check = True version_check = True
def check_pypi_version() -> None: def check_pypi_version() -> None:
@ -27,19 +27,19 @@ def get_model_and_provider(model : Union[Model, str],
if model in ModelUtils.convert: if model in ModelUtils.convert:
model = ModelUtils.convert[model] model = ModelUtils.convert[model]
else: else:
raise Exception(f'The model: {model} does not exist') raise ValueError(f'The model: {model} does not exist')
if not provider: if not provider:
provider = model.best_provider provider = model.best_provider
if not provider: if not provider:
raise Exception(f'No provider found for model: {model}') raise RuntimeError(f'No provider found for model: {model}')
if not provider.working: if not provider.working:
raise Exception(f'{provider.__name__} is not working') raise RuntimeError(f'{provider.__name__} is not working')
if not provider.supports_stream and stream: if not provider.supports_stream and stream:
raise Exception(f'ValueError: {provider.__name__} does not support "stream" argument') raise ValueError(f'{provider.__name__} does not support "stream" argument')
if logging: if logging:
print(f'Using {provider.__name__} provider') print(f'Using {provider.__name__} provider')
@ -48,17 +48,20 @@ def get_model_and_provider(model : Union[Model, str],
class ChatCompletion: class ChatCompletion:
@staticmethod @staticmethod
def create(model: Union[Model, str], def create(
messages : list[dict[str, str]], model: Union[Model, str],
messages : Messages,
provider : Union[type[BaseProvider], None] = None, provider : Union[type[BaseProvider], None] = None,
stream : bool = False, stream : bool = False,
auth : Union[str, None] = None, **kwargs) -> Union[CreateResult, str]: auth : Union[str, None] = None,
**kwargs
) -> Union[CreateResult, str]:
model, provider = get_model_and_provider(model, provider, stream) model, provider = get_model_and_provider(model, provider, stream)
if provider.needs_auth and not auth: if provider.needs_auth and not auth:
raise Exception( raise ValueError(
f'ValueError: {provider.__name__} requires authentication (use auth=\'cookie or token or jwt ...\' param)') f'{provider.__name__} requires authentication (use auth=\'cookie or token or jwt ...\' param)')
if provider.needs_auth: if provider.needs_auth:
kwargs['auth'] = auth kwargs['auth'] = auth
@ -69,10 +72,14 @@ class ChatCompletion:
@staticmethod @staticmethod
async def create_async( async def create_async(
model: Union[Model, str], model: Union[Model, str],
messages: list[dict[str, str]], messages: Messages,
provider: Union[type[BaseProvider], None] = None, provider: Union[type[BaseProvider], None] = None,
stream: bool = False,
**kwargs **kwargs
) -> str: ) -> str:
if stream:
raise ValueError(f'"create_async" does not support "stream" argument')
model, provider = get_model_and_provider(model, provider, False) model, provider = get_model_and_provider(model, provider, False)
return await provider.create_async(model.name, messages, **kwargs) return await provider.create_async(model.name, messages, **kwargs)
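A short sketch of the new guard in `create_async`: passing `stream=True` is now rejected up front instead of being silently ignored, before any provider is contacted:

```py
import asyncio
import g4f

async def main():
    try:
        await g4f.ChatCompletion.create_async(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Hello"}],
            stream=True,  # rejected immediately by the new check
        )
    except ValueError as e:
        print(e)  # '"create_async" does not support "stream" argument'

asyncio.run(main())
```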
View File
@ -16,7 +16,7 @@ def run_gui(host: str = '0.0.0.0', port: int = 80, debug: bool = False) -> None:
view_func = site.routes[route]['function'], view_func = site.routes[route]['function'],
methods = site.routes[route]['methods'], methods = site.routes[route]['methods'],
) )
backend_api = Backend_Api(app) backend_api = Backend_Api(app)
for route in backend_api.routes: for route in backend_api.routes:
app.add_url_rule( app.add_url_rule(
View File
@ -1,161 +1,178 @@
<!DOCTYPE html> <!DOCTYPE html>
<html lang="en"> <html lang="en">
<head>
<meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0 maximum-scale=1.0">
<meta name="description" content="A conversational AI system that listens, learns, and challenges">
<meta property="og:title" content="ChatGPT">
<meta property="og:image" content="https://openai.com/content/images/2022/11/ChatGPT.jpg">
<meta property="og:description" content="A conversational AI system that listens, learns, and challenges">
<meta property="og:url" content="https://g4f.ai">
<link rel="stylesheet" href="/assets/css/style.css">
<link rel="apple-touch-icon" sizes="180x180" href="/assets/img/apple-touch-icon.png">
<link rel="icon" type="image/png" sizes="32x32" href="/assets/img/favicon-32x32.png">
<link rel="icon" type="image/png" sizes="16x16" href="/assets/img/favicon-16x16.png">
<link rel="manifest" href="/assets/img/site.webmanifest">
<script src="/assets/js/icons.js"></script>
<script src="/assets/js/highlightjs-copy.min.js"></script>
<script src="/assets/js/chat.v2.js" defer></script>
<script src="https://cdn.jsdelivr.net/npm/markdown-it@13.0.1/dist/markdown-it.min.js"></script>
<link rel="stylesheet" href="//cdn.jsdelivr.net/gh/highlightjs/cdn-release@11.7.0/build/styles/base16/dracula.min.css">
<script>
const user_image = `<img src="/assets/img/user.png" alt="your avatar">`;
const gpt_image = `<img src="/assets/img/gpt.png" alt="your avatar">`;
</script>
<style>
.hljs {
color: #e9e9f4;
background: #28293629;
border-radius: var(--border-radius-1);
border: 1px solid var(--blur-border);
font-size: 15px;
}
#message-input { <head>
margin-right: 30px; <meta charset="UTF-8">
height: 80px; <meta http-equiv="X-UA-Compatible" content="IE=edge">
} <meta name="viewport" content="width=device-width, initial-scale=1.0 maximum-scale=1.0">
<meta name="description" content="A conversational AI system that listens, learns, and challenges">
<meta property="og:title" content="ChatGPT">
<meta property="og:image" content="https://openai.com/content/images/2022/11/ChatGPT.jpg">
<meta property="og:description" content="A conversational AI system that listens, learns, and challenges">
<meta property="og:url" content="https://g4f.ai">
<link rel="stylesheet" href="/assets/css/style.css">
<link rel="apple-touch-icon" sizes="180x180" href="/assets/img/apple-touch-icon.png">
<link rel="icon" type="image/png" sizes="32x32" href="/assets/img/favicon-32x32.png">
<link rel="icon" type="image/png" sizes="16x16" href="/assets/img/favicon-16x16.png">
<link rel="manifest" href="/assets/img/site.webmanifest">
<script src="/assets/js/icons.js"></script>
<script src="/assets/js/highlightjs-copy.min.js"></script>
<script src="/assets/js/chat.v2.js" defer></script>
<script src="https://cdn.jsdelivr.net/npm/markdown-it@13.0.1/dist/markdown-it.min.js"></script>
<link rel="stylesheet"
href="//cdn.jsdelivr.net/gh/highlightjs/cdn-release@11.7.0/build/styles/base16/dracula.min.css">
<script>
const user_image = `<img src="/assets/img/user.png" alt="your avatar">`;
const gpt_image = `<img src="/assets/img/gpt.png" alt="your avatar">`;
</script>
<style>
.hljs {
color: #e9e9f4;
background: #28293629;
border-radius: var(--border-radius-1);
border: 1px solid var(--blur-border);
font-size: 15px;
}
#message-input::-webkit-scrollbar { #message-input {
width: 5px; margin-right: 30px;
} height: 80px;
}
/* Track */ #message-input::-webkit-scrollbar {
#message-input::-webkit-scrollbar-track { width: 5px;
background: #f1f1f1; }
}
/* Handle */
#message-input::-webkit-scrollbar-thumb {
background: #c7a2ff;
}
/* Handle on hover */ /* Track */
#message-input::-webkit-scrollbar-thumb:hover { #message-input::-webkit-scrollbar-track {
background: #8b3dff; background: #f1f1f1;
} }
</style>
<script src="/assets/js/highlight.min.js"></script> /* Handle */
<script>window.conversation_id = `{{chat_id}}`</script> #message-input::-webkit-scrollbar-thumb {
<title>g4f - gui</title> background: #c7a2ff;
</head> }
<body>
<div class="gradient"></div> /* Handle on hover */
<div class="row"> #message-input::-webkit-scrollbar-thumb:hover {
<div class="box conversations"> background: #8b3dff;
<div class="top"> }
<button class="new_convo" onclick="new_conversation()"> </style>
<i class="fa-regular fa-plus"></i> <script src="/assets/js/highlight.min.js"></script>
<span>New Conversation</span> <script>window.conversation_id = `{{chat_id}}`</script>
</button> <title>g4f - gui</title>
</head>
<body>
<div class="gradient"></div>
<div class="row">
<div class="box conversations">
<div class="top">
<button class="new_convo" onclick="new_conversation()">
<i class="fa-regular fa-plus"></i>
<span>New Conversation</span>
</button>
</div>
<div class="bottom_buttons">
<button onclick="delete_conversations()">
<i class="fa-regular fa-trash"></i>
<span>Clear Conversations</span>
</button>
<div class="info">
<i class="fa-brands fa-discord"></i>
<span class="convo-title">telegram: <a href="https://t.me/g4f_official">@g4f_official</a><br>
</span>
</div> </div>
<div class="bottom_buttons"> <div class="info">
<button onclick="delete_conversations()"> <i class="fa-brands fa-github"></i>
<i class="fa-regular fa-trash"></i> <span class="convo-title">github: <a href="https://github.com/xtekky/gpt4free">@gpt4free</a><br>
<span>Clear Conversations</span> leave a star ; )
</button> </span>
<div class="info"> </div>
<i class="fa-brands fa-discord"></i> </div>
<span class="convo-title">telegram: <a href="https://t.me/g4f_official">@g4f_official</a><br> </div>
</span> <div class="conversation">
</div> <div class="stop_generating stop_generating-hidden">
<div class="info"> <button id="cancelButton">
<i class="fa-brands fa-github"></i> <span>Stop Generating</span>
<span class="convo-title">github: <a href="https://github.com/xtekky/gpt4free">@gpt4free</a><br> <i class="fa-regular fa-stop"></i>
leave a star ; ) </button>
</span> </div>
<div class="box" id="messages">
</div>
<div class="user-input">
<div class="box input-box">
<textarea id="message-input" placeholder="Ask a question" cols="30" rows="10"
style="white-space: pre-wrap;resize: none;"></textarea>
<div id="send-button">
<i class="fa-solid fa-paper-plane-top"></i>
</div> </div>
</div> </div>
</div> </div>
<div class="conversation"> <div class="buttons">
<div class="stop_generating stop_generating-hidden"> <div class="field">
<button id="cancelButton"> <input type="checkbox" id="switch" />
<span>Stop Generating</span> <label for="switch"></label>
<i class="fa-regular fa-stop"></i> <span class="about">Web Access</span>
</button>
</div> </div>
<div class="box" id="messages"> <div class="field">
<select name="model" id="model">
<option value="gpt-3.5-turbo" selected>gpt-3.5</option>
<option value="gpt-4">gpt-4</option>
<option value="gpt-3.5-turbo-0613">gpt-3.5 fast</option>
<option value="gpt-3.5-turbo-16k">gpt-3.5 16k</option>
</select>
</div> </div>
<div class="user-input"> <div class="field">
<div class="box input-box"> <select name="jailbreak" id="jailbreak">
<textarea id="message-input" placeholder="Ask a question" cols="30" rows="10" style="white-space: pre-wrap;resize: none;"></textarea> <option value="default" selected>Set Jailbreak</option>
<div id="send-button"> <option value="gpt-math-1.0">math 1.0</option>
<i class="fa-solid fa-paper-plane-top"></i> <option value="gpt-dude-1.0">dude 1.0</option>
</div> <option value="gpt-dan-1.0">dan 1.0</option>
</div> <option value="gpt-dan-2.0">dan 2.0</option>
</div> <option value="gpt-dev-2.0">dev 2.0</option>
<div class="buttons"> <option value="gpt-evil-1.0">evil 1.0</option>
</select>
<div class="field"> <div class="field">
<input type="checkbox" id="switch"/> <select name="provider" id="provider">
<label for="switch"></label> <option value="g4f.Provider.Auto" selected>Set Provider</option>
<span class="about">Web Access</span> <option value="g4f.Provider.AItianhuSpace">AItianhuSpace</option>
</div> <option value="g4f.Provider.ChatgptLogin">ChatgptLogin</option>
<div class="field"> <option value="g4f.Provider.ChatgptDemo">ChatgptDemo</option>
<select name="model" id="model"> <option value="g4f.Provider.ChatgptDuo">ChatgptDuo</option>
<option value="gpt-3.5-turbo" selected>gpt-3.5</option> <option value="g4f.Provider.Vitalentum">Vitalentum</option>
<option value="gpt-4">gpt-4</option> <option value="g4f.Provider.ChatgptAi">ChatgptAi</option>
<option value="gpt-3.5-turbo-0613">gpt-3.5 fast</option> <option value="g4f.Provider.AItianhu">AItianhu</option>
<option value="gpt-3.5-turbo-16k">gpt-3.5 16k</option> <option value="g4f.Provider.ChatBase">ChatBase</option>
<option value="g4f.Provider.Liaobots">Liaobots</option>
<option value="g4f.Provider.Yqcloud">Yqcloud</option>
<option value="g4f.Provider.Myshell">Myshell</option>
<option value="g4f.Provider.FreeGpt">FreeGpt</option>
<option value="g4f.Provider.Vercel">Vercel</option>
<option value="g4f.Provider.DeepAi">DeepAi</option>
<option value="g4f.Provider.Aichat">Aichat</option>
<option value="g4f.Provider.GPTalk">GPTalk</option>
<option value="g4f.Provider.GptGod">GptGod</option>
<option value="g4f.Provider.AiAsk">AiAsk</option>
<option value="g4f.Provider.GptGo">GptGo</option>
<option value="g4f.Provider.Ylokh">Ylokh</option>
<option value="g4f.Provider.Bard">Bard</option>
<option value="g4f.Provider.Aibn">Aibn</option>
<option value="g4f.Provider.Bing">Bing</option>
<option value="g4f.Provider.You">You</option>
<option value="g4f.Provider.H2o">H2o</option>
<option value="g4f.Provider.Aivvm">Aivvm</option>
</select> </select>
</div> </div>
<div class="field">
<select name="jailbreak" id="jailbreak">
<option value="default" selected>default</option>
<option value="gpt-math-1.0">math 1.0</option>
<option value="gpt-dude-1.0">dude 1.0</option>
<option value="gpt-dan-1.0">dan 1.0</option>
<option value="gpt-dan-2.0">dan 2.0</option>
<option value="gpt-dev-2.0">dev 2.0</option>
<option value="gpt-evil-1.0">evil 1.0</option>
</select>
<form class="color-picker" action="">
<fieldset>
<legend class="visually-hidden">Pick a color scheme</legend>
<label for="light" class="visually-hidden">Light</label>
<input type="radio" name="theme" id="light" checked>
<label for="pink" class="visually-hidden">Pink theme</label>
<input type="radio" id="pink" name="theme">
<label for="blue" class="visually-hidden">Blue theme</label>
<input type="radio" id="blue" name="theme">
<label for="green" class="visually-hidden">Green theme</label>
<input type="radio" id="green" name="theme">
<label for="dark" class="visually-hidden">Dark theme</label>
<input type="radio" id="dark" name="theme">
</fieldset>
</form>
</div>
</div> </div>
</div> </div>
</div> </div>
</div>
<div class="mobile-sidebar"> <div class="mobile-sidebar">
<i class="fa-solid fa-bars"></i> <i class="fa-solid fa-bars"></i>
</div> </div>
<script> <script>
</script> </script>
</body> </body>
</html> </html>
View File
@ -52,7 +52,7 @@ const remove_cancel_button = async () => {
const ask_gpt = async (message) => { const ask_gpt = async (message) => {
try { try {
message_input.value = ``; message_input.value = ``;
message_input.innerHTML = ``; message_input.innerHTML = ``;
message_input.innerText = ``; message_input.innerText = ``;
@ -60,10 +60,11 @@ const ask_gpt = async (message) => {
window.scrollTo(0, 0); window.scrollTo(0, 0);
window.controller = new AbortController(); window.controller = new AbortController();
jailbreak = document.getElementById("jailbreak"); jailbreak = document.getElementById("jailbreak");
model = document.getElementById("model"); provider = document.getElementById("provider");
prompt_lock = true; model = document.getElementById("model");
window.text = ``; prompt_lock = true;
window.text = ``;
window.token = message_id(); window.token = message_id();
stop_generating.classList.remove(`stop_generating-hidden`); stop_generating.classList.remove(`stop_generating-hidden`);
@ -109,12 +110,15 @@ const ask_gpt = async (message) => {
headers: { headers: {
"content-type": `application/json`, "content-type": `application/json`,
accept: `text/event-stream`, accept: `text/event-stream`,
// v: `1.0.0`,
// ts: Date.now().toString(),
}, },
body: JSON.stringify({ body: JSON.stringify({
conversation_id: window.conversation_id, conversation_id: window.conversation_id,
action: `_ask`, action: `_ask`,
model: model.options[model.selectedIndex].value, model: model.options[model.selectedIndex].value,
jailbreak: jailbreak.options[jailbreak.selectedIndex].value, jailbreak: jailbreak.options[jailbreak.selectedIndex].value,
provider: provider.options[provider.selectedIndex].value,
meta: { meta: {
id: window.token, id: window.token,
content: { content: {
View File
@ -1,4 +1,18 @@
from g4f.gui import run_gui from g4f.gui import run_gui
from argparse import ArgumentParser
if __name__ == '__main__': if __name__ == '__main__':
run_gui()
parser = ArgumentParser(description='Run the GUI')
parser.add_argument('-host', type=str, default='0.0.0.0', help='hostname')
parser.add_argument('-port', type=int, default=80, help='port')
parser.add_argument('-debug', action='store_true', help='debug mode')
args = parser.parse_args()
port = args.port
host = args.host
debug = args.debug
run_gui(host, port, debug)
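The same options remain available programmatically through the `run_gui` signature shown above; a minimal sketch (the host and port values here are arbitrary examples):

```py
from g4f.gui import run_gui

# equivalent to passing -host, -port and -debug on the command line
run_gui(host="127.0.0.1", port=8080, debug=True)
```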
View File
@ -1,9 +1,11 @@
import g4f import g4f
from flask import request from flask import request
from threading import Thread
from .internet import search from .internet import search
from .config import special_instructions from .config import special_instructions
from .provider import get_provider
g4f.logging = True
class Backend_Api: class Backend_Api:
def __init__(self, app) -> None: def __init__(self, app) -> None:
@ -31,22 +33,25 @@ class Backend_Api:
conversation = request.json['meta']['content']['conversation'] conversation = request.json['meta']['content']['conversation']
prompt = request.json['meta']['content']['parts'][0] prompt = request.json['meta']['content']['parts'][0]
model = request.json['model'] model = request.json['model']
provider = get_provider(request.json.get('provider'))
messages = special_instructions[jailbreak] + conversation + search(internet_access, prompt) + [prompt] messages = special_instructions[jailbreak] + conversation + search(internet_access, prompt) + [prompt]
def stream(): def stream():
answer = g4f.ChatCompletion.create(model = model, if provider:
messages = messages, stream=True) answer = g4f.ChatCompletion.create(model=model,
provider=provider, messages=messages, stream=True)
else:
answer = g4f.ChatCompletion.create(model=model,
messages=messages, stream=True)
for token in answer: for token in answer:
yield token yield token
return self.app.response_class(stream(), mimetype='text/event-stream') return self.app.response_class(stream(), mimetype='text/event-stream')
except Exception as e: except Exception as e:
print(e)
return { return {
'_token': 'anerroroccuredmf',
'_action': '_ask', '_action': '_ask',
'success': False, 'success': False,
"error": f"an error occured {str(e)}"}, 400 "error": f"an error occured {str(e)}"}, 400
View File
@ -0,0 +1,17 @@
import g4f
def get_provider(provider: str) -> g4f.Provider.BaseProvider:
if isinstance(provider, str):
print(provider)
if provider == 'g4f.Provider.Auto':
return None
if provider in g4f.Provider.ProviderUtils.convert:
return g4f.Provider.ProviderUtils.convert[provider]
else:
return None
else:
return None
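A quick sketch of how this helper behaves (the import path is assumed from the GUI package layout; names not found in `ProviderUtils.convert` fall back to `None`, so g4f picks a provider itself):

```py
from g4f.gui.server.provider import get_provider  # assumed module path

print(get_provider("g4f.Provider.Auto"))     # None -> let g4f choose automatically
print(get_provider("g4f.Provider.Bing"))     # the Bing provider class, if registered
print(get_provider("g4f.Provider.Unknown"))  # None for names not in ProviderUtils.convert
```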
View File
@ -9,7 +9,6 @@ from .Provider import (
ChatgptDuo, ChatgptDuo,
Vitalentum, Vitalentum,
ChatgptAi, ChatgptAi,
ChatForAi,
AItianhu, AItianhu,
ChatBase, ChatBase,
Liaobots, Liaobots,
@ -23,13 +22,13 @@ from .Provider import (
GptGod, GptGod,
AiAsk, AiAsk,
GptGo, GptGo,
Aivvm,
Ylokh, Ylokh,
Bard, Bard,
Aibn, Aibn,
Bing, Bing,
You, You,
H2o H2o,
Aivvm
) )
@dataclass(unsafe_hash=True) @dataclass(unsafe_hash=True)
@ -46,7 +45,7 @@ default = Model(
Yqcloud, # Answers short questions in chinese Yqcloud, # Answers short questions in chinese
ChatBase, # Don't want to answer creatively ChatBase, # Don't want to answer creatively
ChatgptDuo, # Include search results ChatgptDuo, # Include search results
Aibn, Aichat, ChatForAi, ChatgptAi, ChatgptLogin, DeepAi, FreeGpt, GptGo, Myshell, Ylokh, Aibn, Aichat, ChatgptAi, ChatgptLogin, DeepAi, FreeGpt, GptGo, Myshell, Ylokh,
]) ])
) )
@ -55,7 +54,7 @@ gpt_35_long = Model(
name = 'gpt-3.5-turbo', name = 'gpt-3.5-turbo',
base_provider = 'openai', base_provider = 'openai',
best_provider = RetryProvider([ best_provider = RetryProvider([
AiAsk, Aibn, Aichat, ChatForAi, ChatgptAi, ChatgptDemo, ChatgptDuo, AiAsk, Aibn, Aichat, ChatgptAi, ChatgptDemo, ChatgptDuo,
FreeGpt, GptGo, Liaobots, Myshell, Vitalentum, Ylokh, You, Yqcloud, FreeGpt, GptGo, Liaobots, Myshell, Vitalentum, Ylokh, You, Yqcloud,
GPTalk, GptGod GPTalk, GptGod
]) ])
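With ChatForAi dropped from the retry chains, a caller can still assemble a custom fallback order by reusing the `Model`/`RetryProvider` pattern shown above; a sketch (assuming `RetryProvider` is exported from `g4f.Provider`):

```py
import g4f
from g4f.models import Model
from g4f.Provider import RetryProvider, DeepAi, GptGo, You  # RetryProvider export assumed

# hypothetical model entry that tries a hand-picked provider list in order
my_gpt_35 = Model(
    name="gpt-3.5-turbo",
    base_provider="openai",
    best_provider=RetryProvider([DeepAi, GptGo, You]),
)

print(g4f.ChatCompletion.create(
    model=my_gpt_35,
    messages=[{"role": "user", "content": "Hello"}],
))
```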
@ -66,16 +65,14 @@ gpt_35_turbo = Model(
name = 'gpt-3.5-turbo', name = 'gpt-3.5-turbo',
base_provider = 'openai', base_provider = 'openai',
best_provider = RetryProvider([ best_provider = RetryProvider([
DeepAi, ChatgptLogin, ChatgptAi, GptGo, AItianhu, Aichat, AItianhuSpace, Myshell, Aibn, ChatForAi, FreeGpt, Ylokh DeepAi, ChatgptLogin, ChatgptAi, GptGo, AItianhu, Aichat, AItianhuSpace, Myshell, Aibn, FreeGpt, Ylokh
]) ])
) )
gpt_4 = Model( gpt_4 = Model(
name = 'gpt-4', name = 'gpt-4',
base_provider = 'openai', base_provider = 'openai',
best_provider = RetryProvider([ best_provider = Bing
Aivvm, Bing
])
) )
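After this change, gpt-4 no longer retries Aivvm; a one-line check of the new binding:

```py
import g4f

# gpt-4 is now pinned to a single provider instead of a retry list
print(g4f.models.gpt_4.best_provider.__name__)  # expected: "Bing"
```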
# Bard # Bard
@ -168,31 +165,27 @@ gpt_35_turbo_16k = Model(
gpt_35_turbo_16k_0613 = Model( gpt_35_turbo_16k_0613 = Model(
name = 'gpt-3.5-turbo-16k-0613', name = 'gpt-3.5-turbo-16k-0613',
base_provider = 'openai', base_provider = 'openai')
best_provider = Aivvm)
gpt_35_turbo_0613 = Model( gpt_35_turbo_0613 = Model(
name = 'gpt-3.5-turbo-0613', name = 'gpt-3.5-turbo-0613',
base_provider = 'openai', base_provider = 'openai',
best_provider = Aivvm best_provider=Aivvm
) )
gpt_4_0613 = Model( gpt_4_0613 = Model(
name = 'gpt-4-0613', name = 'gpt-4-0613',
base_provider = 'openai', base_provider = 'openai'
best_provider = Aivvm
) )
gpt_4_32k = Model( gpt_4_32k = Model(
name = 'gpt-4-32k', name = 'gpt-4-32k',
base_provider = 'openai', base_provider = 'openai'
best_provider = Aivvm
) )
gpt_4_32k_0613 = Model( gpt_4_32k_0613 = Model(
name = 'gpt-4-32k-0613', name = 'gpt-4-32k-0613',
base_provider = 'openai', base_provider = 'openai'
best_provider = Aivvm
) )
text_ada_001 = Model( text_ada_001 = Model(
View File
@ -14,7 +14,7 @@ with open("requirements.txt") as f:
with open("etc/interference/requirements.txt") as f: with open("etc/interference/requirements.txt") as f:
api_required = f.read().splitlines() api_required = f.read().splitlines()
VERSION = '0.1.5.6' VERSION = '0.1.5.7'
DESCRIPTION = ( DESCRIPTION = (
"The official gpt4free repository | various collection of powerful language models" "The official gpt4free repository | various collection of powerful language models"
) )