Mirror of https://github.com/xtekky/gpt4free.git, synced 2024-12-27 21:21:41 +03:00

Merge pull request #1017 from hlohaus/mon: "Add proxy support to all providers"
Commit: 8a5c23f693
README.md
@@ -224,19 +224,15 @@ from g4f.Provider import (
     Bing,
     ChatBase,
     ChatgptAi,
-    ChatgptLogin,
-    CodeLinkAva,
     DeepAi,
     H2o,
     HuggingChat,
-    Opchatgpts,
     OpenAssistant,
     OpenaiChat,
     Raycast,
     Theb,
     Vercel,
     Vitalentum,
-    Wewordle,
     Ylokh,
     You,
     Yqcloud,
@@ -284,19 +280,18 @@ _providers = [
     g4f.Provider.Aichat,
     g4f.Provider.ChatBase,
     g4f.Provider.Bing,
-    g4f.Provider.CodeLinkAva,
     g4f.Provider.DeepAi,
     g4f.Provider.GptGo,
-    g4f.Provider.Wewordle,
     g4f.Provider.You,
     g4f.Provider.Yqcloud,
 ]

-async def run_provider(provider: g4f.Provider.AsyncProvider):
+async def run_provider(provider: g4f.Provider.BaseProvider):
     try:
-        response = await provider.create_async(
-            model=g4f.models.default.name,
+        response = await g4f.ChatCompletion.create_async(
+            model=g4f.models.default,
             messages=[{"role": "user", "content": "Hello"}],
+            provider=provider,
         )
         print(f"{provider.__name__}:", response)
     except Exception as e:
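`run_all` itself sits outside this hunk; assuming the `_providers` list and `run_provider` from the snippet above, a gather-based runner consistent with the README's `asyncio.run(run_all())` entry point would be:

```py
import asyncio

# Sketch of the runner the README invokes with asyncio.run(run_all());
# assumes _providers and run_provider as defined in the snippet above.
async def run_all():
    await asyncio.gather(*[run_provider(provider) for provider in _providers])
```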
@@ -311,6 +306,22 @@ async def run_all():
 asyncio.run(run_all())
 ```

+##### Proxy Support:
+
+All providers support specifying a proxy in the create function.
+
+```py
+import g4f
+
+response = await g4f.ChatCompletion.create(
+    model=g4f.models.default,
+    messages=[{"role": "user", "content": "Hello"}],
+    proxy="http://host:port",
+    # or socks5://user:pass@host:port
+)
+print(f"Result:", response)
+```
+
 ### interference openai-proxy api (use with openai python package)

 #### run interference from pypi package:
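A usage note on the added snippet: it awaits `g4f.ChatCompletion.create`, while the `run_provider` example earlier in this README awaits `g4f.ChatCompletion.create_async`. Written against that async entry point, a proxied call would look like this (host, port and credentials are placeholders):

```py
import asyncio
import g4f

async def main():
    # host, port and credentials below are placeholders
    response = await g4f.ChatCompletion.create_async(
        model=g4f.models.default,
        messages=[{"role": "user", "content": "Hello"}],
        proxy="socks5://user:pass@host:port",
    )
    print("Result:", response)

asyncio.run(main())
```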
@@ -521,38 +532,50 @@ if __name__ == "__main__":

 ## Contribute

-to add another provider, its very simple:
+#### Create Provider with AI Tool
+
+Call the "create_provider" script in your terminal:
+
+```bash
+$ python etc/tool/create_provider.py
+```
+
+1. Enter your name for the new provider.
+2. Copy & paste a cURL command from your browser developer tools.
+3. Let the AI create the provider for you.
+4. Customize the provider according to your needs.
+
+#### Create Provider

 0. Check out the current [list of potential providers](https://github.com/zukixa/cool-ai-stuff#ai-chat-websites), or find your own provider source!
 1. Create a new file in [g4f/provider](./g4f/provider) with the name of the Provider
 2. Implement a class that extends [BaseProvider](./g4f/provider/base_provider.py).

 ```py
-from .base_provider import BaseProvider
-from ..typing import CreateResult, Any
+from __future__ import annotations
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider

-class HogeService(BaseProvider):
-    url = "http://hoge.com"
-    working = True
+class HogeService(AsyncGeneratorProvider):
+    url = "https://chat-gpt.com"
     supports_gpt_35_turbo = True
+    working = True

-    @staticmethod
-    def create_completion(
+    @classmethod
+    async def create_async_generator(
+        cls,
         model: str,
-        messages: list[dict[str, str]],
-        stream: bool,
-        **kwargs: Any,
-    ) -> CreateResult:
-        pass
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        yield ""
 ```

-3. Here, you can adjust the settings, for example if the website does support streaming, set `working` to `True`...
-4. Write code to request the provider in `create_completion` and `yield` the response, _even if_ its a one-time response, do not hesitate to look at other providers for inspiration
+3. Here, you can adjust the settings, for example if the website does support streaming, set `supports_stream` to `True`...
+4. Write code to request the provider in `create_async_generator` and `yield` the response, _even if_ it is a one-time response; do not hesitate to look at other providers for inspiration.
 5. Add the Provider Name in [g4f/provider/**init**.py](./g4f/provider/__init__.py)

 ```py
-from .base_provider import BaseProvider
 from .HogeService import HogeService

 __all__ = [
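Filled in end to end, a provider following this template might look like the sketch below, intended as a module inside g4f/Provider. The endpoint URL and payload are hypothetical stand-ins for whatever the captured cURL command shows:

```py
from __future__ import annotations

from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt


class ExampleService(AsyncGeneratorProvider):
    url = "https://example-chat.invalid"  # hypothetical endpoint
    working = True
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        async with ClientSession() as session:
            data = {"prompt": format_prompt(messages)}  # hypothetical payload
            # forward the proxy to aiohttp per request, as the providers in this commit do
            async with session.post(f"{cls.url}/api/chat", json=data, proxy=proxy) as response:
                response.raise_for_status()
                async for chunk in response.content:
                    if chunk:
                        yield chunk.decode()
```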
etc/tool/create_provider.py
@@ -12,23 +12,13 @@ def read_code(text):
     if match:
         return match.group("code")

-def read_result(result):
-    lines = []
-    for line in result.split("\n"):
-        if (line.startswith("```")):
-            break
-        if (line):
-            lines.append(line)
-    explanation = "\n".join(lines) if lines else ""
-    return explanation, read_code(result)
-
 def input_command():
     print("Enter/Paste the cURL command. Ctrl-D or Ctrl-Z ( windows ) to save it.")
     contents = []
     while True:
         try:
             line = input()
-        except:
+        except EOFError:
             break
         contents.append(line)
     return "\n".join(contents)

@@ -41,12 +31,12 @@ from __future__ import annotations

 from aiohttp import ClientSession

-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
 from .helper import format_prompt


-class ChatgptDuo(AsyncGeneratorProvider):
+class ChatGpt(AsyncGeneratorProvider):
     url = "https://chat-gpt.com"
     supports_gpt_35_turbo = True
     working = True

@@ -55,9 +45,10 @@ class ChatgptDuo(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
+        proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         headers = {
             "authority": "chat-gpt.com",
             "accept": "application/json",

@@ -65,16 +56,16 @@ class ChatgptDuo(AsyncGeneratorProvider):
             "referer": f"{cls.url}/chat",
         }
         async with ClientSession(headers=headers) as session:
-            prompt = format_prompt(messages),
+            prompt = format_prompt(messages)
             data = {
                 "prompt": prompt,
-                "purpose": "ask",
+                "purpose": "",
             }
-            async with session.post(cls.url + "/api/chat", json=data) as response:
+            async with session.post(f"{cls.url}/api/chat", json=data, proxy=proxy) as response:
                 response.raise_for_status()
-                async for stream in response.content:
-                    if stream:
-                        yield stream.decode()
+                async for chunk in response.content:
+                    if chunk:
+                        yield chunk.decode()
 """

 if not path.isfile(provider_path):

@@ -95,18 +86,23 @@ Replace "hello" with `format_prompt(messages)`.
 And replace "gpt-3.5-turbo" with `model`.
 """

-    print("Create code...")
-    response = g4f.ChatCompletion.create(
+    response = []
+    for chunk in g4f.ChatCompletion.create(
         model=g4f.models.gpt_35_long,
         messages=[{"role": "user", "content": prompt}],
-        auth=True,
-        timeout=120,
-    )
-    print(response)
-    explanation, code = read_result(response)
+        timeout=300,
+        stream=True
+    ):
+        response.append(chunk)
+        print(chunk, end="", flush=True)
+    print()
+    response = "".join(response)
+
+    code = read_code(response)
     if code:
         with open(provider_path, "w") as file:
             file.write(code)
+        print("Saved at:", provider_path)
         with open(f"g4f/Provider/__init__.py", "a") as file:
             file.write(f"\nfrom .{name} import {name}")
     else:
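Only the tail of `read_code` is visible in this hunk; from `match.group("code")` one can infer it extracts the first fenced block via a named group. A plausible reconstruction, offered as an assumption:

```py
import re

def read_code(text: str):
    # Assumed shape of read_code: pull out the body of the first fenced
    # code block, tolerating an optional language tag (py or python).
    match = re.search(r"```(?:py|python)?\n(?P<code>[\s\S]+?)\n```", text)
    if match:
        return match.group("code")
```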
g4f/Provider/AItianhu.py
@@ -2,7 +2,7 @@ from __future__ import annotations

 import json

-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from ..requests import StreamSession
 from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies

@@ -16,12 +16,12 @@ class AItianhu(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         proxy: str = None,
         cookies: dict = None,
-        timeout: int = 30,
+        timeout: int = 120,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         if not cookies:
             cookies = get_cookies("www.aitianhu.com")
         data = {

g4f/Provider/AItianhuSpace.py
@@ -2,7 +2,7 @@ from __future__ import annotations

 import random, json

-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from ..requests import StreamSession
 from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies

@@ -20,13 +20,13 @@ class AItianhuSpace(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         proxy: str = None,
         domain: str = None,
         cookies: dict = None,
-        timeout: int = 30,
+        timeout: int = 120,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         if not model:
             model = "gpt-3.5-turbo"
         elif not model in domains:

g4f/Provider/Acytoo.py
@@ -2,7 +2,7 @@ from __future__ import annotations

 from aiohttp import ClientSession

-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider

@@ -15,11 +15,10 @@ class Acytoo(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
-
+    ) -> AsyncResult:
         async with ClientSession(
             headers=_create_header()
         ) as session:
g4f/Provider/AiAsk.py
@@ -1,7 +1,7 @@
 from __future__ import annotations

 from aiohttp import ClientSession
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider

 class AiAsk(AsyncGeneratorProvider):

@@ -13,9 +13,10 @@ class AiAsk(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
+        proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         headers = {
             "accept": "application/json, text/plain, */*",
             "origin": cls.url,

@@ -33,7 +34,7 @@ class AiAsk(AsyncGeneratorProvider):
         }
         buffer = ""
         rate_limit = "您的免费额度不够使用这个模型啦,请点击右上角登录继续使用!"
-        async with session.post(f"{cls.url}/v1/chat/gpt/", json=data) as response:
+        async with session.post(f"{cls.url}/v1/chat/gpt/", json=data, proxy=proxy) as response:
             response.raise_for_status()
             async for chunk in response.content.iter_any():
                 buffer += chunk.decode()

g4f/Provider/Aibn.py
@@ -3,7 +3,7 @@ from __future__ import annotations
 import time
 import hashlib

-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from ..requests import StreamSession
 from .base_provider import AsyncGeneratorProvider

@@ -17,11 +17,16 @@ class Aibn(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
-        timeout: int = 30,
+        messages: Messages,
+        proxy: str = None,
+        timeout: int = 120,
         **kwargs
-    ) -> AsyncGenerator:
-        async with StreamSession(impersonate="chrome107", timeout=timeout) as session:
+    ) -> AsyncResult:
+        async with StreamSession(
+            impersonate="chrome107",
+            proxies={"https": proxy},
+            timeout=timeout
+        ) as session:
             timestamp = int(time.time())
             data = {
                 "messages": messages,
g4f/Provider/Aichat.py
@@ -2,6 +2,7 @@ from __future__ import annotations

 from aiohttp import ClientSession

+from ..typing import Messages
 from .base_provider import AsyncProvider, format_prompt

@@ -13,7 +14,7 @@ class Aichat(AsyncProvider):
     @staticmethod
     async def create_async(
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         proxy: str = None,
         **kwargs
     ) -> str:

g4f/Provider/Ails.py
@@ -7,7 +7,7 @@ import json
 from datetime import datetime
 from aiohttp import ClientSession

-from ..typing import SHA256, AsyncGenerator
+from ..typing import SHA256, AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider

@@ -19,11 +19,11 @@ class Ails(AsyncGeneratorProvider):
     @staticmethod
     async def create_async_generator(
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         stream: bool,
         proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         headers = {
             "authority": "api.caipacity.com",
             "accept": "*/*",
g4f/Provider/Aivvm.py
@@ -2,7 +2,7 @@ from __future__ import annotations

 from ..requests import StreamSession
 from .base_provider import AsyncGeneratorProvider
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages

 # to recreate this easily, send a post request to https://chat.aivvm.com/api/models
 models = {

@@ -26,11 +26,12 @@ class Aivvm(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         stream: bool,
-        timeout: int = 30,
+        proxy: str = None,
+        timeout: int = 120,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         if not model:
             model = "gpt-3.5-turbo"
         elif model not in models:

@@ -48,7 +49,12 @@ class Aivvm(AsyncGeneratorProvider):
             "Origin": cls.url,
             "Referer": f"{cls.url}/",
         }
-        async with StreamSession(impersonate="chrome107", headers=headers, timeout=timeout) as session:
+        async with StreamSession(
+            impersonate="chrome107",
+            headers=headers,
+            proxies={"https": proxy},
+            timeout=timeout
+        ) as session:
             async with session.post(f"{cls.url}/api/chat", json=json_data) as response:
                 response.raise_for_status()
                 async for chunk in response.iter_content():
g4f/Provider/Bing.py
@@ -7,7 +7,7 @@ import os
 import uuid
 import urllib.parse
 from aiohttp import ClientSession, ClientTimeout
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider

 class Tones():

@@ -32,11 +32,12 @@ class Bing(AsyncGeneratorProvider):
     @staticmethod
     def create_async_generator(
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
+        proxy: str = None,
         cookies: dict = None,
         tone: str = Tones.creative,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         if len(messages) < 2:
             prompt = messages[0]["content"]
             context = None

@@ -46,9 +47,9 @@ class Bing(AsyncGeneratorProvider):

         if not cookies or "SRCHD" not in cookies:
             cookies = default_cookies
-        return stream_generate(prompt, tone, context, cookies)
+        return stream_generate(prompt, tone, context, proxy, cookies)

-def create_context(messages: list[dict[str, str]]):
+def create_context(messages: Messages):
     context = "".join(f"[{message['role']}](#message)\n{message['content']}\n\n" for message in messages)

     return context

@@ -59,10 +60,10 @@ class Conversation():
         self.clientId = clientId
         self.conversationSignature = conversationSignature

-async def create_conversation(session: ClientSession) -> Conversation:
+async def create_conversation(session: ClientSession, proxy: str = None) -> Conversation:
     url = 'https://www.bing.com/turing/conversation/create?bundleVersion=1.1150.3'

-    async with await session.get(url) as response:
+    async with await session.get(url, proxy=proxy) as response:
         data = await response.json()

     conversationId = data.get('conversationId')

@@ -80,7 +81,7 @@ async def list_conversations(session: ClientSession) -> list:
         response = await response.json()
         return response["chats"]

-async def delete_conversation(session: ClientSession, conversation: Conversation) -> list:
+async def delete_conversation(session: ClientSession, conversation: Conversation, proxy: str = None) -> list:
     url = "https://sydney.bing.com/sydney/DeleteSingleConversation"
     json = {
         "conversationId": conversation.conversationId,

@@ -89,7 +90,7 @@ async def delete_conversation(session: ClientSession, conversation: Conversation
         "source": "cib",
         "optionsSets": ["autosave"]
     }
-    async with session.post(url, json=json) as response:
+    async with session.post(url, json=json, proxy=proxy) as response:
         response = await response.json()
         return response["result"]["value"] == "Success"

@@ -239,20 +240,22 @@ def create_message(conversation: Conversation, prompt: str, tone: str, context:
 async def stream_generate(
         prompt: str,
         tone: str,
-        context: str=None,
-        cookies: dict=None,
+        context: str = None,
+        proxy: str = None,
+        cookies: dict = None
     ):
     async with ClientSession(
         timeout=ClientTimeout(total=900),
         cookies=cookies,
         headers=Defaults.headers,
     ) as session:
-        conversation = await create_conversation(session)
+        conversation = await create_conversation(session, proxy)
         try:
             async with session.ws_connect(
                 f'wss://sydney.bing.com/sydney/ChatHub',
                 autoping=False,
-                params={'sec_access_token': conversation.conversationSignature}
+                params={'sec_access_token': conversation.conversationSignature},
+                proxy=proxy
             ) as wss:

                 await wss.send_str(format_message({'protocol': 'json', 'version': 1}))

@@ -297,4 +300,4 @@ async def stream_generate(
                 raise Exception(f"{result['value']}: {result['message']}")
             return
         finally:
-            await delete_conversation(session, conversation)
+            await delete_conversation(session, conversation, proxy)
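The Bing changes are the least mechanical part of the commit: the proxy is not just a new keyword argument, it has to be threaded through every helper that performs I/O, including the websocket upgrade. A stripped-down sketch of that threading, with placeholder URLs:

```py
from aiohttp import ClientSession

async def create_conversation(session: ClientSession, proxy: str = None) -> dict:
    # every HTTP helper takes the proxy and forwards it to its own request
    async with session.get("https://example.invalid/conversation/create", proxy=proxy) as response:
        return await response.json()

async def stream_generate(prompt: str, proxy: str = None):
    async with ClientSession() as session:
        conversation = await create_conversation(session, proxy)
        # aiohttp's ws_connect accepts the same per-request proxy argument;
        # a real client would include the conversation details in the handshake
        async with session.ws_connect("wss://example.invalid/chathub", proxy=proxy) as wss:
            await wss.send_str(prompt)
```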
g4f/Provider/ChatBase.py
@@ -2,7 +2,7 @@ from __future__ import annotations

 from aiohttp import ClientSession

-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider

@@ -16,9 +16,10 @@ class ChatBase(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
+        proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         if model == "gpt-4":
             chat_id = "quran---tafseer-saadi-pdf-wbgknt7zn"
         elif model == "gpt-3.5-turbo" or not model:

@@ -44,7 +45,7 @@ class ChatBase(AsyncGeneratorProvider):
             "chatId": chat_id,
             "conversationId": f"kcXpqEnqUie3dnJlsRi_O-{chat_id}"
         }
-        async with session.post("https://www.chatbase.co/api/fe/chat", json=data) as response:
+        async with session.post("https://www.chatbase.co/api/fe/chat", json=data, proxy=proxy) as response:
             response.raise_for_status()
             async for stream in response.content.iter_any():
                 yield stream.decode()

g4f/Provider/ChatForAi.py
@@ -1,6 +1,6 @@
 from __future__ import annotations

-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from ..requests import StreamSession
 from .base_provider import AsyncGeneratorProvider

@@ -14,11 +14,12 @@ class ChatForAi(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
-        timeout: int = 30,
+        messages: Messages,
+        proxy: str = None,
+        timeout: int = 120,
         **kwargs
-    ) -> AsyncGenerator:
-        async with StreamSession(impersonate="chrome107", timeout=timeout) as session:
+    ) -> AsyncResult:
+        async with StreamSession(impersonate="chrome107", proxies={"https": proxy}, timeout=timeout) as session:
             prompt = messages[-1]["content"]
             data = {
                 "conversationId": "temp",
g4f/Provider/Chatgpt4Online.py
@@ -3,7 +3,7 @@ from __future__ import annotations
 import json
 from aiohttp import ClientSession

-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider

@@ -16,9 +16,10 @@ class Chatgpt4Online(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
+        proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         async with ClientSession() as session:
             data = {
                 "botId": "default",

@@ -30,7 +31,7 @@ class Chatgpt4Online(AsyncGeneratorProvider):
                 "newMessage": messages[-1]["content"],
                 "stream": True
             }
-            async with session.post(cls.url + "/wp-json/mwai-ui/v1/chats/submit", json=data) as response:
+            async with session.post(cls.url + "/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response:
                 response.raise_for_status()
                 async for line in response.content:
                     if line.startswith(b"data: "):

g4f/Provider/ChatgptAi.py
@@ -3,6 +3,7 @@ from __future__ import annotations
 import re
 from aiohttp import ClientSession

+from ..typing import Messages
 from .base_provider import AsyncProvider, format_prompt

@@ -18,7 +19,7 @@ class ChatgptAi(AsyncProvider):
     async def create_async(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         proxy: str = None,
         **kwargs
     ) -> str:

g4f/Provider/ChatgptDemo.py
@@ -2,8 +2,8 @@ from __future__ import annotations

 import time, json, re
 from aiohttp import ClientSession
-from typing import AsyncGenerator

+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
 from .helper import format_prompt

@@ -16,10 +16,10 @@ class ChatgptDemo(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         headers = {
             "authority": "chat.chatgptdemo.net",
             "accept-language": "de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US",

g4f/Provider/ChatgptDuo.py
@@ -1,5 +1,6 @@
 from __future__ import annotations

+from ..typing import Messages
 from curl_cffi.requests import AsyncSession
 from .base_provider import AsyncProvider, format_prompt

@@ -13,9 +14,9 @@ class ChatgptDuo(AsyncProvider):
     async def create_async(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         proxy: str = None,
-        timeout: int = 30,
+        timeout: int = 120,
         **kwargs
     ) -> str:
         async with AsyncSession(
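ChatgptDuo is built on curl_cffi's `AsyncSession` instead of aiohttp, so its proxy would also be configured per session rather than per request. A minimal sketch of that style, with a placeholder URL:

```py
from curl_cffi.requests import AsyncSession

# Sketch: with curl_cffi the proxy is a session-level mapping, not a request kwarg.
async def fetch(proxy: str = None) -> str:
    async with AsyncSession(
        impersonate="chrome107",
        proxies={"https": proxy} if proxy else None,
        timeout=120
    ) as session:
        response = await session.get("https://example.invalid")  # placeholder URL
        return response.text
```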
g4f/Provider/ChatgptX.py
@@ -19,6 +19,7 @@ class ChatgptX(AsyncGeneratorProvider):
         cls,
         model: str,
         messages: Messages,
+        proxy: str = None,
         **kwargs
     ) -> AsyncResult:
         headers = {

@@ -32,7 +33,7 @@ class ChatgptX(AsyncGeneratorProvider):
             'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36',
         }
         async with ClientSession(headers=headers) as session:
-            async with session.get(f"{cls.url}/") as response:
+            async with session.get(f"{cls.url}/", proxy=proxy) as response:
                 response = await response.text()
                 result = re.search(r'<meta name="csrf-token" content="(.*?)"', response)
                 if result:

@@ -62,7 +63,7 @@ class ChatgptX(AsyncGeneratorProvider):
                 'x-csrf-token': csrf_token,
                 'x-requested-with': 'XMLHttpRequest'
             }
-            async with session.post(cls.url + '/sendchat', data=data, headers=headers) as response:
+            async with session.post(cls.url + '/sendchat', data=data, headers=headers, proxy=proxy) as response:
                 response.raise_for_status()
                 chat = await response.json()
                 if "response" not in chat or not chat["response"]:

@@ -82,7 +83,7 @@ class ChatgptX(AsyncGeneratorProvider):
                 "conversions_id": chat["conversions_id"],
                 "ass_conversions_id": chat["ass_conversions_id"],
             }
-            async with session.get(f'{cls.url}/chats_stream', params=data, headers=headers) as response:
+            async with session.get(f'{cls.url}/chats_stream', params=data, headers=headers, proxy=proxy) as response:
                 response.raise_for_status()
                 async for line in response.content:
                     if line.startswith(b"data: "):

g4f/Provider/Cromicle.py
@@ -2,7 +2,7 @@ from __future__ import annotations

 from aiohttp import ClientSession
 from hashlib import sha256
-from typing import AsyncGenerator, Dict, List
+from ..typing import AsyncResult, Messages, Dict

 from .base_provider import AsyncGeneratorProvider
 from .helper import format_prompt

@@ -17,10 +17,10 @@ class Cromicle(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: List[Dict[str, str]],
+        messages: Messages,
         proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator[str, None]:
+    ) -> AsyncResult:
         async with ClientSession(
             headers=_create_header()
         ) as session:
g4f/Provider/DeepAi.py
@@ -6,22 +6,22 @@ import random
 import hashlib
 from aiohttp import ClientSession

-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider


 class DeepAi(AsyncGeneratorProvider):
-    url: str = "https://deepai.org"
+    url = "https://deepai.org"
     working = True
     supports_gpt_35_turbo = True

     @staticmethod
     async def create_async_generator(
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:

         token_js = """
 var agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36'

g4f/Provider/FreeGpt.py
@@ -2,7 +2,7 @@ from __future__ import annotations

 import time, hashlib, random

-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from ..requests import StreamSession
 from .base_provider import AsyncGeneratorProvider

@@ -20,11 +20,16 @@ class FreeGpt(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
-        timeout: int = 30,
+        messages: Messages,
+        proxy: str = None,
+        timeout: int = 120,
         **kwargs
-    ) -> AsyncGenerator:
-        async with StreamSession(impersonate="chrome107", timeout=timeout) as session:
+    ) -> AsyncResult:
+        async with StreamSession(
+            impersonate="chrome107",
+            timeout=timeout,
+            proxies={"https": proxy}
+        ) as session:
             prompt = messages[-1]["content"]
             timestamp = int(time.time())
             data = {
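FreeGpt, like Aibn, ChatForAi and Aivvm above, goes through g4f's own `StreamSession`, where the proxy is a session-level `proxies` mapping rather than a per-request argument. A sketch of that setup, assuming `StreamSession` keeps the requests-style interface the hunks show:

```py
from g4f.requests import StreamSession  # g4f's curl_cffi-backed session

# Sketch of session-level proxying with StreamSession; the URL is a placeholder
# and impersonate selects the browser TLS fingerprint, as in the hunk above.
async def stream_post(url: str, data: dict, proxy: str = None, timeout: int = 120):
    async with StreamSession(
        impersonate="chrome107",
        timeout=timeout,
        proxies={"https": proxy}
    ) as session:
        async with session.post(url, json=data) as response:
            response.raise_for_status()
            async for chunk in response.iter_content():
                yield chunk.decode()
```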
g4f/Provider/GPTalk.py
@@ -2,8 +2,8 @@ from __future__ import annotations

 import secrets, time, json
 from aiohttp import ClientSession
-from typing import AsyncGenerator

+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
 from .helper import format_prompt

@@ -18,9 +18,10 @@ class GPTalk(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
+        proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         if not model:
             model = "gpt-3.5-turbo"
         timestamp = int(time.time())

@@ -48,7 +49,7 @@ class GPTalk(AsyncGeneratorProvider):
             "fingerprint": secrets.token_hex(16).zfill(32),
             "platform": "fingerprint"
         }
-        async with session.post(cls.url + "/api/chatgpt/user/login", json=data) as response:
+        async with session.post(cls.url + "/api/chatgpt/user/login", json=data, proxy=proxy) as response:
             response.raise_for_status()
             cls._auth = (await response.json())["data"]
         data = {

@@ -68,11 +69,11 @@ class GPTalk(AsyncGeneratorProvider):
         headers = {
             'authorization': f'Bearer {cls._auth["token"]}',
         }
-        async with session.post(cls.url + "/api/chatgpt/chatapi/text", json=data, headers=headers) as response:
+        async with session.post(cls.url + "/api/chatgpt/chatapi/text", json=data, headers=headers, proxy=proxy) as response:
             response.raise_for_status()
             token = (await response.json())["data"]["token"]
         last_message = ""
-        async with session.get(cls.url + "/api/chatgpt/chatapi/stream", params={"token": token}) as response:
+        async with session.get(cls.url + "/api/chatgpt/chatapi/stream", params={"token": token}, proxy=proxy) as response:
             response.raise_for_status()
             async for line in response.content:
                 if line.startswith(b"data: "):
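GPTalk shows why the proxy has to be repeated: its flow is three separate requests (log in for a bearer token, submit the prompt for a stream token, then stream), and each one opens its own connection. A condensed sketch of that shape, with the payloads left to the caller:

```py
from aiohttp import ClientSession

# Condensed sketch of GPTalk's three-request flow.
async def gptalk_stream(url: str, login_data: dict, prompt_data: dict, proxy: str = None):
    async with ClientSession() as session:
        # 1) log in for a bearer token
        async with session.post(f"{url}/api/chatgpt/user/login", json=login_data, proxy=proxy) as response:
            response.raise_for_status()
            auth = (await response.json())["data"]
        headers = {"authorization": f"Bearer {auth['token']}"}
        # 2) submit the prompt, receiving a one-shot stream token
        async with session.post(f"{url}/api/chatgpt/chatapi/text", json=prompt_data, headers=headers, proxy=proxy) as response:
            response.raise_for_status()
            token = (await response.json())["data"]["token"]
        # 3) stream the completion, again through the same proxy
        async with session.get(f"{url}/api/chatgpt/chatapi/stream", params={"token": token}, proxy=proxy) as response:
            response.raise_for_status()
            async for line in response.content:
                if line.startswith(b"data: "):
                    yield line[6:].decode()
```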
g4f/Provider/GptForLove.py
@@ -3,7 +3,7 @@ from __future__ import annotations
 from aiohttp import ClientSession
 import execjs, os, json

-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
 from .helper import format_prompt

@@ -16,9 +16,10 @@ class GptForLove(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
+        proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         if not model:
             model = "gpt-3.5-turbo"
         headers = {

@@ -47,7 +48,7 @@ class GptForLove(AsyncGeneratorProvider):
             "secret": get_secret(),
             **kwargs
         }
-        async with session.post("https://api.gptplus.one/chat-process", json=data) as response:
+        async with session.post("https://api.gptplus.one/chat-process", json=data, proxy=proxy) as response:
             response.raise_for_status()
             async for line in response.content:
                 try:

g4f/Provider/GptGo.py
@@ -18,7 +18,6 @@ class GptGo(AsyncGeneratorProvider):
         model: str,
         messages: list[dict[str, str]],
         proxy: str = None,
-        timeout: int = 30,
         **kwargs
     ) -> AsyncGenerator:
         headers = {

@@ -73,6 +72,7 @@ class GptGo(AsyncGeneratorProvider):
             ("model", "str"),
             ("messages", "list[dict[str, str]]"),
             ("stream", "bool"),
+            ("proxy", "str"),
             ("temperature", "float"),
         ]
         param = ", ".join([": ".join(p) for p in params])
g4f/Provider/GptGod.py
@@ -1,7 +1,7 @@
 from __future__ import annotations
 import secrets, json
 from aiohttp import ClientSession
-from typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
 from .helper import format_prompt

@@ -14,9 +14,10 @@ class GptGod(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
+        proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         headers = {
             "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
             "Accept": "text/event-stream",

@@ -24,7 +25,7 @@ class GptGod(AsyncGeneratorProvider):
             "Accept-Encoding": "gzip, deflate, br",
             "Alt-Used": "gptgod.site",
             "Connection": "keep-alive",
-            "Referer": "https://gptgod.site/",
+            "Referer": f"{cls.url}/",
             "Sec-Fetch-Dest": "empty",
             "Sec-Fetch-Mode": "cors",
             "Sec-Fetch-Site": "same-origin",

@@ -37,7 +38,7 @@ class GptGod(AsyncGeneratorProvider):
                 "content": prompt,
                 "id": secrets.token_hex(16).zfill(32)
             }
-            async with session.get(f"{cls.url}/api/session/free/gpt3p5", params=data) as response:
+            async with session.get(f"{cls.url}/api/session/free/gpt3p5", params=data, proxy=proxy) as response:
                 response.raise_for_status()
                 event = None
                 async for line in response.content:

g4f/Provider/Liaobots.py
@@ -4,7 +4,7 @@ import uuid

 from aiohttp import ClientSession

-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider

 models = {

@@ -39,11 +39,11 @@ class Liaobots(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         auth: str = None,
         proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         model = model if model in models else "gpt-3.5-turbo"
         headers = {
             "authority": "liaobots.com",

g4f/Provider/Myshell.py
@@ -28,6 +28,7 @@ class Myshell(AsyncGeneratorProvider):
         cls,
         model: str,
         messages: list[dict[str, str]],
+        proxy: str = None,
         timeout: int = 90,
         **kwargs
     ) -> AsyncGenerator:

@@ -47,7 +48,8 @@ class Myshell(AsyncGeneratorProvider):
         async with session.ws_connect(
             "wss://api.myshell.ai/ws/?EIO=4&transport=websocket",
             autoping=False,
-            timeout=timeout
+            timeout=timeout,
+            proxy=proxy
         ) as wss:
             # Send and receive hello message
             await wss.receive_str()
g4f/Provider/Phind.py
@@ -19,6 +19,7 @@ class Phind(AsyncGeneratorProvider):
         model: str,
         messages: list[dict[str, str]],
         proxy: str = None,
+        timeout: int = 120,
         **kwargs
     ) -> AsyncGenerator:
         chars = 'abcdefghijklmnopqrstuvwxyz0123456789'

@@ -43,7 +44,12 @@ class Phind(AsyncGeneratorProvider):
             "Origin": cls.url,
             "Referer": f"{cls.url}/"
         }
-        async with StreamSession(headers=headers, timeout=(5, 180), proxies={"https": proxy}, impersonate="chrome107") as session:
+        async with StreamSession(
+            headers=headers,
+            timeout=(5, timeout),
+            proxies={"https": proxy},
+            impersonate="chrome107"
+        ) as session:
             async with session.post(f"{cls.url}/api/infer/answer", json=data) as response:
                 response.raise_for_status()
                 new_lines = 0

@@ -71,6 +77,7 @@ class Phind(AsyncGeneratorProvider):
             ("messages", "list[dict[str, str]]"),
             ("stream", "bool"),
             ("proxy", "str"),
+            ("timeout", "int"),
         ]
         param = ", ".join([": ".join(p) for p in params])
         return f"g4f.provider.{cls.__name__} supports: ({param})"
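The `("timeout", "int")` entry lands in the small introspection convention GptGo and Phind share: a table of (name, type) pairs rendered into a supports banner. Reconstructed as a sketch (the real classes expose this as a property; a plain classmethod is used here for simplicity):

```py
class ExampleProvider:
    @classmethod
    def params(cls) -> str:
        # the (argument name, type) pairs this provider advertises
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("proxy", "str"),
            ("timeout", "int"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"

print(ExampleProvider.params())
# g4f.provider.ExampleProvider supports: (model: str, messages: list[dict[str, str]], stream: bool, proxy: str, timeout: int)
```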
g4f/Provider/Vercel.py
@@ -2,7 +2,7 @@ from __future__ import annotations

 import json, base64, requests, execjs, random, uuid

-from ..typing import Any, TypedDict, CreateResult
+from ..typing import Messages, TypedDict, CreateResult
 from .base_provider import BaseProvider
 from abc import abstractmethod

@@ -17,8 +17,9 @@ class Vercel(BaseProvider):
     @abstractmethod
     def create_completion(
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         stream: bool,
+        proxy: str = None,
         **kwargs
     ) -> CreateResult:
         if not model:

@@ -52,15 +53,18 @@ class Vercel(BaseProvider):
         'model'       : model_info[model]['id'],
         'messages'    : messages,
         'playgroundId': str(uuid.uuid4()),
-        'chatIndex'   : 0} | model_info[model]['default_params']
+        'chatIndex'   : 0,
+        **model_info[model]['default_params'],
+        **kwargs
+    }

     max_retries = kwargs.get('max_retries', 20)
     for i in range(max_retries):
         response = requests.post('https://sdk.vercel.ai/api/generate',
-            headers=headers, json=json_data, stream=True)
+            headers=headers, json=json_data, stream=True, proxies={"https": proxy})
         try:
             response.raise_for_status()
-        except:
+        except Exception:
             continue
         for token in response.iter_content(chunk_size=None):
             yield token.decode()
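The `chatIndex` change replaces the `|` dict merge with `**`-unpacking so that caller-supplied `kwargs` are merged last and therefore win over the model's `default_params`. The precedence rule in isolation:

```py
base = {"chatIndex": 0, "temperature": 1.0}
default_params = {"temperature": 0.7, "maximumLength": 1024}
kwargs = {"temperature": 0.2}

# later entries override earlier ones, so kwargs take precedence
merged = {**base, **default_params, **kwargs}
print(merged["temperature"])  # 0.2
```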
g4f/Provider/Vitalentum.py
@@ -42,7 +42,7 @@ class Vitalentum(AsyncGeneratorProvider):
         async with ClientSession(
             headers=headers
         ) as session:
-            async with session.post(cls.url + "/api/converse-edge", json=data, proxy=proxy) as response:
+            async with session.post(f"{cls.url}/api/converse-edge", json=data, proxy=proxy) as response:
                 response.raise_for_status()
                 async for line in response.content:
                     line = line.decode()

Bard.py
@@ -6,7 +6,9 @@ import re

 from aiohttp import ClientSession

-from ..base_provider import AsyncProvider, format_prompt, get_cookies
+from ...typing import Messages
+from ..base_provider import AsyncProvider
+from ..helper import format_prompt, get_cookies


 class Bard(AsyncProvider):

@@ -19,25 +21,22 @@ class Bard(AsyncProvider):
     async def create_async(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         proxy: str = None,
         cookies: dict = None,
         **kwargs
     ) -> str:
         prompt = format_prompt(messages)
-        if proxy and "://" not in proxy:
-            proxy = f"http://{proxy}"
         if not cookies:
             cookies = get_cookies(".google.com")
-
         headers = {
             'authority': 'bard.google.com',
-            'origin': 'https://bard.google.com',
-            'referer': 'https://bard.google.com/',
+            'origin': cls.url,
+            'referer': f'{cls.url}/',
             'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
             'x-same-domain': '1',
         }

         async with ClientSession(
             cookies=cookies,
             headers=headers

@@ -67,7 +66,6 @@ class Bard(AsyncProvider):
             'lamda',
             'BardFrontendService'
         ])
-
         async with session.post(
             f'{cls.url}/_/BardChatUi/data/{intents}/StreamGenerate',
             data=data,

HuggingChat.py
@@ -4,8 +4,9 @@ import json, uuid

 from aiohttp import ClientSession

-from ...typing import AsyncGenerator
-from ..base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt, get_cookies


 class HuggingChat(AsyncGeneratorProvider):
@@ -18,12 +19,12 @@ class HuggingChat(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         stream: bool = True,
         proxy: str = None,
         cookies: dict = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         model = model if model else cls.model
         if proxy and "://" not in proxy:
             proxy = f"http://{proxy}"
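
The guard kept here turns a bare `host:port` into a full proxy URL, since aiohttp expects a scheme. The same check, extracted as a standalone helper for illustration (the helper name is hypothetical):

```py
def normalize_proxy(proxy: str = None) -> str:
    # Mirrors the inline check above: a bare "host:port" gains an
    # "http://" scheme; full URLs (http, socks5, ...) pass through.
    if proxy and "://" not in proxy:
        proxy = f"http://{proxy}"
    return proxy

assert normalize_proxy("127.0.0.1:8080") == "http://127.0.0.1:8080"
assert normalize_proxy("socks5://user:pass@host:port") == "socks5://user:pass@host:port"
assert normalize_proxy(None) is None
```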
@@ -4,8 +4,9 @@ import json

 from aiohttp import ClientSession

-from ...typing import Any, AsyncGenerator
-from ..base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt, get_cookies


 class OpenAssistant(AsyncGeneratorProvider):
@@ -18,11 +19,11 @@ class OpenAssistant(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         proxy: str = None,
         cookies: dict = None,
-        **kwargs: Any
-    ) -> AsyncGenerator:
+        **kwargs
+    ) -> AsyncResult:
         if not cookies:
             cookies = get_cookies("open-assistant.io")

@@ -4,7 +4,7 @@ import uuid, json, time

 from ..base_provider import AsyncGeneratorProvider
 from ..helper import get_browser, get_cookies, format_prompt
-from ...typing import AsyncGenerator
+from ...typing import AsyncResult, Messages
 from ...requests import StreamSession

 class OpenaiChat(AsyncGeneratorProvider):
@@ -18,13 +18,13 @@ class OpenaiChat(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         proxy: str = None,
+        timeout: int = 120,
         access_token: str = None,
         cookies: dict = None,
-        timeout: int = 30,
-        **kwargs: dict
-    ) -> AsyncGenerator:
+        **kwargs
+    ) -> AsyncResult:
         proxies = {"https": proxy}
         if not access_token:
             access_token = await cls.get_access_token(cookies, proxies)
@@ -32,7 +32,12 @@ class OpenaiChat(AsyncGeneratorProvider):
             "Accept": "text/event-stream",
             "Authorization": f"Bearer {access_token}",
         }
-        async with StreamSession(proxies=proxies, headers=headers, impersonate="chrome107", timeout=timeout) as session:
+        async with StreamSession(
+            proxies=proxies,
+            headers=headers,
+            impersonate="chrome107",
+            timeout=timeout
+        ) as session:
             messages = [
                 {
                     "id": str(uuid.uuid4()),
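
Breaking the `StreamSession` call across lines also highlights the relocated `timeout` parameter, whose default rises from 30 to 120 seconds. Assuming valid credentials are available (cookies or an access token, since OpenaiChat needs auth), a caller could tune both knobs through the public API roughly like this; `proxy` and `timeout` simply flow through `**kwargs` to the provider:

```py
import asyncio
import g4f

async def main():
    response = await g4f.ChatCompletion.create_async(
        model=g4f.models.default,
        messages=[{"role": "user", "content": "Hello"}],
        provider=g4f.Provider.OpenaiChat,
        proxy="http://host:port",  # placeholder; the provider wraps it as proxies={"https": proxy}
        timeout=120,               # matches the new default in the signature above
    )
    print(response)

asyncio.run(main())
```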
@@ -4,7 +4,7 @@ import json

 import requests

-from ...typing import Any, CreateResult
+from ...typing import CreateResult, Messages
 from ..base_provider import BaseProvider


@@ -19,9 +19,10 @@ class Raycast(BaseProvider):
     @staticmethod
     def create_completion(
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         stream: bool,
-        **kwargs: Any,
+        proxy: str = None,
+        **kwargs,
     ) -> CreateResult:
         auth = kwargs.get('auth')
         headers = {
@@ -47,7 +48,13 @@ class Raycast(BaseProvider):
             "system_instruction": "markdown",
             "temperature": 0.5
         }
-        response = requests.post("https://backend.raycast.com/api/v1/ai/chat_completions", headers=headers, json=data, stream=True)
+        response = requests.post(
+            "https://backend.raycast.com/api/v1/ai/chat_completions",
+            headers=headers,
+            json=data,
+            stream=True,
+            proxies={"https": proxy}
+        )
         for token in response.iter_lines():
             if b'data: ' not in token:
                 continue
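
The `requests`-based providers route traffic through the `proxies` mapping instead of a per-request keyword. The same pattern in isolation (endpoint and proxy address are placeholders):

```py
import requests

proxy = "http://host:port"  # placeholder
response = requests.post(
    "https://example.com/api",  # placeholder endpoint
    json={"hello": "world"},
    stream=True,
    # requests selects the proxy per URL scheme; the diff maps only
    # "https", so plain-http requests would bypass the proxy.
    proxies={"https": proxy},
)
response.raise_for_status()
```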
@@ -2,11 +2,11 @@ from __future__ import annotations

 import json
 import random

 import requests

-from ...typing import Any, CreateResult
+from ...typing import Any, CreateResult, Messages
 from ..base_provider import BaseProvider
+from ..helper import format_prompt


 class Theb(BaseProvider):
@@ -19,12 +19,11 @@ class Theb(BaseProvider):
     @staticmethod
     def create_completion(
         model: str,
-        messages: list[dict[str, str]],
-        stream: bool, **kwargs: Any) -> CreateResult:
-        conversation = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
-        conversation += "\nassistant: "
+        messages: Messages,
+        stream: bool,
+        proxy: str = None,
+        **kwargs
+    ) -> CreateResult:

         auth = kwargs.get("auth", {
             "bearer_token":"free",
             "org_id":"theb",
@@ -54,7 +53,7 @@ class Theb(BaseProvider):
         req_rand = random.randint(100000000, 9999999999)

         json_data: dict[str, Any] = {
-            "text"        : conversation,
+            "text"        : format_prompt(messages),
             "category"    : "04f58f64a4aa4191a957b47290fee864",
             "model"       : "ee8d4f29cb7047f78cbe84313ed6ace8",
             "model_params": {
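
`format_prompt(messages)` takes over from the hand-built `conversation` string deleted in the previous hunk. Based on that removed code, an equivalent helper would look roughly like this (a sketch of the presumed behavior, not the verbatim `..helper` implementation):

```py
from __future__ import annotations

def format_prompt(messages: list[dict[str, str]]) -> str:
    # Mirrors the deleted inline code: join "role: content" lines,
    # then cue the model to answer as the assistant.
    conversation = "\n".join(
        f"{message['role']}: {message['content']}" for message in messages
    )
    return conversation + "\nassistant: "
```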
@@ -67,8 +66,13 @@ class Theb(BaseProvider):
             }
         }

-        response = requests.post(f"https://beta.theb.ai/api/conversation?org_id={org_id}&req_rand={req_rand}",
-                                 headers=headers, json=json_data, stream=True)
+        response = requests.post(
+            f"https://beta.theb.ai/api/conversation?org_id={org_id}&req_rand={req_rand}",
+            headers=headers,
+            json=json_data,
+            stream=True,
+            proxies={"https": proxy}
+        )

         response.raise_for_status()
         content = ""
@@ -2,7 +2,7 @@ from __future__ import annotations
 from requests import get
 from g4f.models import Model, ModelUtils
 from .Provider import BaseProvider
-from .typing import CreateResult, Union
+from .typing import Messages, CreateResult, Union
 from .debug import logging

 version = '0.1.5.6'
@@ -27,19 +27,19 @@ def get_model_and_provider(model : Union[Model, str],
     if model in ModelUtils.convert:
         model = ModelUtils.convert[model]
     else:
-        raise Exception(f'The model: {model} does not exist')
+        raise ValueError(f'The model: {model} does not exist')

     if not provider:
         provider = model.best_provider

     if not provider:
-        raise Exception(f'No provider found for model: {model}')
+        raise RuntimeError(f'No provider found for model: {model}')

     if not provider.working:
-        raise Exception(f'{provider.__name__} is not working')
+        raise RuntimeError(f'{provider.__name__} is not working')

     if not provider.supports_stream and stream:
-        raise Exception(f'ValueError: {provider.__name__} does not support "stream" argument')
+        raise ValueError(f'{provider.__name__} does not support "stream" argument')

     if logging:
         print(f'Using {provider.__name__} provider')
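
Narrowing the blanket `Exception` into `ValueError` (bad caller input) and `RuntimeError` (broken provider state) lets callers handle the two failure modes separately, e.g.:

```py
import g4f

try:
    response = g4f.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hello"}],
    )
except ValueError as e:
    print("Fix the call:", e)          # unknown model, unsupported stream, missing auth
except RuntimeError as e:
    print("Try another provider:", e)  # no provider found, or provider not working
```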
@@ -48,17 +48,20 @@ def get_model_and_provider(model : Union[Model, str],

 class ChatCompletion:
     @staticmethod
-    def create(model: Union[Model, str],
-        messages : list[dict[str, str]],
+    def create(
+        model: Union[Model, str],
+        messages : Messages,
         provider : Union[type[BaseProvider], None] = None,
         stream : bool = False,
-        auth : Union[str, None] = None, **kwargs) -> Union[CreateResult, str]:
+        auth : Union[str, None] = None,
+        **kwargs
+    ) -> Union[CreateResult, str]:

         model, provider = get_model_and_provider(model, provider, stream)

         if provider.needs_auth and not auth:
-            raise Exception(
-                f'ValueError: {provider.__name__} requires authentication (use auth=\'cookie or token or jwt ...\' param)')
+            raise ValueError(
+                f'{provider.__name__} requires authentication (use auth=\'cookie or token or jwt ...\' param)')

         if provider.needs_auth:
             kwargs['auth'] = auth
@@ -69,10 +72,14 @@ class ChatCompletion:
     @staticmethod
     async def create_async(
         model: Union[Model, str],
-        messages: list[dict[str, str]],
+        messages: Messages,
         provider: Union[type[BaseProvider], None] = None,
+        stream: bool = False,
         **kwargs
     ) -> str:
+        if stream:
+            raise ValueError(f'"create_async" does not support "stream" argument')

         model, provider = get_model_and_provider(model, provider, False)

         return await provider.create_async(model.name, messages, **kwargs)
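
The new guard makes `create_async` fail fast on `stream=True` instead of silently returning a joined result; streaming stays with the synchronous `create`. For example:

```py
import asyncio
import g4f

async def main():
    # stream=True is rejected up front by the new guard.
    try:
        await g4f.ChatCompletion.create_async(
            model=g4f.models.default,
            messages=[{"role": "user", "content": "Hello"}],
            stream=True,
        )
    except ValueError as e:
        print("Expected:", e)

asyncio.run(main())
```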