mirror of https://github.com/xtekky/gpt4free.git (synced 2024-12-28 21:53:11 +03:00)

Merge remote-tracking branch 'upstream/main' into api-ignore-providers

This commit is contained in: commit 2b3bc749b3

README.md (18 changes)
@@ -2,7 +2,7 @@
By using this repository or any code related to it, you agree to the [legal notice](./LEGAL_NOTICE.md). The author is not responsible for any copies, forks, reuploads made by other users, or anything else related to gpt4free. This is the author's only account and repository. To prevent impersonation or irresponsible actions, please comply with the GNU GPL license this Repository uses.

-- latest pypi version: [`0.1.7.3`](https://pypi.org/project/g4f/0.1.7.3)
+- latest pypi version: [`0.1.7.5`](https://pypi.org/project/g4f/0.1.7.5)

```sh
pip install -U g4f
```
@@ -175,7 +175,7 @@ docker compose down

```py
import g4f

-g4f.logging = True # enable logging
+g4f.debug.logging = True # enable logging
g4f.check_version = False # Disable automatic version checking
print(g4f.version) # check version
print(g4f.Provider.Ails.params) # supported args
@@ -226,22 +226,13 @@ import g4f

from g4f.Provider import (
    AItianhu,
    Acytoo,
    Aichat,
    Ails,
    Bard,
    Bing,
    ChatBase,
    ChatgptAi,
    H2o,
    HuggingChat,
    OpenAssistant,
    OpenaiChat,
    Raycast,
    Theb,
    Vercel,
    Vitalentum,
    Ylokh,
    You,
    Yqcloud,
)
@@ -324,9 +315,9 @@ async def run_all():
asyncio.run(run_all())
```

-##### Proxy Support:
+##### Proxy and Timeout Support:

-All providers support specifying a proxy in the create functions.
+All providers support specifying a proxy and increasing timeout in the create functions.

```py
import g4f

@@ -336,6 +327,7 @@ response = g4f.ChatCompletion.create(
    messages=[{"role": "user", "content": "Hello"}],
    proxy="http://host:port",
    # or socks5://user:pass@host:port
+    timeout=120, # in secs
)

print(f"Result:", response)
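The same two keyword arguments apply on the async path as well; a minimal illustrative sketch (not part of the diff, assuming the `create_async` variant shipped in this release, with a placeholder proxy address):

```py
# Illustrative sketch: proxy and timeout are passed the same way to the
# async API as to the synchronous g4f.ChatCompletion.create call above.
import asyncio
import g4f

async def main():
    response = await g4f.ChatCompletion.create_async(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hello"}],
        proxy="http://host:port",  # placeholder proxy address
        timeout=120,               # seconds
    )
    print(response)

asyncio.run(main())
```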
@@ -6,7 +6,7 @@ sys.path.append(str(Path(__file__).parent.parent))
sys.path.append(str(Path(__file__).parent.parent.parent))

import g4f
-from testing.test_providers import get_providers
+from testing._providers import get_providers
from testing.log_time import log_time_async

async def create_async(provider):

@@ -100,4 +100,4 @@ def main():

if __name__ == "__main__":
-    main()
\ No newline at end of file
+    main()
@@ -1,7 +1,7 @@
from __future__ import annotations

import random, json
-from ..debug import logging
+from .. import debug
from ..typing import AsyncResult, Messages
from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies

@@ -36,7 +36,7 @@ class AItianhuSpace(AsyncGeneratorProvider):
        rand = ''.join(random.choice(chars) for _ in range(6))
        domain = f"{rand}.{domains[model]}"

-        if logging:
+        if debug.logging:
            print(f"AItianhuSpace | using domain: {domain}")

        if not cookies:
@@ -1,10 +1,16 @@
from __future__ import annotations

+import string
import random
import json
import os
+import re
+import io
+import base64
+import numpy as np
import uuid
import urllib.parse
+from PIL import Image
from aiohttp import ClientSession, ClientTimeout
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider

@@ -35,6 +41,7 @@ class Bing(AsyncGeneratorProvider):
        proxy: str = None,
        cookies: dict = None,
        tone: str = Tones.creative,
+        image: str = None,
        **kwargs
    ) -> AsyncResult:
        if len(messages) < 2:

@@ -46,7 +53,7 @@ class Bing(AsyncGeneratorProvider):

        if not cookies or "SRCHD" not in cookies:
            cookies = default_cookies
-        return stream_generate(prompt, tone, context, proxy, cookies)
+        return stream_generate(prompt, tone, image, context, proxy, cookies)

def create_context(messages: Messages):
    context = "".join(f"[{message['role']}](#message)\n{message['content']}\n\n" for message in messages)

@@ -54,14 +61,14 @@ def create_context(messages: Messages):
    return context

class Conversation():
-    def __init__(self, conversationId: str, clientId: str, conversationSignature: str) -> None:
+    def __init__(self, conversationId: str, clientId: str, conversationSignature: str, imageInfo: dict=None) -> None:
        self.conversationId = conversationId
        self.clientId = clientId
        self.conversationSignature = conversationSignature
+        self.imageInfo = imageInfo

-async def create_conversation(session: ClientSession, proxy: str = None) -> Conversation:
-    url = 'https://www.bing.com/turing/conversation/create?bundleVersion=1.1150.3'
+async def create_conversation(session: ClientSession, tone: str, image: str = None, proxy: str = None) -> Conversation:
+    url = 'https://www.bing.com/turing/conversation/create?bundleVersion=1.1199.4'
    async with await session.get(url, proxy=proxy) as response:
        data = await response.json()

@@ -71,8 +78,65 @@ async def create_conversation(session: ClientSession, proxy: str = None) -> Conv

    if not conversationId or not clientId or not conversationSignature:
        raise Exception('Failed to create conversation.')

-    return Conversation(conversationId, clientId, conversationSignature)
+    conversation = Conversation(conversationId, clientId, conversationSignature, None)
+    if isinstance(image, str):
+        try:
+            config = {
+                "visualSearch": {
+                    "maxImagePixels": 360000,
+                    "imageCompressionRate": 0.7,
+                    "enableFaceBlurDebug": 0,
+                }
+            }
+            is_data_uri_an_image(image)
+            img_binary_data = extract_data_uri(image)
+            is_accepted_format(img_binary_data)
+            img = Image.open(io.BytesIO(img_binary_data))
+            width, height = img.size
+            max_image_pixels = config['visualSearch']['maxImagePixels']
+            compression_rate = config['visualSearch']['imageCompressionRate']
+
+            if max_image_pixels / (width * height) < 1:
+                new_width = int(width * np.sqrt(max_image_pixels / (width * height)))
+                new_height = int(height * np.sqrt(max_image_pixels / (width * height)))
+            else:
+                new_width = width
+                new_height = height
+            try:
+                orientation = get_orientation(img)
+            except Exception:
+                orientation = None
+            new_img = process_image(orientation, img, new_width, new_height)
+            new_img_binary_data = compress_image_to_base64(new_img, compression_rate)
+            data, boundary = build_image_upload_api_payload(new_img_binary_data, conversation, tone)
+            headers = session.headers.copy()
+            headers["content-type"] = 'multipart/form-data; boundary=' + boundary
+            headers["referer"] = 'https://www.bing.com/search?q=Bing+AI&showconv=1&FORM=hpcodx'
+            headers["origin"] = 'https://www.bing.com'
+            async with await session.post("https://www.bing.com/images/kblob", data=data, headers=headers, proxy=proxy) as image_upload_response:
+                if image_upload_response.status == 200:
+                    image_info = await image_upload_response.json()
+                    result = {}
+                    if image_info.get('blobId'):
+                        result['bcid'] = image_info.get('blobId', "")
+                        result['blurredBcid'] = image_info.get('processedBlobId', "")
+                        if result['blurredBcid'] != "":
+                            result["imageUrl"] = "https://www.bing.com/images/blob?bcid=" + result['blurredBcid']
+                        elif result['bcid'] != "":
+                            result["imageUrl"] = "https://www.bing.com/images/blob?bcid=" + result['bcid']
+                        if config['visualSearch']["enableFaceBlurDebug"]:
+                            result['originalImageUrl'] = "https://www.bing.com/images/blob?bcid=" + result['blurredBcid']
+                        else:
+                            result['originalImageUrl'] = "https://www.bing.com/images/blob?bcid=" + result['bcid']
+                        conversation.imageInfo = result
+                    else:
+                        raise Exception("Failed to parse image info.")
+                else:
+                    raise Exception("Failed to upload image.")
+        except Exception as e:
+            print(f"An error happened while trying to send image: {str(e)}")
+    return conversation

async def list_conversations(session: ClientSession) -> list:
    url = "https://www.bing.com/turing/conversation/chats"

@@ -98,37 +162,47 @@ class Defaults:
    ip_address = f"13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}"

    allowedMessageTypes = [
        "ActionRequest",
        "Chat",
        "Context",
        "Disengaged",
        "Progress",
        "AdsQuery",
        "SemanticSerp",
        "GenerateContentQuery",
        "SearchQuery",
+        # The following message types should not be added so that it does not flood with
+        # useless messages (such as "Analyzing images" or "Searching the web") while it's retrieving the AI response
+        # "InternalSearchQuery",
+        # "InternalSearchResult",
+        # Not entirely certain about these two, but these parameters may be used for real-time markdown rendering.
+        # Keeping them could potentially complicate the retrieval of the messages because link references written while
+        # the AI is responding would then be moved to the very end of its message.
+        # "RenderCardRequest",
+        # "RenderContentRequest"
    ]

    sliceIds = [
        "winmuid3tf",
        "osbsdusgreccf",
        "ttstmout",
        "crchatrev",
        "winlongmsgtf",
        "ctrlworkpay",
        "norespwtf",
        "tempcacheread",
        "temptacache",
        "505scss0",
        "508jbcars0",
        "515enbotdets0",
        "5082tsports",
        "515vaoprvs",
        "424dagslnv1s0",
        "kcimgattcf",
        "427startpms0",
        "wrapuxslimt5",
        "wrapalgo",
        "wraptopalgo",
        "st14",
        "arankr1_1_9_9",
        "0731ziv2s0",
        "voiceall",
        "1015onstblg",
        "vsspec",
        "cacdiscf",
        "909ajcopus0",
        "scpbfmob",
        "rwt1",
        "cacmuidarb",
        "sappdlpt",
        "917fluxv14",
        "delaygc",
        "remsaconn3p",
        "splitcss3p",
        "sydconfigoptt"
    ]

    location = {

@@ -173,27 +247,128 @@ class Defaults:
    }

    optionsSets = [
-        'saharasugg',
-        'enablenewsfc',
-        'clgalileo',
-        'gencontentv3',
        "nlu_direct_response_filter",
        "deepleo",
        "disable_emoji_spoken_text",
        "responsible_ai_policy_235",
        "enablemm",
-        "h3precise",
-        "dtappid",
-        "cricinfo",
-        "cricinfov2",
-        "dv3sugg",
-        "nojbfedge",
+        "iyxapbing",
+        "iycapbing",
+        "h3imaginative",
+        "clgalileo",
+        "gencontentv3",
+        "fluxv14",
+        "eredirecturl"
    ]

def format_message(msg: dict) -> str:
    return json.dumps(msg, ensure_ascii=False) + Defaults.delimiter

+def build_image_upload_api_payload(image_bin: str, conversation: Conversation, tone: str):
+    payload = {
+        'invokedSkills': ["ImageById"],
+        'subscriptionId': "Bing.Chat.Multimodal",
+        'invokedSkillsRequestData': {
+            'enableFaceBlur': True
+        },
+        'convoData': {
+            'convoid': "",
+            'convotone': tone
+        }
+    }
+    knowledge_request = {
+        'imageInfo': {},
+        'knowledgeRequest': payload
+    }
+    boundary = "----WebKitFormBoundary" + ''.join(random.choices(string.ascii_letters + string.digits, k=16))
+    data = '--' + boundary + '\r\nContent-Disposition: form-data; name="knowledgeRequest"\r\n\r\n' + json.dumps(knowledge_request, ensure_ascii=False) + "\r\n--" + boundary + '\r\nContent-Disposition: form-data; name="imageBase64"\r\n\r\n' + image_bin + "\r\n--" + boundary + "--\r\n"
+    return data, boundary
+
+def is_data_uri_an_image(data_uri):
+    try:
+        # Check if the data URI starts with 'data:image' and contains an image format (e.g., jpeg, png, gif)
+        if not re.match(r'data:image/(\w+);base64,', data_uri):
+            raise ValueError("Invalid data URI image.")
+        # Extract the image format from the data URI
+        image_format = re.match(r'data:image/(\w+);base64,', data_uri).group(1)
+        # Check if the image format is one of the allowed formats (jpg, jpeg, png, gif)
+        if image_format.lower() not in ['jpeg', 'jpg', 'png', 'gif']:
+            raise ValueError("Invalid image format (from mime file type).")
+    except Exception as e:
+        raise e
+
+def is_accepted_format(binary_data):
+    try:
+        check = False
+        if binary_data.startswith(b'\xFF\xD8\xFF'):
+            check = True # It's a JPEG image
+        elif binary_data.startswith(b'\x89PNG\r\n\x1a\n'):
+            check = True # It's a PNG image
+        elif binary_data.startswith(b'GIF87a') or binary_data.startswith(b'GIF89a'):
+            check = True # It's a GIF image
+        elif binary_data.startswith(b'\x89JFIF') or binary_data.startswith(b'JFIF\x00'):
+            check = True # It's a JPEG image
+        elif binary_data.startswith(b'\xFF\xD8'):
+            check = True # It's a JPEG image
+        elif binary_data.startswith(b'RIFF') and binary_data[8:12] == b'WEBP':
+            check = True # It's a WebP image
+        # else we raise ValueError
+        if not check:
+            raise ValueError("Invalid image format (from magic code).")
+    except Exception as e:
+        raise e
+
+def extract_data_uri(data_uri):
+    try:
+        data = data_uri.split(",")[1]
+        data = base64.b64decode(data)
+        return data
+    except Exception as e:
+        raise e
+
+def get_orientation(data: bytes):
+    try:
+        if data[0:2] != b'\xFF\xD8':
+            raise Exception('NotJpeg')
+        with Image.open(data) as img:
+            exif_data = img._getexif()
+            if exif_data is not None:
+                orientation = exif_data.get(274) # 274 corresponds to the orientation tag in EXIF
+                if orientation is not None:
+                    return orientation
+    except Exception:
+        pass
+
+def process_image(orientation, img, new_width, new_height):
+    try:
+        # Initialize the canvas
+        new_img = Image.new("RGB", (new_width, new_height), color="#FFFFFF")
+        if orientation:
+            if orientation > 4:
+                img = img.transpose(Image.FLIP_LEFT_RIGHT)
+            if orientation == 3 or orientation == 4:
+                img = img.transpose(Image.ROTATE_180)
+            if orientation == 5 or orientation == 6:
+                img = img.transpose(Image.ROTATE_270)
+            if orientation == 7 or orientation == 8:
+                img = img.transpose(Image.ROTATE_90)
+        new_img.paste(img, (0, 0))
+        return new_img
+    except Exception as e:
+        raise e
+
+def compress_image_to_base64(img, compression_rate):
+    try:
+        output_buffer = io.BytesIO()
+        img.save(output_buffer, format="JPEG", quality=int(compression_rate * 100))
+        base64_image = base64.b64encode(output_buffer.getvalue()).decode('utf-8')
+        return base64_image
+    except Exception as e:
+        raise e
+
def create_message(conversation: Conversation, prompt: str, tone: str, context: str=None) -> str:

    request_id = str(uuid.uuid4())
    struct = {
        'arguments': [

@@ -213,6 +388,7 @@ def create_message(conversation: Conversation, prompt: str, tone: str, context:
                'requestId': request_id,
                'messageId': request_id,
            },
+            "scenario": "SERP",
            'tone': tone,
            'spokenTextMode': 'None',
            'conversationId': conversation.conversationId,

@@ -225,7 +401,11 @@ def create_message(conversation: Conversation, prompt: str, tone: str, context:
        'target': 'chat',
        'type': 4
    }
+    if conversation.imageInfo != None and "imageUrl" in conversation.imageInfo and "originalImageUrl" in conversation.imageInfo:
+        struct['arguments'][0]['message']['originalImageUrl'] = conversation.imageInfo['originalImageUrl']
+        struct['arguments'][0]['message']['imageUrl'] = conversation.imageInfo['imageUrl']
+        struct['arguments'][0]['experienceType'] = None
+        struct['arguments'][0]['attachedFileInfo'] = {"fileName": None, "fileType": None}
    if context:
        struct['arguments'][0]['previousMessages'] = [{
            "author": "user",

@@ -239,6 +419,7 @@ def create_message(conversation: Conversation, prompt: str, tone: str, context:
async def stream_generate(
    prompt: str,
    tone: str,
+    image: str = None,
    context: str = None,
    proxy: str = None,
    cookies: dict = None

@@ -248,7 +429,7 @@ async def stream_generate(
        cookies=cookies,
        headers=Defaults.headers,
    ) as session:
-        conversation = await create_conversation(session, proxy)
+        conversation = await create_conversation(session, tone, image, proxy)
        try:
            async with session.ws_connect(
                f'wss://sydney.bing.com/sydney/ChatHub',

@@ -264,7 +445,6 @@ async def stream_generate(
            response_txt = ''
            returned_text = ''
            final = False
-
            while not final:
                msg = await wss.receive(timeout=900)
                objects = msg.data.split(Defaults.delimiter)

@@ -299,4 +479,4 @@ async def stream_generate(
                        raise Exception(f"{result['value']}: {result['message']}")
                    return
        finally:
-            await delete_conversation(session, conversation, proxy)
\ No newline at end of file
+            await delete_conversation(session, conversation, proxy)
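For orientation, the new `image` argument threaded through `create_async_generator`, `create_conversation`, and `stream_generate` above expects a base64 data URI, which `is_data_uri_an_image` then validates. A minimal sketch of how a caller might build one (not part of the diff; file path and mime type are illustrative):

```py
# Illustrative sketch: building the data URI the Bing provider validates.
import base64

def to_data_uri(file_path: str, mime: str = "image/jpeg") -> str:
    # Encode the raw bytes and prepend the mime header checked by
    # is_data_uri_an_image / is_accepted_format above.
    with open(file_path, "rb") as f:
        encoded = base64.b64encode(f.read()).decode()
    return f"data:{mime};base64,{encoded}"

# The extra keyword would be forwarded to the provider via **kwargs, e.g.:
# g4f.ChatCompletion.create(model=g4f.models.default, provider=g4f.Provider.Bing,
#                           messages=[...], image=to_data_uri("photo.jpg"))
```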
@@ -10,7 +10,7 @@ from .base_provider import AsyncGeneratorProvider

class ChatForAi(AsyncGeneratorProvider):
    url = "https://chatforai.store"
-    working = False
+    working = True
    supports_gpt_35_turbo = True

    @classmethod
@@ -1,36 +1,34 @@
from __future__ import annotations

-import re
+import re, html, json, string, random
from aiohttp import ClientSession

-from ..typing import Messages
-from .base_provider import AsyncProvider, format_prompt
+from ..typing import Messages, AsyncResult
+from .base_provider import AsyncGeneratorProvider


-class ChatgptAi(AsyncProvider):
-    url: str = "https://chatgpt.ai/"
+class ChatgptAi(AsyncGeneratorProvider):
+    url: str = "https://chatgpt.ai"
    working = True
    supports_gpt_35_turbo = True
-    _nonce = None
-    _post_id = None
-    _bot_id = None
+    _system = None

    @classmethod
-    async def create_async(
+    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
-    ) -> str:
+    ) -> AsyncResult:
        headers = {
            "authority" : "chatgpt.ai",
            "accept" : "*/*",
-            "accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+            "accept-language" : "en-US",
            "cache-control" : "no-cache",
-            "origin" : "https://chatgpt.ai",
+            "origin" : cls.url,
            "pragma" : "no-cache",
-            "referer" : cls.url,
+            "referer" : f"{cls.url}/",
            "sec-ch-ua" : '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
            "sec-ch-ua-mobile" : "?0",
            "sec-ch-ua-platform" : '"Windows"',

@@ -42,34 +40,40 @@ class ChatgptAi(AsyncProvider):
        async with ClientSession(
            headers=headers
        ) as session:
-            if not cls._nonce:
+            if not cls._system:
                async with session.get(cls.url, proxy=proxy) as response:
                    response.raise_for_status()
                    text = await response.text()
-                    result = re.search(r'data-nonce="(.*?)"', text)
+                    result = re.search(r"data-system='(.*?)'", text)
                    if result:
-                        cls._nonce = result.group(1)
-                    result = re.search(r'data-post-id="(.*?)"', text)
-                    if result:
-                        cls._post_id = result.group(1)
-                    result = re.search(r'data-bot-id="(.*?)"', text)
-                    if result:
-                        cls._bot_id = result.group(1)
-                    if not cls._nonce or not cls._post_id or not cls._bot_id:
-                        raise RuntimeError("Nonce, post-id or bot-id not found")
+                        cls._system = json.loads(html.unescape(result.group(1)))
+            if not cls._system:
+                raise RuntimeError("System args not found")

            data = {
-                "_wpnonce": cls._nonce,
-                "post_id": cls._post_id,
-                "url": "https://chatgpt.ai",
-                "action": "wpaicg_chat_shortcode_message",
-                "message": format_prompt(messages),
-                "bot_id": cls._bot_id
+                "botId": cls._system["botId"],
+                "customId": cls._system["customId"],
+                "session": cls._system["sessionId"],
+                "chatId": "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=11)),
+                "contextId": cls._system["contextId"],
+                "messages": messages,
+                "newMessage": messages[-1]["content"],
+                "stream": True
            }
            async with session.post(
-                "https://chatgpt.ai/wp-admin/admin-ajax.php",
+                f"{cls.url}/wp-json/mwai-ui/v1/chats/submit",
                proxy=proxy,
-                data=data
+                json=data
            ) as response:
                response.raise_for_status()
-                return (await response.json())["data"]
+                async for line in response.content:
+                    if line.startswith(b"data: "):
+                        try:
+                            line = json.loads(line[6:])
+                            assert "type" in line
+                        except:
+                            raise RuntimeError(f"Broken line: {line.decode()}")
+                        if line["type"] == "live":
+                            yield line["data"]
+                        elif line["type"] == "end":
+                            break
@@ -14,7 +14,7 @@ from .helper import format_prompt, get_cookies
class ChatgptFree(AsyncProvider):
    url = "https://chatgptfree.ai"
    supports_gpt_35_turbo = True
-    working = True
+    working = False
    _post_id = None
    _nonce = None

@@ -24,6 +24,7 @@ class ChatgptFree(AsyncProvider):
        model: str,
        messages: Messages,
        proxy: str = None,
+        timeout: int = 120,
        cookies: dict = None,
        **kwargs
    ) -> str:

@@ -45,14 +46,19 @@ class ChatgptFree(AsyncProvider):
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
-            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
+            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
        }

-        async with StreamSession(headers=headers,
-                impersonate="chrome107", proxies={"https": proxy}, timeout=10) as session:
+        async with StreamSession(
+            headers=headers,
+            cookies=cookies,
+            impersonate="chrome107",
+            proxies={"https": proxy},
+            timeout=timeout
+        ) as session:

            if not cls._nonce:
-                async with session.get(f"{cls.url}/", cookies=cookies) as response:
+                async with session.get(f"{cls.url}/") as response:

                    response.raise_for_status()
                    response = await response.text()
g4f/Provider/FakeGpt.py (new file, 94 lines)
@@ -0,0 +1,94 @@
from __future__ import annotations

import uuid, time, random, string, json
from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt


class FakeGpt(AsyncGeneratorProvider):
    url = "https://chat-shared2.zhile.io"
    supports_gpt_35_turbo = True
    working = True
    _access_token = None
    _cookie_jar = None

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        headers = {
            "Accept-Language": "en-US",
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36",
            "Referer": "https://chat-shared2.zhile.io/?v=2",
            "sec-ch-ua": '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
            "sec-ch-ua-platform": '"Linux"',
            "sec-ch-ua-mobile": "?0",
        }
        async with ClientSession(headers=headers, cookie_jar=cls._cookie_jar) as session:
            if not cls._access_token:
                async with session.get(f"{cls.url}/api/loads", params={"t": int(time.time())}, proxy=proxy) as response:
                    response.raise_for_status()
                    list = (await response.json())["loads"]
                    token_ids = [t["token_id"] for t in list if t["count"] == 0]
                data = {
                    "token_key": random.choice(token_ids),
                    "session_password": random_string()
                }
                async with session.post(f"{cls.url}/auth/login", data=data, proxy=proxy) as response:
                    response.raise_for_status()
                async with session.get(f"{cls.url}/api/auth/session", proxy=proxy) as response:
                    response.raise_for_status()
                    cls._access_token = (await response.json())["accessToken"]
                    cls._cookie_jar = session.cookie_jar
            headers = {
                "Content-Type": "application/json",
                "Accept": "text/event-stream",
                "X-Authorization": f"Bearer {cls._access_token}",
            }
            prompt = format_prompt(messages)
            data = {
                "action": "next",
                "messages": [
                    {
                        "id": str(uuid.uuid4()),
                        "author": {"role": "user"},
                        "content": {"content_type": "text", "parts": [prompt]},
                        "metadata": {},
                    }
                ],
                "parent_message_id": str(uuid.uuid4()),
                "model": "text-davinci-002-render-sha",
                "plugin_ids": [],
                "timezone_offset_min": -120,
                "suggestions": [],
                "history_and_training_disabled": True,
                "arkose_token": "",
                "force_paragen": False,
            }
            last_message = ""
            async with session.post(f"{cls.url}/api/conversation", json=data, headers=headers, proxy=proxy) as response:
                async for line in response.content:
                    if line.startswith(b"data: "):
                        line = line[6:]
                        if line == b"[DONE]":
                            break
                        try:
                            line = json.loads(line)
                            if line["message"]["metadata"]["message_type"] == "next":
                                new_message = line["message"]["content"]["parts"][0]
                                yield new_message[len(last_message):]
                                last_message = new_message
                        except:
                            continue
            if not last_message:
                raise RuntimeError("No valid response")

def random_string(length: int = 10):
    return ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(length))
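FakeGpt's streaming loop yields only the suffix of each cumulative SSE payload (`new_message[len(last_message):]`), since the upstream API resends the full text so far on every event. A minimal standalone illustration of that delta logic (not part of the diff):

```py
# Illustrative sketch: each snapshot carries the whole message so far,
# and only the newly appended part is yielded to the caller.
def stream_deltas(snapshots):
    last = ""
    for snapshot in snapshots:
        yield snapshot[len(last):]
        last = snapshot

print(list(stream_deltas(["He", "Hello", "Hello!"])))  # ['He', 'llo', '!']
```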
@@ -41,7 +41,10 @@ class FreeGpt(AsyncGeneratorProvider):
            async with session.post(f"{url}/api/generate", json=data) as response:
                response.raise_for_status()
                async for chunk in response.iter_content():
-                    yield chunk.decode()
+                    chunk = chunk.decode()
+                    if chunk == "当前地区当日额度已消耗完":
+                        raise RuntimeError("Rate limit reached")
+                    yield chunk

    @classmethod
    @property
@@ -14,19 +14,23 @@ class GeekGpt(BaseProvider):
    supports_gpt_4 = True

    @classmethod
-    def create_completion(cls,
-        model: str,
-        messages: Messages,
-        stream: bool, **kwargs) -> CreateResult:
+    def create_completion(
+        cls,
+        model: str,
+        messages: Messages,
+        stream: bool,
+        **kwargs
+    ) -> CreateResult:
        if not model:
            model = "gpt-3.5-turbo"
        json_data = {
            'messages': messages,
            'model': model,
            'temperature': kwargs.get('temperature', 0.9),
            'presence_penalty': kwargs.get('presence_penalty', 0),
            'top_p': kwargs.get('top_p', 1),
            'frequency_penalty': kwargs.get('frequency_penalty', 0),
            'stream': True
        }

        data = dumps(json_data, separators=(',', ':'))

@@ -61,7 +65,6 @@ class GeekGpt(BaseProvider):

        try:
            content = json.loads(json_data)["choices"][0]["delta"].get("content")
-
        except Exception as e:
            raise RuntimeError(f'error | {e} :', json_data)
g4f/Provider/Hashnode.py (new file, 79 lines)
@@ -0,0 +1,79 @@
from __future__ import annotations

import secrets
from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider

class SearchTypes():
    quick = "quick"
    code = "code"
    websearch = "websearch"

class Hashnode(AsyncGeneratorProvider):
    url = "https://hashnode.com"
    supports_gpt_35_turbo = True
    working = True
    _sources = []

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        search_type: str = SearchTypes.websearch,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
            "Accept": "*/*",
            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
            "Accept-Encoding": "gzip, deflate, br",
            "Referer": f"{cls.url}/rix",
            "Content-Type": "application/json",
            "Origin": cls.url,
            "Connection": "keep-alive",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "Pragma": "no-cache",
            "Cache-Control": "no-cache",
            "TE": "trailers",
        }
        async with ClientSession(headers=headers) as session:
            prompt = messages[-1]["content"]
            cls._sources = []
            if search_type == "websearch":
                async with session.post(
                    f"{cls.url}/api/ai/rix/search",
                    json={"prompt": prompt},
                    proxy=proxy,
                ) as response:
                    response.raise_for_status()
                    cls._sources = (await response.json())["result"]
            data = {
                "chatId": secrets.token_hex(16).zfill(32),
                "history": messages,
                "prompt": prompt,
                "searchType": search_type,
                "urlToScan": None,
                "searchResults": cls._sources,
            }
            async with session.post(
                f"{cls.url}/api/ai/rix/completion",
                json=data,
                proxy=proxy,
            ) as response:
                response.raise_for_status()
                async for chunk in response.content.iter_any():
                    if chunk:
                        yield chunk.decode()

    @classmethod
    def get_sources(cls) -> list:
        return [{
            "title": source["name"],
            "url": source["url"]
        } for source in cls._sources]
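A hedged usage sketch for the new provider (not part of the diff; the prompt is illustrative, and `create_async_generator`/`get_sources` are the entry points defined above):

```py
# Illustrative sketch: driving Hashnode directly and reading the
# collected websearch sources after the stream finishes.
import asyncio
from g4f.Provider import Hashnode
from g4f.Provider.Hashnode import SearchTypes

async def main():
    async for chunk in Hashnode.create_async_generator(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "What is Hashnode?"}],
        search_type=SearchTypes.websearch,
    ):
        print(chunk, end="")
    print(Hashnode.get_sources())  # the search results used for the answer

asyncio.run(main())
```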
@@ -30,7 +30,7 @@ models = {

class Liaobots(AsyncGeneratorProvider):
    url = "https://liaobots.site"
-    working = False
+    working = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = True
    _auth_code = None
g4f/Provider/MyShell.py (new file, 89 lines)
@@ -0,0 +1,89 @@
from __future__ import annotations

import time, random, json

from ..requests import StreamSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt

class MyShell(AsyncGeneratorProvider):
    url = "https://app.myshell.ai/chat"

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        timeout: int = 120,
        **kwargs
    ) -> AsyncResult:
        user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36"
        headers = {
            "User-Agent": user_agent,
            "Myshell-Service-Name": "organics-api",
            "Visitor-Id": generate_visitor_id(user_agent)
        }
        async with StreamSession(
            impersonate="chrome107",
            proxies={"https": proxy},
            timeout=timeout,
            headers=headers
        ) as session:
            prompt = format_prompt(messages)
            data = {
                "botId": "1",
                "conversation_scenario": 3,
                "message": prompt,
                "messageType": 1
            }
            async with session.post("https://api.myshell.ai/v1/bot/chat/send_message", json=data) as response:
                response.raise_for_status()
                event = None
                async for line in response.iter_lines():
                    if line.startswith(b"event: "):
                        event = line[7:]
                    elif event == b"MESSAGE_REPLY_SSE_ELEMENT_EVENT_NAME_TEXT":
                        if line.startswith(b"data: "):
                            yield json.loads(line[6:])["content"]
                    if event == b"MESSAGE_REPLY_SSE_ELEMENT_EVENT_NAME_TEXT_STREAM_PUSH_FINISHED":
                        break


def xor_hash(B: str):
    r = []
    i = 0

    def o(e, t):
        o_val = 0
        for i in range(len(t)):
            o_val |= r[i] << (8 * i)
        return e ^ o_val

    for e in range(len(B)):
        t = ord(B[e])
        r.insert(0, 255 & t)

        if len(r) >= 4:
            i = o(i, r)
            r = []

    if len(r) > 0:
        i = o(i, r)

    return hex(i)[2:]

def performance() -> str:
    t = int(time.time() * 1000)
    e = 0
    while t == int(time.time() * 1000):
        e += 1
    return hex(t)[2:] + hex(e)[2:]

def generate_visitor_id(user_agent: str) -> str:
    f = performance()
    r = hex(int(random.random() * (16**16)))[2:-2]
    d = xor_hash(user_agent)
    e = hex(1080 * 1920)[2:]
    return f"{f}-{r}-{d}-{e}-{f}"
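The visitor id imitates a browser fingerprint: a millisecond performance timestamp, a random nonce, a XOR hash of the user agent, and a hard-coded screen area, with the timestamp repeated at the end. A quick check of the shape (not part of the diff; values vary per run):

```py
# Illustrative sketch: the generated visitor id has the shape
# "<perf>-<random>-<ua-hash>-<screen-area>-<perf>".
ua = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36"
visitor_id = generate_visitor_id(ua)
parts = visitor_id.split("-")
assert len(parts) == 5 and parts[0] == parts[4]  # timestamp at both ends
assert parts[3] == hex(1080 * 1920)[2:]          # fixed 1920x1080 screen area
```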
@@ -61,6 +61,8 @@ class NoowAi(AsyncGeneratorProvider):
                        yield line["data"]
                    elif line["type"] == "end":
                        break
+                    elif line["type"] == "error":
+                        raise RuntimeError(line["data"])

def random_string(length: int = 10):
    return ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(length))
@@ -1,6 +1,6 @@
from __future__ import annotations

-import random
+import random, string
from datetime import datetime

from ..typing import AsyncResult, Messages

@@ -22,7 +22,7 @@ class Phind(AsyncGeneratorProvider):
        timeout: int = 120,
        **kwargs
    ) -> AsyncResult:
-        chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
+        chars = string.ascii_lowercase + string.digits
        user_id = ''.join(random.choice(chars) for _ in range(24))
        data = {
            "question": format_prompt(messages),
@@ -4,7 +4,6 @@ import json, base64, requests, execjs, random, uuid

from ..typing import Messages, TypedDict, CreateResult, Any
from .base_provider import BaseProvider
from abc import abstractmethod
-from ..debug import logging


@@ -15,12 +14,13 @@ class Vercel(BaseProvider):
    supports_stream = True

    @staticmethod
    @abstractmethod
    def create_completion(
        model: str,
        messages: Messages,
        stream: bool,
-        proxy: str = None, **kwargs) -> CreateResult:
+        proxy: str = None,
+        **kwargs
+    ) -> CreateResult:

        if not model:
            model = "gpt-3.5-turbo"

@@ -65,7 +65,7 @@ class Vercel(BaseProvider):
            headers=headers, json=json_data, stream=True, proxies={"https": proxy})
        try:
            response.raise_for_status()
-        except Exception:
+        except:
            continue
        for token in response.iter_content(chunk_size=None):
            yield token.decode()
@@ -1,7 +1,7 @@
from __future__ import annotations

import random
-from aiohttp import ClientSession
+from ..requests import StreamSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, format_prompt

@@ -9,7 +9,7 @@ from .base_provider import AsyncGeneratorProvider, format_prompt

class Yqcloud(AsyncGeneratorProvider):
    url = "https://chat9.yqcloud.top/"
-    working = False
+    working = True
    supports_gpt_35_turbo = True

    @staticmethod

@@ -17,15 +17,16 @@ class Yqcloud(AsyncGeneratorProvider):
        model: str,
        messages: Messages,
        proxy: str = None,
+        timeout: int = 120,
        **kwargs,
    ) -> AsyncResult:
-        async with ClientSession(
-            headers=_create_header()
+        async with StreamSession(
+            headers=_create_header(), proxies={"https": proxy}, timeout=timeout
        ) as session:
            payload = _create_payload(messages, **kwargs)
-            async with session.post("https://api.aichatos.cloud/api/generateStream", proxy=proxy, json=payload) as response:
+            async with session.post("https://api.aichatos.cloud/api/generateStream", json=payload) as response:
                response.raise_for_status()
-                async for chunk in response.content.iter_any():
+                async for chunk in response.iter_content():
                    if chunk:
                        chunk = chunk.decode()
                        if "sorry, 您的ip已由于触发防滥用检测而被封禁" in chunk:

@@ -38,6 +39,7 @@ def _create_header():
        "accept" : "application/json, text/plain, */*",
        "content-type" : "application/json",
        "origin" : "https://chat9.yqcloud.top",
+        "referer" : "https://chat9.yqcloud.top/"
    }
@@ -17,15 +17,17 @@ from .ChatgptFree import ChatgptFree
from .ChatgptLogin import ChatgptLogin
from .ChatgptX import ChatgptX
from .Cromicle import Cromicle
+from .FakeGpt import FakeGpt
from .FreeGpt import FreeGpt
from .GPTalk import GPTalk
from .GptChatly import GptChatly
from .GptForLove import GptForLove
from .GptGo import GptGo
from .GptGod import GptGod
+from .Hashnode import Hashnode
from .Liaobots import Liaobots
from .Llama2 import Llama2
-from .Myshell import Myshell
+from .MyShell import MyShell
from .NoowAi import NoowAi
from .Opchatgpts import Opchatgpts
from .Phind import Phind

@@ -73,6 +75,7 @@ class ProviderUtils:
        'Equing': Equing,
        'FastGpt': FastGpt,
        'Forefront': Forefront,
+        'FakeGpt': FakeGpt,
        'FreeGpt': FreeGpt,
        'GPTalk': GPTalk,
        'GptChatly': GptChatly,

@@ -80,6 +83,7 @@ class ProviderUtils:
        'GptForLove': GptForLove,
        'GptGo': GptGo,
        'GptGod': GptGod,
+        'Hashnode': Hashnode,
        'H2o': H2o,
        'HuggingChat': HuggingChat,
        'Komo': Komo,

@@ -88,6 +92,7 @@ class ProviderUtils:
        'Lockchat': Lockchat,
        'MikuChat': MikuChat,
        'Myshell': Myshell,
+        'MyShell': MyShell,
        'NoowAi': NoowAi,
        'Opchatgpts': Opchatgpts,
        'OpenAssistant': OpenAssistant,

@@ -143,6 +148,7 @@ __all__ = [
    'DfeHub',
    'EasyChat',
    'Forefront',
+    'FakeGpt',
    'FreeGpt',
    'GPTalk',
    'GptChatly',

@@ -150,12 +156,14 @@ __all__ = [
    'GetGpt',
    'GptGo',
    'GptGod',
+    'Hashnode',
    'H2o',
    'HuggingChat',
    'Liaobots',
    'Llama2',
    'Lockchat',
    'Myshell',
+    'MyShell',
    'NoowAi',
    'Opchatgpts',
    'Raycast',
@@ -8,8 +8,8 @@ from aiohttp import ClientSession
from aiohttp.http import WSMsgType
import asyncio

-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, format_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, format_prompt


models = {

@@ -174,46 +174,4 @@ def generate_visitor_id(user_agent: str) -> str:
    r = hex(int(random.random() * (16**16)))[2:-2]
    d = xor_hash(user_agent)
    e = hex(1080 * 1920)[2:]
    return f"{f}-{r}-{d}-{e}-{f}"
-
-
-# update
-# from g4f.requests import StreamSession
-
-# async def main():
-#     headers = {
-#         'authority': 'api.myshell.ai',
-#         'accept': 'application/json',
-#         'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-#         'content-type': 'application/json',
-#         'myshell-service-name': 'organics-api',
-#         'origin': 'https://app.myshell.ai',
-#         'referer': 'https://app.myshell.ai/',
-#         'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
-#         'sec-ch-ua-mobile': '?0',
-#         'sec-ch-ua-platform': '"macOS"',
-#         'sec-fetch-dest': 'empty',
-#         'sec-fetch-mode': 'cors',
-#         'sec-fetch-site': 'same-site',
-#         'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
-#         'visitor-id': '18ae8fe5d916d3-0213f29594b17f-18525634-157188-18ae8fe5d916d3',
-#     }
-
-#     json_data = {
-#         'conversation_scenario': 3,
-#         'botId': '4738',
-#         'message': 'hi',
-#         'messageType': 1,
-#     }
-
-#     async with StreamSession(headers=headers, impersonate="chrome110") as session:
-#         async with session.post(f'https://api.myshell.ai/v1/bot/chat/send_message',
-#             json=json_data) as response:
-
-#             response.raise_for_status()
-#             async for chunk in response.iter_content():
-#                 print(chunk.decode("utf-8"))
-
-#     import asyncio
-#     asyncio.run(main())
@@ -12,4 +12,5 @@ from .V50 import V50
from .FastGpt import FastGpt
from .Aivvm import Aivvm
from .Vitalentum import Vitalentum
-from .H2o import H2o
\ No newline at end of file
+from .H2o import H2o
+from .Myshell import Myshell
@@ -3,13 +3,14 @@ from __future__ import annotations
import sys
import asyncio
import webbrowser
-import http.cookiejar

+from os import path
from asyncio import AbstractEventLoop
+from platformdirs import user_config_dir

from ..typing import Dict, Messages
from browser_cookie3 import chrome, chromium, opera, opera_gx, brave, edge, vivaldi, firefox, BrowserCookieError

+from .. import debug

# Change event loop policy on windows
if sys.platform == 'win32':

@@ -44,7 +45,6 @@ def get_event_loop() -> AbstractEventLoop:
    )

def init_cookies():
-
    urls = [
        'https://chat-gpt.org',
        'https://www.aitianhu.com',

@@ -72,16 +72,26 @@ def init_cookies():
# Load cookies for a domain from all supported browsers.
# Cache the results in the "_cookies" variable.
def get_cookies(domain_name=''):
-    cj = http.cookiejar.CookieJar()
-    for cookie_fn in [chrome, chromium, opera, opera_gx, brave, edge, vivaldi, firefox]:
+    if domain_name in _cookies:
+        return _cookies[domain_name]
+    def g4f(domain_name):
+        user_data_dir = user_config_dir("g4f")
+        cookie_file = path.join(user_data_dir, "Default", "Cookies")
+        if not path.exists(cookie_file):
+            return []
+        return chrome(cookie_file, domain_name)
+    cookies = {}
+    for cookie_fn in [g4f, chrome, chromium, opera, opera_gx, brave, edge, vivaldi, firefox]:
        try:
-            for cookie in cookie_fn(domain_name=domain_name):
-                cj.set_cookie(cookie)
-        except BrowserCookieError:
+            cookie_jar = cookie_fn(domain_name=domain_name)
+            if len(cookie_jar) and debug.logging:
+                print(f"Read cookies from {cookie_fn.__name__} for {domain_name}")
+            for cookie in cookie_jar:
+                if cookie.name not in cookies:
+                    cookies[cookie.name] = cookie.value
+        except BrowserCookieError as e:
            pass

-    _cookies[domain_name] = {cookie.name: cookie.value for cookie in cj}
+    _cookies[domain_name] = cookies
    return _cookies[domain_name]

@@ -100,10 +110,8 @@ def format_prompt(messages: Messages, add_special_tokens=False) -> str:

def get_browser(user_data_dir: str = None):
    from undetected_chromedriver import Chrome
-    from platformdirs import user_config_dir

    if not user_data_dir:
        user_data_dir = user_config_dir("g4f")
        user_data_dir = path.join(user_data_dir, "Default")

    return Chrome(user_data_dir=user_data_dir)
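With this change, cookies from the bundled `g4f` browser profile are tried before the system browsers, and the first occurrence of each cookie name wins; results stay cached per domain. A hedged usage sketch (not part of the diff; the domain is illustrative):

```py
# Illustrative sketch: fetching cookies for a provider domain.
# Repeated calls are served from the per-domain _cookies cache
# instead of re-reading browser stores.
from g4f.Provider.helper import get_cookies

cookies = get_cookies(".bing.com")        # e.g. {"SRCHD": "...", ...} if a browser has them
cookies_again = get_cookies(".bing.com")  # cache hit, no browser access
```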
@@ -3,7 +3,7 @@ from __future__ import annotations
import uuid, json, time

from ..base_provider import AsyncGeneratorProvider
-from ..helper import get_browser, get_cookies, format_prompt
+from ..helper import get_browser, get_cookies, format_prompt, get_event_loop
from ...typing import AsyncResult, Messages
from ...requests import StreamSession

@@ -73,26 +73,33 @@ class OpenaiChat(AsyncGeneratorProvider):
                        last_message = new_message

    @classmethod
-    def browse_access_token(cls) -> str:
-        try:
-            from selenium.webdriver.common.by import By
-            from selenium.webdriver.support.ui import WebDriverWait
-            from selenium.webdriver.support import expected_conditions as EC
-
-            driver = get_browser()
-        except ImportError:
-            return
-
-        driver.get(f"{cls.url}/")
-        try:
-            WebDriverWait(driver, 1200).until(
-                EC.presence_of_element_located((By.ID, "prompt-textarea"))
-            )
-            javascript = "return (await (await fetch('/api/auth/session')).json())['accessToken']"
-            return driver.execute_script(javascript)
-        finally:
-            time.sleep(1)
-            driver.quit()
+    async def browse_access_token(cls) -> str:
+        def browse() -> str:
+            try:
+                from selenium.webdriver.common.by import By
+                from selenium.webdriver.support.ui import WebDriverWait
+                from selenium.webdriver.support import expected_conditions as EC
+
+                driver = get_browser()
+            except ImportError:
+                return
+
+            driver.get(f"{cls.url}/")
+            try:
+                WebDriverWait(driver, 1200).until(
+                    EC.presence_of_element_located((By.ID, "prompt-textarea"))
+                )
+                javascript = "return (await (await fetch('/api/auth/session')).json())['accessToken']"
+                return driver.execute_script(javascript)
+            finally:
+                driver.close()
+                time.sleep(0.1)
+                driver.quit()
+        loop = get_event_loop()
+        return await loop.run_in_executor(
+            None,
+            browse
+        )

@@ -110,7 +117,7 @@ class OpenaiChat(AsyncGeneratorProvider):
        if cookies:
            cls._access_token = await cls.fetch_access_token(cookies, proxies)
        if not cls._access_token:
-            cls._access_token = cls.browse_access_token()
+            cls._access_token = await cls.browse_access_token()
        if not cls._access_token:
            raise RuntimeError("Read access token failed")
        return cls._access_token
@@ -5,13 +5,13 @@ import random
from typing import List, Type, Dict
from ..typing import CreateResult, Messages
from .base_provider import BaseProvider, AsyncProvider
+from .. import debug


class RetryProvider(AsyncProvider):
    __name__: str = "RetryProvider"
    working: bool = True
    supports_stream: bool = True
-    logging: bool = False

    def __init__(
        self,

@@ -39,7 +39,7 @@ class RetryProvider(AsyncProvider):
        started: bool = False
        for provider in providers:
            try:
-                if self.logging:
+                if debug.logging:
                    print(f"Using {provider.__name__} provider")

                for token in provider.create_completion(model, messages, stream, **kwargs):

@@ -51,7 +51,7 @@ class RetryProvider(AsyncProvider):

            except Exception as e:
                self.exceptions[provider.__name__] = e
-                if self.logging:
+                if debug.logging:
                    print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
                if started:
                    raise e

@@ -71,22 +71,21 @@ class RetryProvider(AsyncProvider):
        self.exceptions: Dict[str, Exception] = {}
        for provider in providers:
            try:
-                return await asyncio.wait_for(provider.create_async(model, messages, **kwargs), timeout=60)
-            except asyncio.TimeoutError as e:
-                self.exceptions[provider.__name__] = e
-                if self.logging:
-                    print(f"{provider.__name__}: TimeoutError: {e}")
+                return await asyncio.wait_for(
+                    provider.create_async(model, messages, **kwargs),
+                    timeout=kwargs.get("timeout", 60)
+                )
            except Exception as e:
                self.exceptions[provider.__name__] = e
-                if self.logging:
+                if debug.logging:
                    print(f"{provider.__name__}: {e.__class__.__name__}: {e}")

        self.raise_exceptions()

    def raise_exceptions(self) -> None:
        if self.exceptions:
-            raise RuntimeError("\n".join(["All providers failed:"] + [
+            raise RuntimeError("\n".join(["RetryProvider failed:"] + [
                f"{p}: {self.exceptions[p].__class__.__name__}: {self.exceptions[p]}" for p in self.exceptions
            ]))

-        raise RuntimeError("No provider found")
+        raise RuntimeError("RetryProvider: No provider found")
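With the timeout now taken from kwargs and all exception types retried (not just `asyncio.TimeoutError`), a caller can tune both per request. A hedged sketch (not part of the diff; the provider choices are illustrative):

```py
# Illustrative sketch: RetryProvider walks its provider list on the async
# path, honors a timeout passed through kwargs, and raises one aggregated
# RuntimeError when every provider fails.
import asyncio
import g4f
from g4f.Provider import RetryProvider, Phind, GptGo

async def main():
    try:
        response = await g4f.ChatCompletion.create_async(
            model="gpt-3.5-turbo",
            provider=RetryProvider([Phind, GptGo]),
            messages=[{"role": "user", "content": "Hello"}],
            timeout=30,  # picked up via kwargs.get("timeout", 60) above
        )
        print(response)
    except RuntimeError as e:
        print(e)  # "RetryProvider failed:" with one line per provider's error

asyncio.run(main())
```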
@@ -3,9 +3,9 @@ from requests import get
from .models import Model, ModelUtils, _all_models
from .Provider import BaseProvider, RetryProvider
from .typing import Messages, CreateResult, Union, List
-from .debug import logging
+from . import debug

-version = '0.1.7.3'
+version = '0.1.7.5'
version_check = True

def check_pypi_version() -> None:

@@ -46,8 +46,7 @@ def get_model_and_provider(model : Union[Model, str],
    if not provider.supports_stream and stream:
        raise ValueError(f'{provider.__name__} does not support "stream" argument')

-    if logging:
-        RetryProvider.logging = True
+    if debug.logging:
        print(f'Using {provider.__name__} provider')

    return model, provider
@@ -3,20 +3,14 @@ from dataclasses import dataclass
from .typing import Union
from .Provider import BaseProvider, RetryProvider
from .Provider import (
    ChatgptLogin,
    ChatgptDemo,
    ChatgptDuo,
    GptForLove,
    Opchatgpts,
    ChatgptAi,
    GptChatly,
    Liaobots,
    ChatgptX,
    Yqcloud,
    ChatBase,
    GeekGpt,
    Myshell,
+    FakeGpt,
    FreeGpt,
    Cromicle,
    NoowAi,
    Vercel,
    Aichat,

@@ -24,15 +18,10 @@ from .Provider import (
    AiAsk,
    GptGo,
    Phind,
    Ylokh,
    Bard,
    Aibn,
    Bing,
    You,
    H2o,
    ChatForAi,
    ChatBase
)

@dataclass(unsafe_hash=True)

@@ -50,9 +39,8 @@ default = Model(
    base_provider = "",
    best_provider = RetryProvider([
-        Bing, # Not fully GPT 3 or 4
-        Yqcloud, # Answers short questions in chinese
-        ChatgptDuo, # Include search results
-        Aibn, Aichat, ChatgptAi, ChatgptLogin, FreeGpt, GptGo, Myshell, Ylokh, GeekGpt
+        AiAsk, Aichat, ChatgptAi, FreeGpt, GptGo, GeekGpt,
+        Phind, You
    ])
)

@@ -61,9 +49,10 @@ gpt_35_long = Model(
    name = 'gpt-3.5-turbo',
    base_provider = 'openai',
    best_provider = RetryProvider([
-        AiAsk, Aichat, ChatgptDemo, FreeGpt, Liaobots, You,
-        GPTalk, ChatgptLogin, GptChatly, GptForLove, Opchatgpts,
-        NoowAi, GeekGpt, Phind
+        AiAsk, Aichat, FreeGpt, You,
+        GptChatly, GptForLove,
+        NoowAi, GeekGpt, Phind,
+        FakeGpt
    ])
)

@@ -72,8 +61,8 @@ gpt_35_turbo = Model(
    name = 'gpt-3.5-turbo',
    base_provider = 'openai',
    best_provider=RetryProvider([
-        ChatgptX, ChatgptDemo, GptGo, You,
-        NoowAi, GPTalk, GptForLove, Phind, ChatBase, Cromicle
+        ChatgptX, GptGo, You,
+        NoowAi, GPTalk, GptForLove, Phind, ChatBase
    ])
)

@@ -81,7 +70,7 @@ gpt_4 = Model(
    name = 'gpt-4',
    base_provider = 'openai',
    best_provider = RetryProvider([
-        Bing, GeekGpt, Liaobots, Phind
+        Bing, GeekGpt, Phind
    ])
)
@@ -15,4 +15,7 @@ nest_asyncio
waitress
werkzeug
loguru
-tiktoken
\ No newline at end of file
+tiktoken
+pillow
+platformdirs
+numpy
setup.py (2 changes)
@@ -11,7 +11,7 @@ with codecs.open(os.path.join(here, "README.md"), encoding="utf-8") as fh:
with open("requirements.txt") as f:
    required = f.read().splitlines()

-VERSION = "0.1.7.3"
+VERSION = "0.1.7.5"
DESCRIPTION = (
    "The official gpt4free repository | various collection of powerful language models"
)