'Refactored by Sourcery' (#1125)

Co-authored-by: Sourcery AI <>
ⲘrṨhส∂ow authored on 2023-10-23 09:46:25 +02:00; committed by GitHub
parent b1dbf66587
commit 3982f39424
50 changed files with 416 additions and 429 deletions


@@ -5,9 +5,7 @@ async def log_time_async(method: callable, **kwargs):
start = time()
result = await method(**kwargs)
secs = f"{round(time() - start, 2)} secs"
if result:
return " ".join([result, secs])
return secs
return " ".join([result, secs]) if result else secs
def log_time_yield(method: callable, **kwargs):
@@ -20,6 +18,4 @@ def log_time(method: callable, **kwargs):
start = time()
result = method(**kwargs)
secs = f"{round(time() - start, 2)} secs"
if result:
return " ".join([result, secs])
return secs
return " ".join([result, secs]) if result else secs
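
Both hunks above fold a trailing if/return pair into one conditional expression; behaviour is unchanged, since a falsy result (None or an empty string) still yields the bare timing string. A minimal runnable sketch of the refactored helper, with illustrative names and the import it needs:

from time import time

def log_time(method, **kwargs):
    # Time the call and append the elapsed seconds to a truthy result.
    start = time()
    result = method(**kwargs)
    secs = f"{round(time() - start, 2)} secs"
    # One expression replaces the old if/return pair.
    return " ".join([result, secs]) if result else secs

print(log_time(lambda: "done"))  # e.g. "done 0.0 secs"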


@@ -22,11 +22,11 @@ def test_provider(provider):
return None
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = []
for provider in __all__:
if provider not in _:
futures.append(executor.submit(test_provider, provider))
futures = [
executor.submit(test_provider, provider)
for provider in __all__
if provider not in _
]
for future in concurrent.futures.as_completed(futures):
result = future.result()
if result:
if result := future.result():
print(f'{result[1]} | {result[0]}')
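
Two transforms land in this hunk: the for/append loop becomes a list comprehension, and result = future.result() plus if result: becomes a single assignment expression (the walrus operator, Python 3.8+). A reduced sketch of the same shape; the provider names and the stand-in check are placeholders, not the repo's:

import concurrent.futures

def test_provider(name):
    # Stand-in for the real provider check; returns a tuple or None.
    return None if name == "broken" else ("works", name)

providers = ["ProviderA", "broken", "ProviderB"]
with concurrent.futures.ThreadPoolExecutor() as executor:
    futures = [executor.submit(test_provider, p) for p in providers]
    for future in concurrent.futures.as_completed(futures):
        if result := future.result():  # bind and test in one step
            print(f'{result[1]} | {result[0]}')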


@@ -8,8 +8,7 @@ sys.path.append(str(Path(__file__).parent.parent.parent))
import g4f
def read_code(text):
match = re.search(r"```(python|py|)\n(?P<code>[\S\s]+?)\n```", text)
if match:
if match := re.search(r"```(python|py|)\n(?P<code>[\S\s]+?)\n```", text):
return match.group("code")
def input_command():
@@ -99,12 +98,11 @@ And replace "gpt-3.5-turbo" with `model`.
print()
response = "".join(response)
code = read_code(response)
if code:
if code := read_code(response):
with open(provider_path, "w") as file:
file.write(code)
print("Saved at:", provider_path)
with open(f"g4f/Provider/__init__.py", "a") as file:
with open("g4f/Provider/__init__.py", "a") as file:
file.write(f"\nfrom .{name} import {name}")
else:
with open(provider_path, "r") as file:


@@ -8,8 +8,7 @@ sys.path.append(str(Path(__file__).parent.parent.parent))
import g4f
def read_code(text):
match = re.search(r"```(python|py|)\n(?P<code>[\S\s]+?)\n```", text)
if match:
if match := re.search(r"```(python|py|)\n(?P<code>[\S\s]+?)\n```", text):
return match.group("code")
path = input("Path: ")
@@ -41,7 +40,6 @@ for chunk in g4f.ChatCompletion.create(
print()
response = "".join(response)
code = read_code(response)
if code:
if code := read_code(response):
with open(path, "w") as file:
file.write(code)


@@ -52,7 +52,7 @@ async def test_async(provider: type[BaseProvider]):
response = await provider.create_async(model=model, messages=messages)
else:
response = provider.create_completion(model=model, messages=messages, stream=False)
return True if response else False
return bool(response)
except Exception as e:
if logging:
print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
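
The old return True if response else False is just a truthiness cast, so bool(response) says the same thing directly:

def has_response(response) -> bool:
    # bool() is the direct spelling of "True if x else False".
    return bool(response)

print(has_response("text"), has_response(""), has_response(None))  # True False False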


@@ -22,11 +22,9 @@ def get_model_info() -> dict[str, Any]:
urls = [f"{url}/_next/{path}" for path in paths]
scripts = [session.get(url).text for url in urls]
for script in scripts:
models_regex = r'let .="\\n\\nHuman:\",r=(.+?),.='
matches = re.findall(models_regex, script)
if matches:
for script in scripts:
if matches := re.findall(models_regex, script):
models_str = matches[0]
stop_sequences_regex = r"(?<=stopSequences:{value:\[)\D(?<!\])"
models_str = re.sub(


@@ -70,12 +70,12 @@ class AItianhu(AsyncGeneratorProvider):
line = json.loads(line)
if "detail" in line:
content = line["detail"]["choices"][0]["delta"].get("content")
if content:
yield content
else:
if "detail" not in line:
raise RuntimeError(f"Response: {line}")
if content := line["detail"]["choices"][0]["delta"].get(
"content"
):
yield content
@classmethod


@@ -28,7 +28,7 @@ class AItianhuSpace(AsyncGeneratorProvider):
if not model:
model = "gpt-3.5-turbo"
elif not model in domains:
elif model not in domains:
raise ValueError(f"Model are not supported: {model}")
if not domain:
@@ -71,8 +71,9 @@ class AItianhuSpace(AsyncGeneratorProvider):
raise RuntimeError("Platform's Risk Control")
line = json.loads(line)
if "detail" in line:
content = line["detail"]["choices"][0]["delta"].get("content")
if content:
if content := line["detail"]["choices"][0]["delta"].get(
"content"
):
yield content
elif "message" in line and "AI-4接口非常昂贵" in line["message"]:
raise RuntimeError("Rate limit for GPT 4 reached")


@@ -20,7 +20,9 @@ class Aichat(AsyncProvider):
cookies = get_cookies('chat-gpt.org') if not kwargs.get('cookies') else kwargs.get('cookies')
if not cookies:
raise RuntimeError(f"g4f.provider.Aichat requires cookies, [refresh https://chat-gpt.org on chrome]")
raise RuntimeError(
"g4f.provider.Aichat requires cookies, [refresh https://chat-gpt.org on chrome]"
)
headers = {
'authority': 'chat-gpt.org',


@@ -68,10 +68,9 @@ class Ails(AsyncGeneratorProvider):
if line.startswith(start) and line != "data: [DONE]":
line = line[len(start):-1]
line = json.loads(line)
token = line["choices"][0]["delta"].get("content")
if token:
if token := line["choices"][0]["delta"].get("content"):
if "ai.ls" in token or "ai.ci" in token:
raise Exception("Response Error: " + token)
raise Exception(f"Response Error: {token}")
yield token
@@ -89,12 +88,7 @@ def _hash(json_data: dict[str, str]) -> SHA256:
def _hash(json_data: dict[str, str]) -> SHA256:
base_string: str = "%s:%s:%s:%s" % (
json_data["t"],
json_data["m"],
"WI,2rU#_r:r~aF4aJ36[.Z(/8Rv93Rf",
len(json_data["m"]),
)
base_string: str = f'{json_data["t"]}:{json_data["m"]}:WI,2rU#_r:r~aF4aJ36[.Z(/8Rv93Rf:{len(json_data["m"])}'
return SHA256(hashlib.sha256(base_string.encode()).hexdigest())
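
The _hash rewrite swaps %-interpolation for an f-string. Note the quote mixing: the outer literal is single-quoted so the json_data["t"] lookups can use double quotes inside (nesting the same quote style inside an f-string only became legal in Python 3.12). A runnable sketch with dummy payload values; the salt is the constant visible in the hunk above, hoisted into a variable here for readability:

import hashlib

json_data = {"t": "1698048385", "m": "hello"}  # dummy values
salt = "WI,2rU#_r:r~aF4aJ36[.Z(/8Rv93Rf"
base_string = f'{json_data["t"]}:{json_data["m"]}:{salt}:{len(json_data["m"])}'
print(hashlib.sha256(base_string.encode()).hexdigest())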


@@ -56,9 +56,10 @@ class Bing(AsyncGeneratorProvider):
return stream_generate(prompt, tone, image, context, proxy, cookies)
def create_context(messages: Messages):
context = "".join(f"[{message['role']}](#message)\n{message['content']}\n\n" for message in messages)
return context
return "".join(
f"[{message['role']}](#message)\n{message['content']}\n\n"
for message in messages
)
class Conversation():
def __init__(self, conversationId: str, clientId: str, conversationSignature: str, imageInfo: dict=None) -> None:
@@ -110,30 +111,30 @@ async def create_conversation(session: ClientSession, tone: str, image: str = No
new_img_binary_data = compress_image_to_base64(new_img, compression_rate)
data, boundary = build_image_upload_api_payload(new_img_binary_data, conversation, tone)
headers = session.headers.copy()
headers["content-type"] = 'multipart/form-data; boundary=' + boundary
headers["content-type"] = f'multipart/form-data; boundary={boundary}'
headers["referer"] = 'https://www.bing.com/search?q=Bing+AI&showconv=1&FORM=hpcodx'
headers["origin"] = 'https://www.bing.com'
async with await session.post("https://www.bing.com/images/kblob", data=data, headers=headers, proxy=proxy) as image_upload_response:
if image_upload_response.status == 200:
if image_upload_response.status != 200:
raise Exception("Failed to upload image.")
image_info = await image_upload_response.json()
result = {}
if image_info.get('blobId'):
result['bcid'] = image_info.get('blobId', "")
if not image_info.get('blobId'):
raise Exception("Failed to parse image info.")
result = {'bcid': image_info.get('blobId', "")}
result['blurredBcid'] = image_info.get('processedBlobId', "")
if result['blurredBcid'] != "":
result["imageUrl"] = "https://www.bing.com/images/blob?bcid=" + result['blurredBcid']
elif result['bcid'] != "":
result["imageUrl"] = "https://www.bing.com/images/blob?bcid=" + result['bcid']
if config['visualSearch']["enableFaceBlurDebug"]:
result['originalImageUrl'] = "https://www.bing.com/images/blob?bcid=" + result['blurredBcid']
else:
result['originalImageUrl'] = "https://www.bing.com/images/blob?bcid=" + result['bcid']
result['originalImageUrl'] = (
"https://www.bing.com/images/blob?bcid="
+ result['blurredBcid']
if config['visualSearch']["enableFaceBlurDebug"]
else "https://www.bing.com/images/blob?bcid="
+ result['bcid']
)
conversation.imageInfo = result
else:
raise Exception("Failed to parse image info.")
else:
raise Exception("Failed to upload image.")
except Exception as e:
print(f"An error happened while trying to send image: {str(e)}")
return conversation
@@ -282,7 +283,18 @@ def build_image_upload_api_payload(image_bin: str, conversation: Conversation, t
'knowledgeRequest': payload
}
boundary="----WebKitFormBoundary" + ''.join(random.choices(string.ascii_letters + string.digits, k=16))
data = '--' + boundary + '\r\nContent-Disposition: form-data; name="knowledgeRequest"\r\n\r\n' + json.dumps(knowledge_request,ensure_ascii=False) + "\r\n--" + boundary + '\r\nContent-Disposition: form-data; name="imageBase64"\r\n\r\n' + image_bin + "\r\n--" + boundary + "--\r\n"
data = (
f'--{boundary}'
+ '\r\nContent-Disposition: form-data; name="knowledgeRequest"\r\n\r\n'
+ json.dumps(knowledge_request, ensure_ascii=False)
+ "\r\n--"
+ boundary
+ '\r\nContent-Disposition: form-data; name="imageBase64"\r\n\r\n'
+ image_bin
+ "\r\n--"
+ boundary
+ "--\r\n"
)
return data, boundary
def is_data_uri_an_image(data_uri):
@@ -329,7 +341,7 @@ def extract_data_uri(data_uri):
def get_orientation(data: bytes):
try:
if data[0:2] != b'\xFF\xD8':
if data[:2] != b'\xFF\xD8':
raise Exception('NotJpeg')
with Image.open(data) as img:
exif_data = img._getexif()
@@ -347,11 +359,11 @@ def process_image(orientation, img, new_width, new_height):
if orientation:
if orientation > 4:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
if orientation == 3 or orientation == 4:
if orientation in [3, 4]:
img = img.transpose(Image.ROTATE_180)
if orientation == 5 or orientation == 6:
if orientation in [5, 6]:
img = img.transpose(Image.ROTATE_270)
if orientation == 7 or orientation == 8:
if orientation in [7, 8]:
img = img.transpose(Image.ROTATE_90)
new_img.paste(img, (0, 0))
return new_img
@@ -362,8 +374,7 @@ def compress_image_to_base64(img, compression_rate):
try:
output_buffer = io.BytesIO()
img.save(output_buffer, format="JPEG", quality=int(compression_rate * 100))
base64_image = base64.b64encode(output_buffer.getvalue()).decode('utf-8')
return base64_image
return base64.b64encode(output_buffer.getvalue()).decode('utf-8')
except Exception as e:
raise e
@@ -431,12 +442,7 @@ async def stream_generate(
) as session:
conversation = await create_conversation(session, tone, image, proxy)
try:
async with session.ws_connect(
f'wss://sydney.bing.com/sydney/ChatHub',
autoping=False,
params={'sec_access_token': conversation.conversationSignature},
proxy=proxy
) as wss:
async with session.ws_connect('wss://sydney.bing.com/sydney/ChatHub', autoping=False, params={'sec_access_token': conversation.conversationSignature}, proxy=proxy) as wss:
await wss.send_str(format_message({'protocol': 'json', 'version': 1}))
await wss.receive(timeout=900)
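
Most of the Bing changes are guard-clause inversions: instead of nesting the happy path under if status == 200: with the raise in a distant else, the error is raised immediately and the rest of the function stays flat. The same idea in a compact synchronous sketch; requests stands in for the aiohttp session, and the names are condensed from the hunk rather than taken from the repo:

import requests

def upload_image(url: str, payload: dict) -> dict:
    response = requests.post(url, json=payload)
    if response.status_code != 200:   # guard clause: fail fast
        raise Exception("Failed to upload image.")
    image_info = response.json()
    if not image_info.get("blobId"):  # second guard, same shape
        raise Exception("Failed to parse image info.")
    return {"bcid": image_info.get("blobId", "")}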


@@ -22,14 +22,14 @@ class ChatBase(AsyncGeneratorProvider):
chat_id = 'z2c2HSfKnCTh5J4650V0I'
headers = {
"User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
"Accept" : "*/*",
"Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
"Origin" : cls.url,
"Referer" : cls.url + "/",
"Sec-Fetch-Dest" : "empty",
"Sec-Fetch-Mode" : "cors",
"Sec-Fetch-Site" : "same-origin",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
"Accept": "*/*",
"Accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
"Origin": cls.url,
"Referer": f"{cls.url}/",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
}
async with ClientSession(headers=headers) as session:
data = {


@@ -32,7 +32,7 @@ class Chatgpt4Online(AsyncGeneratorProvider):
"stream": True
}
async with session.post(cls.url + "/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response:
async with session.post(f"{cls.url}/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response:
response.raise_for_status()
async for line in response.content:
if line.startswith(b"data: "):


@@ -44,8 +44,7 @@ class ChatgptAi(AsyncGeneratorProvider):
async with session.get(cls.url, proxy=proxy) as response:
response.raise_for_status()
text = await response.text()
result = re.search(r"data-system='(.*?)'", text)
if result:
if result := re.search(r"data-system='(.*?)'", text):
cls._system = json.loads(html.unescape(result.group(1)))
if not cls._system:
raise RuntimeError("System args not found")


@@ -37,10 +37,13 @@ class ChatgptDemo(AsyncGeneratorProvider):
async with session.get(f"{cls.url}/", proxy=proxy) as response:
response.raise_for_status()
response = await response.text()
result = re.search(r'<div id="USERID" style="display: none">(.*?)<\/div>', response)
if not result:
raise RuntimeError("No user id found")
if result := re.search(
r'<div id="USERID" style="display: none">(.*?)<\/div>',
response,
):
user_id = result.group(1)
else:
raise RuntimeError("No user id found")
async with session.post(f"{cls.url}/new_chat", json={"user_id": user_id}, proxy=proxy) as response:
response.raise_for_status()
chat_id = (await response.json())["id_"]
@@ -56,6 +59,5 @@ class ChatgptDemo(AsyncGeneratorProvider):
async for line in response.content:
if line.startswith(b"data: "):
line = json.loads(line[6:-1])
chunk = line["choices"][0]["delta"].get("content")
if chunk:
if chunk := line["choices"][0]["delta"].get("content"):
yield chunk


@@ -68,12 +68,12 @@ class ChatgptFree(AsyncProvider):
raise RuntimeError("No post id found")
cls._post_id = result.group(1)
result = re.search(r'data-nonce="(.*?)"', response)
if not result:
raise RuntimeError("No nonce found")
if result := re.search(r'data-nonce="(.*?)"', response):
cls._nonce = result.group(1)
else:
raise RuntimeError("No nonce found")
prompt = format_prompt(messages)
data = {
"_wpnonce": cls._nonce,
@@ -83,8 +83,7 @@ class ChatgptFree(AsyncProvider):
"message": prompt,
"bot_id": "0"
}
async with session.post(cls.url + "/wp-admin/admin-ajax.php",
data=data, cookies=cookies) as response:
async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, cookies=cookies) as response:
response.raise_for_status()
return (await response.json())["data"]


@@ -45,10 +45,13 @@ class ChatgptLogin(AsyncGeneratorProvider):
async with session.get(f"{cls.url}/chat/", proxy=proxy) as response:
response.raise_for_status()
response = await response.text()
result = re.search(r'<div id="USERID" style="display: none">(.*?)<\/div>', response)
if not result:
raise RuntimeError("No user id found")
if result := re.search(
r'<div id="USERID" style="display: none">(.*?)<\/div>',
response,
):
cls._user_id = result.group(1)
else:
raise RuntimeError("No user id found")
async with session.post(f"{cls.url}/chat/new_chat", json={"user_id": cls._user_id}, proxy=proxy) as response:
response.raise_for_status()
chat_id = (await response.json())["id_"]
@@ -64,8 +67,9 @@ class ChatgptLogin(AsyncGeneratorProvider):
response.raise_for_status()
async for line in response.content:
if line.startswith(b"data: "):
content = json.loads(line[6:])["choices"][0]["delta"].get("content")
if content:
if content := json.loads(line[6:])["choices"][0][
"delta"
].get("content"):
yield content
async with session.post(f"{cls.url}/chat/delete_chat", json={"chat_id": chat_id}, proxy=proxy) as response:
response.raise_for_status()
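
Many hunks in this commit share one pattern: read a server-sent-event stream line by line, decode the data: payload, and yield the delta content only when it is non-empty; the walrus operator merges the get-and-test into the if. A self-contained sketch of that loop (the sample lines are fabricated for the demo):

import json

def iter_content(lines):
    for line in lines:
        if line.startswith(b"data: ") and not line.startswith(b"data: [DONE]"):
            payload = json.loads(line[6:])
            # Bind the delta content and skip empty chunks in one step.
            if content := payload["choices"][0]["delta"].get("content"):
                yield content

sample = [b'data: {"choices": [{"delta": {"content": "Hi"}}]}', b'data: [DONE]']
print(list(iter_content(sample)))  # ['Hi']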


@@ -35,14 +35,15 @@ class ChatgptX(AsyncGeneratorProvider):
async with ClientSession(headers=headers) as session:
async with session.get(f"{cls.url}/", proxy=proxy) as response:
response = await response.text()
result = re.search(r'<meta name="csrf-token" content="(.*?)"', response)
if result:
if result := re.search(
r'<meta name="csrf-token" content="(.*?)"', response
):
csrf_token = result.group(1)
result = re.search(r"openconversions\('(.*?)'\)", response)
if result:
if result := re.search(r"openconversions\('(.*?)'\)", response):
chat_id = result.group(1)
result = re.search(r'<input type="hidden" id="user_id" value="(.*?)"', response)
if result:
if result := re.search(
r'<input type="hidden" id="user_id" value="(.*?)"', response
):
user_id = result.group(1)
if not csrf_token or not chat_id or not user_id:
@@ -63,7 +64,7 @@ class ChatgptX(AsyncGeneratorProvider):
'x-csrf-token': csrf_token,
'x-requested-with': 'XMLHttpRequest'
}
async with session.post(cls.url + '/sendchat', data=data, headers=headers, proxy=proxy) as response:
async with session.post(f'{cls.url}/sendchat', data=data, headers=headers, proxy=proxy) as response:
response.raise_for_status()
chat = await response.json()
if "response" not in chat or not chat["response"]:


@@ -49,7 +49,7 @@ class GPTalk(AsyncGeneratorProvider):
"fingerprint": secrets.token_hex(16).zfill(32),
"platform": "fingerprint"
}
async with session.post(cls.url + "/api/chatgpt/user/login", json=data, proxy=proxy) as response:
async with session.post(f"{cls.url}/api/chatgpt/user/login", json=data, proxy=proxy) as response:
response.raise_for_status()
cls._auth = (await response.json())["data"]
data = {
@@ -69,11 +69,11 @@ class GPTalk(AsyncGeneratorProvider):
headers = {
'authorization': f'Bearer {cls._auth["token"]}',
}
async with session.post(cls.url + "/api/chatgpt/chatapi/text", json=data, headers=headers, proxy=proxy) as response:
async with session.post(f"{cls.url}/api/chatgpt/chatapi/text", json=data, headers=headers, proxy=proxy) as response:
response.raise_for_status()
token = (await response.json())["data"]["token"]
last_message = ""
async with session.get(cls.url + "/api/chatgpt/chatapi/stream", params={"token": token}, proxy=proxy) as response:
async with session.get(f"{cls.url}/api/chatgpt/chatapi/stream", params={"token": token}, proxy=proxy) as response:
response.raise_for_status()
async for line in response.content:
if line.startswith(b"data: "):


@@ -23,7 +23,9 @@ class GptChatly(AsyncProvider):
cookies = get_cookies('gptchatly.com') if not cookies else cookies
if not cookies:
raise RuntimeError(f"g4f.provider.GptChatly requires cookies, [refresh https://gptchatly.com on chrome]")
raise RuntimeError(
"g4f.provider.GptChatly requires cookies, [refresh https://gptchatly.com on chrome]"
)
if model.startswith("gpt-4"):
chat_url = f"{cls.url}/fetch-gpt4-response"


@@ -55,8 +55,9 @@ class GptForLove(AsyncGeneratorProvider):
except:
raise RuntimeError(f"Broken line: {line}")
if "detail" in line:
content = line["detail"]["choices"][0]["delta"].get("content")
if content:
if content := line["detail"]["choices"][0]["delta"].get(
"content"
):
yield content
elif "10分钟内提问超过了5次" in line:
raise RuntimeError("Rate limit reached")
@@ -66,7 +67,7 @@ class GptForLove(AsyncGeneratorProvider):
def get_secret() -> str:
dir = os.path.dirname(__file__)
include = dir + '/npm/node_modules/crypto-js/crypto-js'
include = f'{dir}/npm/node_modules/crypto-js/crypto-js'
source = """
CryptoJS = require({include})
var k = '14487141bvirvvG'


@@ -21,14 +21,14 @@ class GptGo(AsyncGeneratorProvider):
**kwargs
) -> AsyncResult:
headers = {
"User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
"Accept" : "*/*",
"Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
"Origin" : cls.url,
"Referer" : cls.url + "/",
"Sec-Fetch-Dest" : "empty",
"Sec-Fetch-Mode" : "cors",
"Sec-Fetch-Site" : "same-origin",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
"Accept": "*/*",
"Accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
"Origin": cls.url,
"Referer": f"{cls.url}/",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
}
async with ClientSession(
headers=headers
@@ -62,8 +62,7 @@ class GptGo(AsyncGeneratorProvider):
line = json.loads(line[len(start):-1])
if line["choices"][0]["finish_reason"] == "stop":
break
content = line["choices"][0]["delta"].get("content")
if content:
if content := line["choices"][0]["delta"].get("content"):
yield content


@@ -49,8 +49,7 @@ class GptGod(AsyncGeneratorProvider):
if line.startswith(b'event: '):
event = line[7:-1]
elif event == b"data" and line.startswith(b"data: "):
data = json.loads(line[6:-1])
if data:
if data := json.loads(line[6:-1]):
yield data
elif event == b"done":
break


@@ -49,7 +49,7 @@ class Liaobots(AsyncGeneratorProvider):
"authority": "liaobots.com",
"content-type": "application/json",
"origin": cls.url,
"referer": cls.url + "/",
"referer": f"{cls.url}/",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
}
async with ClientSession(


@@ -29,25 +29,22 @@ class Vercel(BaseProvider):
raise ValueError(f"Vercel does not support {model}")
headers = {
'authority' : 'sdk.vercel.ai',
'accept' : '*/*',
'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
'cache-control' : 'no-cache',
'content-type' : 'application/json',
'custom-encoding' : get_anti_bot_token(),
'origin' : 'https://sdk.vercel.ai',
'pragma' : 'no-cache',
'referer' : 'https://sdk.vercel.ai/',
'sec-ch-ua' : '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
'sec-ch-ua-mobile' : '?0',
'authority': 'sdk.vercel.ai',
'accept': '*/*',
'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
'cache-control': 'no-cache',
'content-type': 'application/json',
'custom-encoding': get_anti_bot_token(),
'origin': 'https://sdk.vercel.ai',
'pragma': 'no-cache',
'referer': 'https://sdk.vercel.ai/',
'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
'sec-fetch-dest' : 'empty',
'sec-fetch-mode' : 'cors',
'sec-fetch-site' : 'same-origin',
'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.%s.%s Safari/537.36' % (
random.randint(99, 999),
random.randint(99, 999)
)
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'user-agent': f'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.{random.randint(99, 999)}.{random.randint(99, 999)} Safari/537.36',
}
json_data = {
@@ -60,7 +57,7 @@ class Vercel(BaseProvider):
}
max_retries = kwargs.get('max_retries', 20)
for i in range(max_retries):
for _ in range(max_retries):
response = requests.post('https://sdk.vercel.ai/api/generate',
headers=headers, json=json_data, stream=True, proxies={"https": proxy})
try:
@@ -74,22 +71,19 @@
def get_anti_bot_token() -> str:
headers = {
'authority' : 'sdk.vercel.ai',
'accept' : '*/*',
'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
'cache-control' : 'no-cache',
'pragma' : 'no-cache',
'referer' : 'https://sdk.vercel.ai/',
'sec-ch-ua' : '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
'sec-ch-ua-mobile' : '?0',
'authority': 'sdk.vercel.ai',
'accept': '*/*',
'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
'cache-control': 'no-cache',
'pragma': 'no-cache',
'referer': 'https://sdk.vercel.ai/',
'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
'sec-fetch-dest' : 'empty',
'sec-fetch-mode' : 'cors',
'sec-fetch-site' : 'same-origin',
'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.%s.%s Safari/537.36' % (
random.randint(99, 999),
random.randint(99, 999)
)
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'user-agent': f'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.{random.randint(99, 999)}.{random.randint(99, 999)} Safari/537.36',
}
response = requests.get('https://sdk.vercel.ai/openai.jpeg',
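
Both Vercel header blocks swap the old '%s' % (...) user-agent for an f-string; the random.randint calls still run once, when the literal is built. In isolation:

import random

user_agent = (
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) "
    "AppleWebKit/537.36 (KHTML, like Gecko) "
    f"Chrome/117.0.{random.randint(99, 999)}.{random.randint(99, 999)} Safari/537.36"
)
print(user_agent)  # build numbers vary on each construction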


@@ -23,10 +23,7 @@ class Ylokh(AsyncGeneratorProvider):
**kwargs
) -> AsyncResult:
model = model if model else "gpt-3.5-turbo"
headers = {
"Origin" : cls.url,
"Referer": cls.url + "/",
}
headers = {"Origin": cls.url, "Referer": f"{cls.url}/"}
data = {
"messages": messages,
"model": model,
@@ -52,8 +49,9 @@ class Ylokh(AsyncGeneratorProvider):
if line.startswith("data: [DONE]"):
break
line = json.loads(line[6:])
content = line["choices"][0]["delta"].get("content")
if content:
if content := line["choices"][0]["delta"].get(
"content"
):
yield content
else:
chat = await response.json()


@@ -18,9 +18,12 @@ class AiService(BaseProvider):
stream: bool,
**kwargs: Any,
) -> CreateResult:
base = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
base += "\nassistant: "
base = (
"\n".join(
f"{message['role']}: {message['content']}" for message in messages
)
+ "\nassistant: "
)
headers = {
"accept": "*/*",
"content-type": "text/plain;charset=UTF-8",


@@ -20,14 +20,14 @@ class CodeLinkAva(AsyncGeneratorProvider):
**kwargs
) -> AsyncGenerator:
headers = {
"User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
"Accept" : "*/*",
"Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
"Origin" : cls.url,
"Referer" : cls.url + "/",
"Sec-Fetch-Dest" : "empty",
"Sec-Fetch-Mode" : "cors",
"Sec-Fetch-Site" : "same-origin",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
"Accept": "*/*",
"Accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
"Origin": cls.url,
"Referer": f"{cls.url}/",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
}
async with ClientSession(
headers=headers
@@ -46,8 +46,7 @@ class CodeLinkAva(AsyncGeneratorProvider):
if line.startswith("data: [DONE]"):
break
line = json.loads(line[6:-1])
content = line["choices"][0]["delta"].get("content")
if content:
if content := line["choices"][0]["delta"].get("content"):
yield content


@@ -69,9 +69,9 @@ class EasyChat(BaseProvider):
response = session.post(f"{server}/api/openai/v1/chat/completions",
headers=headers, json=json_data, stream=stream)
if response.status_code == 200:
if stream == False:
if response.status_code != 200:
raise Exception(f"Error {response.status_code} from server : {response.reason}")
if not stream:
json_data = response.json()
if "choices" in json_data:
@@ -88,10 +88,6 @@
if len(splitData) > 1:
yield json.loads(splitData[1])["choices"][0]["delta"]["content"]
else:
continue
else:
raise Exception(f"Error {response.status_code} from server : {response.reason}")
@classmethod


@@ -65,8 +65,7 @@ class Equing(BaseProvider):
if line:
if b'content' in line:
line_json = json.loads(line.decode('utf-8').split('data: ')[1])
token = line_json['choices'][0]['delta'].get('content')
if token:
if token := line_json['choices'][0]['delta'].get('content'):
yield token
@classmethod


@@ -69,8 +69,9 @@ class FastGpt(BaseProvider):
try:
if b'content' in line:
line_json = json.loads(line.decode('utf-8').split('data: ')[1])
token = line_json['choices'][0]['delta'].get('content')
if token:
if token := line_json['choices'][0]['delta'].get(
'content'
):
yield token
except:
continue


@@ -22,7 +22,7 @@ class H2o(AsyncGeneratorProvider):
**kwargs
) -> AsyncResult:
model = model if model else cls.model
headers = {"Referer": cls.url + "/"}
headers = {"Referer": f"{cls.url}/"}
async with ClientSession(
headers=headers


@@ -47,8 +47,7 @@ class Lockchat(BaseProvider):
if b"content" in token:
token = json.loads(token.decode("utf-8").split("data: ")[1])
token = token["choices"][0]["delta"].get("content")
if token:
if token := token["choices"][0]["delta"].get("content"):
yield (token)
@classmethod


@@ -21,9 +21,12 @@ class V50(BaseProvider):
messages: list[dict[str, str]],
stream: bool, **kwargs: Any) -> CreateResult:
conversation = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
conversation += "\nassistant: "
conversation = (
"\n".join(
f"{message['role']}: {message['content']}" for message in messages
)
+ "\nassistant: "
)
payload = {
"prompt" : conversation,
"options" : {},
@@ -47,8 +50,12 @@
'sec-fetch-site' : 'same-origin',
'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36'
}
response = requests.post("https://p5.v50.ltd/api/chat-process",
json=payload, headers=headers, proxies=kwargs['proxy'] if 'proxy' in kwargs else {})
response = requests.post(
"https://p5.v50.ltd/api/chat-process",
json=payload,
headers=headers,
proxies=kwargs.get('proxy', {}),
)
if "https://fk1.v50.ltd" not in response.text:
yield response.text
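
kwargs['proxy'] if 'proxy' in kwargs else {} looks the key up twice; dict.get with a default does it once and can never raise KeyError:

def request_kwargs(**kwargs):
    # .get() returns the default when the key is absent.
    return {"proxies": kwargs.get("proxy", {})}

print(request_kwargs())                                          # {'proxies': {}}
print(request_kwargs(proxy={"https": "http://127.0.0.1:8080"}))  # hypothetical proxy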


@@ -20,14 +20,14 @@ class Vitalentum(AsyncGeneratorProvider):
**kwargs
) -> AsyncResult:
headers = {
"User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
"Accept" : "text/event-stream",
"Accept-language" : "de,en-US;q=0.7,en;q=0.3",
"Origin" : cls.url,
"Referer" : cls.url + "/",
"Sec-Fetch-Dest" : "empty",
"Sec-Fetch-Mode" : "cors",
"Sec-Fetch-Site" : "same-origin",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
"Accept": "text/event-stream",
"Accept-language": "de,en-US;q=0.7,en;q=0.3",
"Origin": cls.url,
"Referer": f"{cls.url}/",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
}
conversation = json.dumps({"history": [{
"speaker": "human" if message["role"] == "user" else "bot",
@@ -49,8 +49,7 @@ class Vitalentum(AsyncGeneratorProvider):
if line.startswith("data: [DONE]"):
break
line = json.loads(line[6:-1])
content = line["choices"][0]["delta"].get("content")
if content:
if content := line["choices"][0]["delta"].get("content"):
yield content


@@ -41,15 +41,20 @@ class Wuguokai(BaseProvider):
"userId": f"#/chat/{random.randint(1,99999999)}",
"usingContext": True
}
response = requests.post("https://ai-api20.wuguokai.xyz/api/chat-process", headers=headers, timeout=3, json=data, proxies=kwargs['proxy'] if 'proxy' in kwargs else {})
response = requests.post(
"https://ai-api20.wuguokai.xyz/api/chat-process",
headers=headers,
timeout=3,
json=data,
proxies=kwargs.get('proxy', {}),
)
_split = response.text.split("> 若回答失败请重试或多刷新几次界面后重试")
if response.status_code == 200:
if response.status_code != 200:
raise Exception(f"Error: {response.status_code} {response.reason}")
if len(_split) > 1:
yield _split[1].strip()
else:
yield _split[0].strip()
else:
raise Exception(f"Error: {response.status_code} {response.reason}")
@classmethod
@property


@@ -77,9 +77,8 @@ def get_cookies(domain_name=''):
def g4f(domain_name):
user_data_dir = user_config_dir("g4f")
cookie_file = path.join(user_data_dir, "Default", "Cookies")
if not path.exists(cookie_file):
return []
return chrome(cookie_file, domain_name)
return [] if not path.exists(cookie_file) else chrome(cookie_file, domain_name)
cookies = {}
for cookie_fn in [g4f, chrome, chromium, opera, opera_gx, brave, edge, vivaldi, firefox]:
try:
@@ -96,16 +95,15 @@ def get_cookies(domain_name=''):
def format_prompt(messages: Messages, add_special_tokens=False) -> str:
if add_special_tokens or len(messages) > 1:
if not add_special_tokens and len(messages) <= 1:
return messages[0]["content"]
formatted = "\n".join(
[
"%s: %s" % ((message["role"]).capitalize(), message["content"])
f'{message["role"].capitalize()}: {message["content"]}'
for message in messages
]
)
return f"{formatted}\nAssistant:"
else:
return messages[0]["content"]
def get_browser(user_data_dir: str = None):
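
format_prompt gets two treatments at once: the condition is inverted into an early return for the single-message case, and the "%s: %s" % pair becomes an f-string inside the join. The refactored function reproduced as a runnable sketch:

def format_prompt(messages, add_special_tokens=False) -> str:
    # Inverted guard: the single-message case exits early, removing the else.
    if not add_special_tokens and len(messages) <= 1:
        return messages[0]["content"]
    formatted = "\n".join(
        f'{message["role"].capitalize()}: {message["content"]}'
        for message in messages
    )
    return f"{formatted}\nAssistant:"

print(format_prompt([{"role": "user", "content": "Hi"}]))        # Hi
print(format_prompt([{"role": "user", "content": "Hi"}], True))  # User: Hi / Assistant: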


@@ -45,11 +45,11 @@ class Bard(AsyncProvider):
async with session.get(cls.url, proxy=proxy) as response:
text = await response.text()
match = re.search(r'SNlM0e\":\"(.*?)\"', text)
if not match:
raise RuntimeError("No snlm0e value.")
if match := re.search(r'SNlM0e\":\"(.*?)\"', text):
cls._snlm0e = match.group(1)
else:
raise RuntimeError("No snlm0e value.")
params = {
'bl': 'boq_assistant-bard-web-server_20230326.21_p0',
'_reqid': random.randint(1111, 9999),


@@ -32,12 +32,10 @@ class Raycast(BaseProvider):
'Content-Type': 'application/json',
'User-Agent': 'Raycast/0 CFNetwork/1410.0.3 Darwin/22.6.0',
}
parsed_messages = []
for message in messages:
parsed_messages.append({
'author': message['role'],
'content': {'text': message['content']}
})
parsed_messages = [
{'author': message['role'], 'content': {'text': message['content']}}
for message in messages
]
data = {
"debug": False,
"locale": "en-CN",


@@ -33,21 +33,21 @@ class Theb(BaseProvider):
org_id = auth["org_id"]
headers = {
'authority' : 'beta.theb.ai',
'accept' : 'text/event-stream',
'accept-language' : 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
'authorization' : 'Bearer '+bearer_token,
'content-type' : 'application/json',
'origin' : 'https://beta.theb.ai',
'referer' : 'https://beta.theb.ai/home',
'sec-ch-ua' : '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
'sec-ch-ua-mobile' : '?0',
'authority': 'beta.theb.ai',
'accept': 'text/event-stream',
'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
'authorization': f'Bearer {bearer_token}',
'content-type': 'application/json',
'origin': 'https://beta.theb.ai',
'referer': 'https://beta.theb.ai/home',
'sec-ch-ua': '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'sec-fetch-dest' : 'empty',
'sec-fetch-mode' : 'cors',
'sec-fetch-site' : 'same-origin',
'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
'x-ai-model' : 'ee8d4f29cb7047f78cbe84313ed6ace8',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
'x-ai-model': 'ee8d4f29cb7047f78cbe84313ed6ace8',
}
req_rand = random.randint(100000000, 9999999999)
@@ -82,7 +82,7 @@ class Theb(BaseProvider):
next_content = content
data = json.loads(chunk.decode().split("data: ")[1])
content = data["content"]
yield data["content"].replace(next_content, "")
yield content.replace(next_content, "")
@classmethod
@property


@@ -43,10 +43,12 @@ class ChatAiGpt(AsyncGeneratorProvider):
async with session.get(f"{cls.url}/", proxy=proxy) as response:
response.raise_for_status()
response = await response.text()
result = re.search(r'data-nonce=(.*?) data-post-id=([0-9]+)', response)
if not result:
raise RuntimeError("No nonce found")
if result := re.search(
r'data-nonce=(.*?) data-post-id=([0-9]+)', response
):
cls._nonce, cls._post_id = result.group(1), result.group(2)
else:
raise RuntimeError("No nonce found")
prompt = format_prompt(messages)
data = {
"_wpnonce": cls._nonce,


@@ -48,8 +48,7 @@ class MikuChat(AsyncGeneratorProvider):
async for line in response.iter_lines():
if line.startswith(b"data: "):
line = json.loads(line[6:])
chunk = line["choices"][0]["delta"].get("content")
if chunk:
if chunk := line["choices"][0]["delta"].get("content"):
yield chunk
def k(e: str, t: int):


@@ -21,7 +21,7 @@ class PerplexityAi(AsyncProvider):
proxy: str = None,
**kwargs
) -> str:
url = cls.url + "/socket.io/?EIO=4&transport=polling"
url = f"{cls.url}/socket.io/?EIO=4&transport=polling"
headers = {
"Referer": f"{cls.url}/"
}


@@ -82,7 +82,7 @@ class ChatCompletion:
ignored : List[str] = None, **kwargs) -> str:
if stream:
raise ValueError(f'"create_async" does not support "stream" argument')
raise ValueError('"create_async" does not support "stream" argument')
model, provider = get_model_and_provider(model, provider, False, ignored)


@@ -62,7 +62,7 @@ class Api:
self.app.register_error_handler(ex, self.__handle_error)
if not self.debug:
self.logger.warning('Serving on http://{}:{}'.format(host, port))
self.logger.warning(f'Serving on http://{host}:{port}')
WSGIRequestHandler.protocol_version = 'HTTP/1.1'
serve(self.app, host=host, port=port, ident=None, threads=threads)
@@ -76,7 +76,7 @@ class Api:
@staticmethod
def __after_request(resp):
resp.headers['X-Server'] = 'g4f/%s' % g4f.version
resp.headers['X-Server'] = f'g4f/{g4f.version}'
return resp


@@ -35,9 +35,7 @@ class Backend_Api:
return 'ok', 200
def models(self):
models = g4f._all_models
return models
return g4f._all_models
def _gen_title(self):
return {
@@ -56,15 +54,14 @@
messages = special_instructions[jailbreak] + conversation + search(internet_access, prompt) + [prompt]
def stream():
if provider:
answer = g4f.ChatCompletion.create(model=model,
provider=get_provider(provider), messages=messages, stream=True)
else:
answer = g4f.ChatCompletion.create(model=model,
messages=messages, stream=True)
for token in answer:
yield token
yield from g4f.ChatCompletion.create(
model=model,
provider=get_provider(provider),
messages=messages,
stream=True,
) if provider else g4f.ChatCompletion.create(
model=model, messages=messages, stream=True
)
return self.app.response_class(stream(), mimetype='text/event-stream')
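
The streaming endpoint now lets a conditional expression pick one of the two g4f.ChatCompletion.create(...) generators and delegates with yield from, replacing the manual for token in answer: yield token loop. The delegation pattern in isolation, with toy generators:

def numbers():
    yield from range(3)

def letters():
    yield from "ab"

def stream(use_numbers: bool):
    # yield from delegates to whichever generator the condition selects.
    yield from numbers() if use_numbers else letters()

print(list(stream(True)), list(stream(False)))  # [0, 1, 2] ['a', 'b']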


@@ -19,11 +19,10 @@ def search(internet_access, prompt):
if not search:
return []
blob = ''
for index, result in enumerate(results):
blob += f'[{index}] "{result["body"]}"\nURL:{result["href"]}\n\n'
blob = ''.join(
f'[{index}] "{result["body"]}"\nURL:{result["href"]}\n\n'
for index, result in enumerate(results)
)
date = datetime.now().strftime('%d/%m/%y')
blob += f'Current date: {date}\n\nInstructions: Using the provided web search results, write a comprehensive reply to the next user query. Make sure to cite results using [[number](URL)] notation after the reference. If the provided search results refer to multiple subjects with the same name, write separate answers for each subject. Ignore your previous response if any.'


@@ -5,12 +5,10 @@ from g4f import BaseProvider
def get_provider(provider: str) -> BaseProvider | None:
if isinstance(provider, str):
if not isinstance(provider, str):
return None
print(provider)
if provider == 'g4f.Provider.Auto':
return None
return g4f.Provider.ProviderUtils.convert.get(provider)
else:
return None


@@ -25,8 +25,8 @@ class Website:
}
def _chat(self, conversation_id):
if not '-' in conversation_id:
return redirect(f'/chat')
if '-' not in conversation_id:
return redirect('/chat')
return render_template('index.html', chat_id = conversation_id)


@@ -52,10 +52,7 @@ class StreamResponse:
):
if pending is not None:
chunk = pending + chunk
if delimiter:
lines = chunk.split(delimiter)
else:
lines = chunk.splitlines()
lines = chunk.split(delimiter) if delimiter else chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
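
StreamResponse now chooses its split strategy with one expression instead of a four-line if/else. That choice in isolation:

def split_chunk(chunk, delimiter=None):
    # One expression picks between an explicit delimiter and line splitting.
    return chunk.split(delimiter) if delimiter else chunk.splitlines()

print(split_chunk("a\nb"))      # ['a', 'b']
print(split_chunk("a|b", "|"))  # ['a', 'b']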