'Refactored by Sourcery' (#1125)

Co-authored-by: Sourcery AI <>
Authored by ⲘrṨhส∂ow on 2023-10-23 09:46:25 +02:00; committed by GitHub
parent b1dbf66587
commit 3982f39424
50 changed files with 416 additions and 429 deletions

View File

@ -5,9 +5,7 @@ async def log_time_async(method: callable, **kwargs):
start = time()
result = await method(**kwargs)
secs = f"{round(time() - start, 2)} secs"
if result:
return " ".join([result, secs])
return secs
return " ".join([result, secs]) if result else secs
def log_time_yield(method: callable, **kwargs):
@ -20,6 +18,4 @@ def log_time(method: callable, **kwargs):
start = time()
result = method(**kwargs)
secs = f"{round(time() - start, 2)} secs"
if result:
return " ".join([result, secs])
return secs
return " ".join([result, secs]) if result else secs

View File

@ -22,11 +22,11 @@ def test_provider(provider):
return None
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = []
for provider in __all__:
if provider not in _:
futures.append(executor.submit(test_provider, provider))
futures = [
executor.submit(test_provider, provider)
for provider in __all__
if provider not in _
]
for future in concurrent.futures.as_completed(futures):
result = future.result()
if result:
if result := future.result():
print(f'{result[1]} | {result[0]}')
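
The two rewrites above compose: the submit loop becomes a list comprehension, and the result check becomes an assignment expression (walrus, Python 3.8+). A self-contained sketch, with a dummy provider list and test function standing in for the module's globals:

import concurrent.futures

def test_provider(provider):
    # Dummy stand-in: fail one provider, succeed for the rest.
    return None if provider == "Broken" else (provider, "works")

providers = ["Bing", "Broken", "GptGo"]
with concurrent.futures.ThreadPoolExecutor() as executor:
    futures = [executor.submit(test_provider, p) for p in providers]
    for future in concurrent.futures.as_completed(futures):
        if result := future.result():  # bind and test in one step
            print(f"{result[1]} | {result[0]}")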

View File

@ -8,8 +8,7 @@ sys.path.append(str(Path(__file__).parent.parent.parent))
import g4f
def read_code(text):
match = re.search(r"```(python|py|)\n(?P<code>[\S\s]+?)\n```", text)
if match:
if match := re.search(r"```(python|py|)\n(?P<code>[\S\s]+?)\n```", text):
return match.group("code")
def input_command():
@ -99,12 +98,11 @@ And replace "gpt-3.5-turbo" with `model`.
print()
response = "".join(response)
code = read_code(response)
if code:
if code := read_code(response):
with open(provider_path, "w") as file:
file.write(code)
print("Saved at:", provider_path)
with open(f"g4f/Provider/__init__.py", "a") as file:
with open("g4f/Provider/__init__.py", "a") as file:
file.write(f"\nfrom .{name} import {name}")
else:
with open(provider_path, "r") as file:

View File

@ -8,8 +8,7 @@ sys.path.append(str(Path(__file__).parent.parent.parent))
import g4f
def read_code(text):
match = re.search(r"```(python|py|)\n(?P<code>[\S\s]+?)\n```", text)
if match:
if match := re.search(r"```(python|py|)\n(?P<code>[\S\s]+?)\n```", text):
return match.group("code")
path = input("Path: ")
@ -41,7 +40,6 @@ for chunk in g4f.ChatCompletion.create(
print()
response = "".join(response)
code = read_code(response)
if code:
if code := read_code(response):
with open(path, "w") as file:
file.write(code)

View File

@ -52,7 +52,7 @@ async def test_async(provider: type[BaseProvider]):
response = await provider.create_async(model=model, messages=messages)
else:
response = provider.create_completion(model=model, messages=messages, stream=False)
return True if response else False
return bool(response)
except Exception as e:
if logging:
print(f"{provider.__name__}: {e.__class__.__name__}: {e}")

View File

@ -22,11 +22,9 @@ def get_model_info() -> dict[str, Any]:
urls = [f"{url}/_next/{path}" for path in paths]
scripts = [session.get(url).text for url in urls]
models_regex = r'let .="\\n\\nHuman:\",r=(.+?),.='
for script in scripts:
models_regex = r'let .="\\n\\nHuman:\",r=(.+?),.='
matches = re.findall(models_regex, script)
if matches:
if matches := re.findall(models_regex, script):
models_str = matches[0]
stop_sequences_regex = r"(?<=stopSequences:{value:\[)\D(?<!\])"
models_str = re.sub(

View File

@ -24,9 +24,9 @@ class AItianhu(AsyncGeneratorProvider):
if not cookies:
cookies = get_cookies(domain_name='www.aitianhu.com')
if not cookies:
raise RuntimeError(f"g4f.provider.{cls.__name__} requires cookies [refresh https://www.aitianhu.com on chrome]")
if not cookies:
raise RuntimeError(f"g4f.provider.{cls.__name__} requires cookies [refresh https://www.aitianhu.com on chrome]")
data = {
"prompt": format_prompt(messages),
"options": {},
@ -35,7 +35,7 @@ class AItianhu(AsyncGeneratorProvider):
"top_p": 1,
**kwargs
}
headers = {
'authority': 'www.aitianhu.com',
'accept': 'application/json, text/plain, */*',
@ -51,31 +51,31 @@ class AItianhu(AsyncGeneratorProvider):
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
}
async with StreamSession(headers=headers,
cookies=cookies,
timeout=timeout,
proxies={"https": proxy},
impersonate="chrome107", verify=False) as session:
cookies=cookies,
timeout=timeout,
proxies={"https": proxy},
impersonate="chrome107", verify=False) as session:
async with session.post(f"{cls.url}/api/chat-process", json=data) as response:
response.raise_for_status()
async for line in response.iter_lines():
if line == b"<script>":
raise RuntimeError("Solve challenge and pass cookies")
if b"platform's risk control" in line:
raise RuntimeError("Platform's Risk Control")
line = json.loads(line)
if "detail" in line:
content = line["detail"]["choices"][0]["delta"].get("content")
if content:
yield content
else:
if "detail" not in line:
raise RuntimeError(f"Response: {line}")
if content := line["detail"]["choices"][0]["delta"].get(
"content"
):
yield content
@classmethod
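
The streaming loop above is inverted into guard-clause style (raise first on unexpected payloads) and the delta content is bound with a walrus. A sketch of the same shape over a hypothetical decoded line, independent of the provider's session handling:

import json

def iter_content(raw: bytes):
    line = json.loads(raw)
    # Guard clause: anything without a "detail" key raises at once.
    if "detail" not in line:
        raise RuntimeError(f"Response: {line}")
    # Walrus: bind the possibly-missing delta content and test it.
    if content := line["detail"]["choices"][0]["delta"].get("content"):
        yield content

ok = b'{"detail": {"choices": [{"delta": {"content": "hi"}}]}}'
print(list(iter_content(ok)))  # ['hi']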

View File

@ -27,26 +27,26 @@ class AItianhuSpace(AsyncGeneratorProvider):
if not model:
model = "gpt-3.5-turbo"
elif not model in domains:
elif model not in domains:
raise ValueError(f"Model are not supported: {model}")
if not domain:
chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
rand = ''.join(random.choice(chars) for _ in range(6))
domain = f"{rand}.{domains[model]}"
if debug.logging:
print(f"AItianhuSpace | using domain: {domain}")
if not cookies:
cookies = get_cookies('.aitianhu.space')
if not cookies:
raise RuntimeError(f"g4f.provider.{cls.__name__} requires cookies [refresh https://{domain} on chrome]")
if not cookies:
raise RuntimeError(f"g4f.provider.{cls.__name__} requires cookies [refresh https://{domain} on chrome]")
url = f'https://{domain}'
async with StreamSession(proxies={"https": proxy},
cookies=cookies, timeout=timeout, impersonate="chrome110", verify=False) as session:
cookies=cookies, timeout=timeout, impersonate="chrome110", verify=False) as session:
data = {
"prompt": format_prompt(messages),
@ -71,8 +71,9 @@ class AItianhuSpace(AsyncGeneratorProvider):
raise RuntimeError("Platform's Risk Control")
line = json.loads(line)
if "detail" in line:
content = line["detail"]["choices"][0]["delta"].get("content")
if content:
if content := line["detail"]["choices"][0]["delta"].get(
"content"
):
yield content
elif "message" in line and "AI-4接口非常昂贵" in line["message"]:
raise RuntimeError("Rate limit for GPT 4 reached")

View File

@ -20,8 +20,10 @@ class Aichat(AsyncProvider):
cookies = get_cookies('chat-gpt.org') if not kwargs.get('cookies') else kwargs.get('cookies')
if not cookies:
raise RuntimeError(f"g4f.provider.Aichat requires cookies, [refresh https://chat-gpt.org on chrome]")
raise RuntimeError(
"g4f.provider.Aichat requires cookies, [refresh https://chat-gpt.org on chrome]"
)
headers = {
'authority': 'chat-gpt.org',
'accept': '*/*',
@ -37,13 +39,13 @@ class Aichat(AsyncProvider):
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
}
async with StreamSession(headers=headers,
cookies=cookies,
timeout=6,
proxies={"https": proxy} if proxy else None,
impersonate="chrome110", verify=False) as session:
json_data = {
"message": format_prompt(messages),
"temperature": kwargs.get('temperature', 0.5),
@ -51,14 +53,14 @@ class Aichat(AsyncProvider):
"top_p": kwargs.get('top_p', 1),
"frequency_penalty": 0,
}
async with session.post("https://chat-gpt.org/api/text",
json=json_data) as response:
response.raise_for_status()
result = await response.json()
if not result['response']:
raise Exception(f"Error Response: {result}")
return result["message"]

View File

@ -44,8 +44,8 @@ class Ails(AsyncGeneratorProvider):
"from-url": "https://ai.ls/?chat=1"
}
async with ClientSession(
headers=headers
) as session:
headers=headers
) as session:
timestamp = _format_timestamp(int(time.time() * 1000))
json_data = {
"model": "gpt-3.5-turbo",
@ -57,10 +57,10 @@ class Ails(AsyncGeneratorProvider):
"s": _hash({"t": timestamp, "m": messages[-1]["content"]}),
}
async with session.post(
"https://api.caipacity.com/v1/chat/completions",
proxy=proxy,
json=json_data
) as response:
"https://api.caipacity.com/v1/chat/completions",
proxy=proxy,
json=json_data
) as response:
response.raise_for_status()
start = "data: "
async for line in response.content:
@ -68,10 +68,9 @@ class Ails(AsyncGeneratorProvider):
if line.startswith(start) and line != "data: [DONE]":
line = line[len(start):-1]
line = json.loads(line)
token = line["choices"][0]["delta"].get("content")
if token:
if token := line["choices"][0]["delta"].get("content"):
if "ai.ls" in token or "ai.ci" in token:
raise Exception("Response Error: " + token)
raise Exception(f"Response Error: {token}")
yield token
@ -89,12 +88,7 @@ class Ails(AsyncGeneratorProvider):
def _hash(json_data: dict[str, str]) -> SHA256:
base_string: str = "%s:%s:%s:%s" % (
json_data["t"],
json_data["m"],
"WI,2rU#_r:r~aF4aJ36[.Z(/8Rv93Rf",
len(json_data["m"]),
)
base_string: str = f'{json_data["t"]}:{json_data["m"]}:WI,2rU#_r:r~aF4aJ36[.Z(/8Rv93Rf:{len(json_data["m"])}'
return SHA256(hashlib.sha256(base_string.encode()).hexdigest())
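
%-interpolation with a positional tuple becomes one f-string; the hash input is unchanged. A quick equivalence check with a hypothetical payload (the string constant is the one from the hunk above):

json_data = {"t": "1700000000", "m": "hello"}
SECRET = "WI,2rU#_r:r~aF4aJ36[.Z(/8Rv93Rf"

old = "%s:%s:%s:%s" % (json_data["t"], json_data["m"], SECRET, len(json_data["m"]))
new = f'{json_data["t"]}:{json_data["m"]}:{SECRET}:{len(json_data["m"])}'
assert old == new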

View File

@ -56,9 +56,10 @@ class Bing(AsyncGeneratorProvider):
return stream_generate(prompt, tone, image, context, proxy, cookies)
def create_context(messages: Messages):
context = "".join(f"[{message['role']}](#message)\n{message['content']}\n\n" for message in messages)
return context
return "".join(
f"[{message['role']}](#message)\n{message['content']}\n\n"
for message in messages
)
class Conversation():
def __init__(self, conversationId: str, clientId: str, conversationSignature: str, imageInfo: dict=None) -> None:
@ -71,7 +72,7 @@ async def create_conversation(session: ClientSession, tone: str, image: str = No
url = 'https://www.bing.com/turing/conversation/create?bundleVersion=1.1199.4'
async with await session.get(url, proxy=proxy) as response:
data = await response.json()
conversationId = data.get('conversationId')
clientId = data.get('clientId')
conversationSignature = response.headers.get('X-Sydney-Encryptedconversationsignature')
@ -110,30 +111,30 @@ async def create_conversation(session: ClientSession, tone: str, image: str = No
new_img_binary_data = compress_image_to_base64(new_img, compression_rate)
data, boundary = build_image_upload_api_payload(new_img_binary_data, conversation, tone)
headers = session.headers.copy()
headers["content-type"] = 'multipart/form-data; boundary=' + boundary
headers["content-type"] = f'multipart/form-data; boundary={boundary}'
headers["referer"] = 'https://www.bing.com/search?q=Bing+AI&showconv=1&FORM=hpcodx'
headers["origin"] = 'https://www.bing.com'
async with await session.post("https://www.bing.com/images/kblob", data=data, headers=headers, proxy=proxy) as image_upload_response:
if image_upload_response.status == 200:
image_info = await image_upload_response.json()
result = {}
if image_info.get('blobId'):
result['bcid'] = image_info.get('blobId', "")
result['blurredBcid'] = image_info.get('processedBlobId', "")
if result['blurredBcid'] != "":
result["imageUrl"] = "https://www.bing.com/images/blob?bcid=" + result['blurredBcid']
elif result['bcid'] != "":
result["imageUrl"] = "https://www.bing.com/images/blob?bcid=" + result['bcid']
if config['visualSearch']["enableFaceBlurDebug"]:
result['originalImageUrl'] = "https://www.bing.com/images/blob?bcid=" + result['blurredBcid']
else:
result['originalImageUrl'] = "https://www.bing.com/images/blob?bcid=" + result['bcid']
conversation.imageInfo = result
else:
raise Exception("Failed to parse image info.")
else:
if image_upload_response.status != 200:
raise Exception("Failed to upload image.")
image_info = await image_upload_response.json()
if not image_info.get('blobId'):
raise Exception("Failed to parse image info.")
result = {'bcid': image_info.get('blobId', "")}
result['blurredBcid'] = image_info.get('processedBlobId', "")
if result['blurredBcid'] != "":
result["imageUrl"] = "https://www.bing.com/images/blob?bcid=" + result['blurredBcid']
elif result['bcid'] != "":
result["imageUrl"] = "https://www.bing.com/images/blob?bcid=" + result['bcid']
result['originalImageUrl'] = (
"https://www.bing.com/images/blob?bcid="
+ result['blurredBcid']
if config['visualSearch']["enableFaceBlurDebug"]
else "https://www.bing.com/images/blob?bcid="
+ result['bcid']
)
conversation.imageInfo = result
except Exception as e:
print(f"An error happened while trying to send image: {str(e)}")
return conversation
@ -282,7 +283,18 @@ def build_image_upload_api_payload(image_bin: str, conversation: Conversation, t
'knowledgeRequest': payload
}
boundary="----WebKitFormBoundary" + ''.join(random.choices(string.ascii_letters + string.digits, k=16))
data = '--' + boundary + '\r\nContent-Disposition: form-data; name="knowledgeRequest"\r\n\r\n' + json.dumps(knowledge_request,ensure_ascii=False) + "\r\n--" + boundary + '\r\nContent-Disposition: form-data; name="imageBase64"\r\n\r\n' + image_bin + "\r\n--" + boundary + "--\r\n"
data = (
f'--{boundary}'
+ '\r\nContent-Disposition: form-data; name="knowledgeRequest"\r\n\r\n'
+ json.dumps(knowledge_request, ensure_ascii=False)
+ "\r\n--"
+ boundary
+ '\r\nContent-Disposition: form-data; name="imageBase64"\r\n\r\n'
+ image_bin
+ "\r\n--"
+ boundary
+ "--\r\n"
)
return data, boundary
def is_data_uri_an_image(data_uri):
@ -329,7 +341,7 @@ def extract_data_uri(data_uri):
def get_orientation(data: bytes):
try:
if data[0:2] != b'\xFF\xD8':
if data[:2] != b'\xFF\xD8':
raise Exception('NotJpeg')
with Image.open(data) as img:
exif_data = img._getexif()
@ -347,11 +359,11 @@ def process_image(orientation, img, new_width, new_height):
if orientation:
if orientation > 4:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
if orientation == 3 or orientation == 4:
if orientation in [3, 4]:
img = img.transpose(Image.ROTATE_180)
if orientation == 5 or orientation == 6:
if orientation in [5, 6]:
img = img.transpose(Image.ROTATE_270)
if orientation == 7 or orientation == 8:
if orientation in [7, 8]:
img = img.transpose(Image.ROTATE_90)
new_img.paste(img, (0, 0))
return new_img
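
Chained equality tests against the EXIF orientation flag become membership tests. A compact sketch of the dispatch, returning transpose names as strings so it runs without Pillow or an image:

def rotation_for(orientation: int) -> str:
    # "orientation == 3 or orientation == 4" becomes "orientation in [3, 4]".
    if orientation in [3, 4]:
        return "ROTATE_180"
    if orientation in [5, 6]:
        return "ROTATE_270"
    if orientation in [7, 8]:
        return "ROTATE_90"
    return "NONE"

print(rotation_for(6))  # ROTATE_270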
@ -362,8 +374,7 @@ def compress_image_to_base64(img, compression_rate):
try:
output_buffer = io.BytesIO()
img.save(output_buffer, format="JPEG", quality=int(compression_rate * 100))
base64_image = base64.b64encode(output_buffer.getvalue()).decode('utf-8')
return base64_image
return base64.b64encode(output_buffer.getvalue()).decode('utf-8')
except Exception as e:
raise e
@ -425,19 +436,14 @@ async def stream_generate(
cookies: dict = None
):
async with ClientSession(
timeout=ClientTimeout(total=900),
cookies=cookies,
headers=Defaults.headers,
) as session:
timeout=ClientTimeout(total=900),
cookies=cookies,
headers=Defaults.headers,
) as session:
conversation = await create_conversation(session, tone, image, proxy)
try:
async with session.ws_connect(
f'wss://sydney.bing.com/sydney/ChatHub',
autoping=False,
params={'sec_access_token': conversation.conversationSignature},
proxy=proxy
) as wss:
async with session.ws_connect('wss://sydney.bing.com/sydney/ChatHub', autoping=False, params={'sec_access_token': conversation.conversationSignature}, proxy=proxy) as wss:
await wss.send_str(format_message({'protocol': 'json', 'version': 1}))
await wss.receive(timeout=900)
await wss.send_str(create_message(conversation, prompt, tone, context))
@ -451,7 +457,7 @@ async def stream_generate(
for obj in objects:
if obj is None or not obj:
continue
response = json.loads(obj)
if response.get('type') == 1 and response['arguments'][0].get('messages'):
message = response['arguments'][0]['messages'][0]

View File

@ -20,16 +20,16 @@ class ChatBase(AsyncGeneratorProvider):
**kwargs
) -> AsyncResult:
chat_id = 'z2c2HSfKnCTh5J4650V0I'
headers = {
"User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
"Accept" : "*/*",
"Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
"Origin" : cls.url,
"Referer" : cls.url + "/",
"Sec-Fetch-Dest" : "empty",
"Sec-Fetch-Mode" : "cors",
"Sec-Fetch-Site" : "same-origin",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
"Accept": "*/*",
"Accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
"Origin": cls.url,
"Referer": f"{cls.url}/",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
}
async with ClientSession(headers=headers) as session:
data = {
@ -38,7 +38,7 @@ class ChatBase(AsyncGeneratorProvider):
"chatId": chat_id,
"conversationId": f"kcXpqEnqUie3dnJlsRi_O-{chat_id}"
}
async with session.post("https://www.chatbase.co/api/fe/chat", json=data, proxy=proxy) as response:
response.raise_for_status()
async for stream in response.content.iter_any():

View File

@ -31,8 +31,8 @@ class Chatgpt4Online(AsyncGeneratorProvider):
"newMessage": messages[-1]["content"],
"stream": True
}
async with session.post(cls.url + "/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response:
async with session.post(f"{cls.url}/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response:
response.raise_for_status()
async for line in response.content:
if line.startswith(b"data: "):

View File

@ -38,18 +38,17 @@ class ChatgptAi(AsyncGeneratorProvider):
"user-agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
}
async with ClientSession(
headers=headers
) as session:
headers=headers
) as session:
if not cls._system:
async with session.get(cls.url, proxy=proxy) as response:
response.raise_for_status()
text = await response.text()
result = re.search(r"data-system='(.*?)'", text)
if result:
if result := re.search(r"data-system='(.*?)'", text):
cls._system = json.loads(html.unescape(result.group(1)))
if not cls._system:
raise RuntimeError("System args not found")
if not cls._system:
raise RuntimeError("System args not found")
data = {
"botId": cls._system["botId"],
"customId": cls._system["customId"],

View File

@ -37,10 +37,13 @@ class ChatgptDemo(AsyncGeneratorProvider):
async with session.get(f"{cls.url}/", proxy=proxy) as response:
response.raise_for_status()
response = await response.text()
result = re.search(r'<div id="USERID" style="display: none">(.*?)<\/div>', response)
if not result:
if result := re.search(
r'<div id="USERID" style="display: none">(.*?)<\/div>',
response,
):
user_id = result.group(1)
else:
raise RuntimeError("No user id found")
user_id = result.group(1)
async with session.post(f"{cls.url}/new_chat", json={"user_id": user_id}, proxy=proxy) as response:
response.raise_for_status()
chat_id = (await response.json())["id_"]
@ -56,6 +59,5 @@ class ChatgptDemo(AsyncGeneratorProvider):
async for line in response.content:
if line.startswith(b"data: "):
line = json.loads(line[6:-1])
chunk = line["choices"][0]["delta"].get("content")
if chunk:
if chunk := line["choices"][0]["delta"].get("content"):
yield chunk
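
Several providers share this shape: scrape an id out of HTML with a regex and raise if it is absent. The walrus fuses the search with its success branch. A standalone sketch against a hypothetical HTML snippet:

import re

html = '<div id="USERID" style="display: none">user-42</div>'

if result := re.search(r'<div id="USERID" style="display: none">(.*?)<\/div>', html):
    user_id = result.group(1)
else:
    raise RuntimeError("No user id found")

print(user_id)  # user-42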

View File

@ -31,8 +31,8 @@ class ChatgptFree(AsyncProvider):
if not cookies:
cookies = get_cookies('chatgptfree.ai')
if not cookies:
raise RuntimeError(f"g4f.provider.{cls.__name__} requires cookies [refresh https://chatgptfree.ai on chrome]")
if not cookies:
raise RuntimeError(f"g4f.provider.{cls.__name__} requires cookies [refresh https://chatgptfree.ai on chrome]")
headers = {
'authority': 'chatgptfree.ai',
@ -48,14 +48,14 @@ class ChatgptFree(AsyncProvider):
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
}
async with StreamSession(
headers=headers,
cookies=cookies,
impersonate="chrome107",
proxies={"https": proxy},
timeout=timeout
) as session:
headers=headers,
cookies=cookies,
impersonate="chrome107",
proxies={"https": proxy},
timeout=timeout
) as session:
if not cls._nonce:
async with session.get(f"{cls.url}/") as response:
@ -67,13 +67,13 @@ class ChatgptFree(AsyncProvider):
if not result:
raise RuntimeError("No post id found")
cls._post_id = result.group(1)
result = re.search(r'data-nonce="(.*?)"', response)
if not result:
if result := re.search(r'data-nonce="(.*?)"', response):
cls._nonce = result.group(1)
else:
raise RuntimeError("No nonce found")
cls._nonce = result.group(1)
prompt = format_prompt(messages)
data = {
"_wpnonce": cls._nonce,
@ -83,8 +83,7 @@ class ChatgptFree(AsyncProvider):
"message": prompt,
"bot_id": "0"
}
async with session.post(cls.url + "/wp-admin/admin-ajax.php",
data=data, cookies=cookies) as response:
async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, cookies=cookies) as response:
response.raise_for_status()
return (await response.json())["data"]

View File

@ -45,10 +45,13 @@ class ChatgptLogin(AsyncGeneratorProvider):
async with session.get(f"{cls.url}/chat/", proxy=proxy) as response:
response.raise_for_status()
response = await response.text()
result = re.search(r'<div id="USERID" style="display: none">(.*?)<\/div>', response)
if not result:
if result := re.search(
r'<div id="USERID" style="display: none">(.*?)<\/div>',
response,
):
cls._user_id = result.group(1)
else:
raise RuntimeError("No user id found")
cls._user_id = result.group(1)
async with session.post(f"{cls.url}/chat/new_chat", json={"user_id": cls._user_id}, proxy=proxy) as response:
response.raise_for_status()
chat_id = (await response.json())["id_"]
@ -64,8 +67,9 @@ class ChatgptLogin(AsyncGeneratorProvider):
response.raise_for_status()
async for line in response.content:
if line.startswith(b"data: "):
content = json.loads(line[6:])["choices"][0]["delta"].get("content")
if content:
if content := json.loads(line[6:])["choices"][0][
"delta"
].get("content"):
yield content
async with session.post(f"{cls.url}/chat/delete_chat", json={"chat_id": chat_id}, proxy=proxy) as response:
response.raise_for_status()

View File

@ -35,14 +35,15 @@ class ChatgptX(AsyncGeneratorProvider):
async with ClientSession(headers=headers) as session:
async with session.get(f"{cls.url}/", proxy=proxy) as response:
response = await response.text()
result = re.search(r'<meta name="csrf-token" content="(.*?)"', response)
if result:
if result := re.search(
r'<meta name="csrf-token" content="(.*?)"', response
):
csrf_token = result.group(1)
result = re.search(r"openconversions\('(.*?)'\)", response)
if result:
if result := re.search(r"openconversions\('(.*?)'\)", response):
chat_id = result.group(1)
result = re.search(r'<input type="hidden" id="user_id" value="(.*?)"', response)
if result:
if result := re.search(
r'<input type="hidden" id="user_id" value="(.*?)"', response
):
user_id = result.group(1)
if not csrf_token or not chat_id or not user_id:
@ -63,7 +64,7 @@ class ChatgptX(AsyncGeneratorProvider):
'x-csrf-token': csrf_token,
'x-requested-with': 'XMLHttpRequest'
}
async with session.post(cls.url + '/sendchat', data=data, headers=headers, proxy=proxy) as response:
async with session.post(f'{cls.url}/sendchat', data=data, headers=headers, proxy=proxy) as response:
response.raise_for_status()
chat = await response.json()
if "response" not in chat or not chat["response"]:

View File

@ -49,7 +49,7 @@ class GPTalk(AsyncGeneratorProvider):
"fingerprint": secrets.token_hex(16).zfill(32),
"platform": "fingerprint"
}
async with session.post(cls.url + "/api/chatgpt/user/login", json=data, proxy=proxy) as response:
async with session.post(f"{cls.url}/api/chatgpt/user/login", json=data, proxy=proxy) as response:
response.raise_for_status()
cls._auth = (await response.json())["data"]
data = {
@ -69,11 +69,11 @@ class GPTalk(AsyncGeneratorProvider):
headers = {
'authorization': f'Bearer {cls._auth["token"]}',
}
async with session.post(cls.url + "/api/chatgpt/chatapi/text", json=data, headers=headers, proxy=proxy) as response:
async with session.post(f"{cls.url}/api/chatgpt/chatapi/text", json=data, headers=headers, proxy=proxy) as response:
response.raise_for_status()
token = (await response.json())["data"]["token"]
last_message = ""
async with session.get(cls.url + "/api/chatgpt/chatapi/stream", params={"token": token}, proxy=proxy) as response:
async with session.get(f"{cls.url}/api/chatgpt/chatapi/stream", params={"token": token}, proxy=proxy) as response:
response.raise_for_status()
async for line in response.content:
if line.startswith(b"data: "):

View File

@ -23,13 +23,15 @@ class GptChatly(AsyncProvider):
cookies = get_cookies('gptchatly.com') if not cookies else cookies
if not cookies:
raise RuntimeError(f"g4f.provider.GptChatly requires cookies, [refresh https://gptchatly.com on chrome]")
raise RuntimeError(
"g4f.provider.GptChatly requires cookies, [refresh https://gptchatly.com on chrome]"
)
if model.startswith("gpt-4"):
chat_url = f"{cls.url}/fetch-gpt4-response"
else:
chat_url = f"{cls.url}/fetch-response"
headers = {
'authority': 'gptchatly.com',
'accept': '*/*',
@ -45,7 +47,7 @@ class GptChatly(AsyncProvider):
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
}
async with StreamSession(headers=headers,
proxies={"https": proxy}, cookies=cookies, impersonate='chrome110') as session:
data = {

View File

@ -55,8 +55,9 @@ class GptForLove(AsyncGeneratorProvider):
except:
raise RuntimeError(f"Broken line: {line}")
if "detail" in line:
content = line["detail"]["choices"][0]["delta"].get("content")
if content:
if content := line["detail"]["choices"][0]["delta"].get(
"content"
):
yield content
elif "10分钟内提问超过了5次" in line:
raise RuntimeError("Rate limit reached")
@ -66,7 +67,7 @@ class GptForLove(AsyncGeneratorProvider):
def get_secret() -> str:
dir = os.path.dirname(__file__)
include = dir + '/npm/node_modules/crypto-js/crypto-js'
include = f'{dir}/npm/node_modules/crypto-js/crypto-js'
source = """
CryptoJS = require({include})
var k = '14487141bvirvvG'

View File

@ -21,18 +21,18 @@ class GptGo(AsyncGeneratorProvider):
**kwargs
) -> AsyncResult:
headers = {
"User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
"Accept" : "*/*",
"Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
"Origin" : cls.url,
"Referer" : cls.url + "/",
"Sec-Fetch-Dest" : "empty",
"Sec-Fetch-Mode" : "cors",
"Sec-Fetch-Site" : "same-origin",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
"Accept": "*/*",
"Accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
"Origin": cls.url,
"Referer": f"{cls.url}/",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
}
async with ClientSession(
headers=headers
) as session:
headers=headers
) as session:
async with session.get(
"https://gptgo.ai/action_get_token.php",
params={
@ -46,12 +46,12 @@ class GptGo(AsyncGeneratorProvider):
token = (await response.json(content_type=None))["token"]
async with session.get(
"https://gptgo.ai/action_ai_gpt.php",
params={
"token": token,
},
proxy=proxy
) as response:
"https://gptgo.ai/action_ai_gpt.php",
params={
"token": token,
},
proxy=proxy
) as response:
response.raise_for_status()
start = "data: "
async for line in response.content:
@ -62,8 +62,7 @@ class GptGo(AsyncGeneratorProvider):
line = json.loads(line[len(start):-1])
if line["choices"][0]["finish_reason"] == "stop":
break
content = line["choices"][0]["delta"].get("content")
if content:
if content := line["choices"][0]["delta"].get("content"):
yield content

View File

@ -33,7 +33,7 @@ class GptGod(AsyncGeneratorProvider):
"Pragma": "no-cache",
"Cache-Control": "no-cache",
}
async with ClientSession(headers=headers) as session:
prompt = format_prompt(messages)
data = {
@ -45,12 +45,11 @@ class GptGod(AsyncGeneratorProvider):
event = None
async for line in response.content:
print(line)
if line.startswith(b'event: '):
event = line[7:-1]
elif event == b"data" and line.startswith(b"data: "):
data = json.loads(line[6:-1])
if data:
if data := json.loads(line[6:-1]):
yield data
elif event == b"done":
break

View File

@ -49,7 +49,7 @@ class Liaobots(AsyncGeneratorProvider):
"authority": "liaobots.com",
"content-type": "application/json",
"origin": cls.url,
"referer": cls.url + "/",
"referer": f"{cls.url}/",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
}
async with ClientSession(

View File

@ -24,30 +24,27 @@ class Vercel(BaseProvider):
if not model:
model = "gpt-3.5-turbo"
elif model not in model_info:
raise ValueError(f"Vercel does not support {model}")
headers = {
'authority' : 'sdk.vercel.ai',
'accept' : '*/*',
'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
'cache-control' : 'no-cache',
'content-type' : 'application/json',
'custom-encoding' : get_anti_bot_token(),
'origin' : 'https://sdk.vercel.ai',
'pragma' : 'no-cache',
'referer' : 'https://sdk.vercel.ai/',
'sec-ch-ua' : '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
'sec-ch-ua-mobile' : '?0',
'authority': 'sdk.vercel.ai',
'accept': '*/*',
'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
'cache-control': 'no-cache',
'content-type': 'application/json',
'custom-encoding': get_anti_bot_token(),
'origin': 'https://sdk.vercel.ai',
'pragma': 'no-cache',
'referer': 'https://sdk.vercel.ai/',
'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
'sec-fetch-dest' : 'empty',
'sec-fetch-mode' : 'cors',
'sec-fetch-site' : 'same-origin',
'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.%s.%s Safari/537.36' % (
random.randint(99, 999),
random.randint(99, 999)
)
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'user-agent': f'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.{random.randint(99, 999)}.{random.randint(99, 999)} Safari/537.36',
}
json_data = {
@ -60,7 +57,7 @@ class Vercel(BaseProvider):
}
max_retries = kwargs.get('max_retries', 20)
for i in range(max_retries):
for _ in range(max_retries):
response = requests.post('https://sdk.vercel.ai/api/generate',
headers=headers, json=json_data, stream=True, proxies={"https": proxy})
try:
@ -74,22 +71,19 @@ class Vercel(BaseProvider):
def get_anti_bot_token() -> str:
headers = {
'authority' : 'sdk.vercel.ai',
'accept' : '*/*',
'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
'cache-control' : 'no-cache',
'pragma' : 'no-cache',
'referer' : 'https://sdk.vercel.ai/',
'sec-ch-ua' : '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
'sec-ch-ua-mobile' : '?0',
'authority': 'sdk.vercel.ai',
'accept': '*/*',
'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
'cache-control': 'no-cache',
'pragma': 'no-cache',
'referer': 'https://sdk.vercel.ai/',
'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
'sec-fetch-dest' : 'empty',
'sec-fetch-mode' : 'cors',
'sec-fetch-site' : 'same-origin',
'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.%s.%s Safari/537.36' % (
random.randint(99, 999),
random.randint(99, 999)
)
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'user-agent': f'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.{random.randint(99, 999)}.{random.randint(99, 999)} Safari/537.36',
}
response = requests.get('https://sdk.vercel.ai/openai.jpeg',

View File

@ -23,10 +23,7 @@ class Ylokh(AsyncGeneratorProvider):
**kwargs
) -> AsyncResult:
model = model if model else "gpt-3.5-turbo"
headers = {
"Origin" : cls.url,
"Referer": cls.url + "/",
}
headers = {"Origin": cls.url, "Referer": f"{cls.url}/"}
data = {
"messages": messages,
"model": model,
@ -39,10 +36,10 @@ class Ylokh(AsyncGeneratorProvider):
**kwargs
}
async with StreamSession(
headers=headers,
proxies={"https": proxy},
timeout=timeout
) as session:
headers=headers,
proxies={"https": proxy},
timeout=timeout
) as session:
async with session.post("https://chatapi.ylokh.xyz/v1/chat/completions", json=data) as response:
response.raise_for_status()
if stream:
@ -52,8 +49,9 @@ class Ylokh(AsyncGeneratorProvider):
if line.startswith("data: [DONE]"):
break
line = json.loads(line[6:])
content = line["choices"][0]["delta"].get("content")
if content:
if content := line["choices"][0]["delta"].get(
"content"
):
yield content
else:
chat = await response.json()

View File

@ -18,9 +18,12 @@ class AiService(BaseProvider):
stream: bool,
**kwargs: Any,
) -> CreateResult:
base = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
base += "\nassistant: "
base = (
"\n".join(
f"{message['role']}: {message['content']}" for message in messages
)
+ "\nassistant: "
)
headers = {
"accept": "*/*",
"content-type": "text/plain;charset=UTF-8",

View File

@ -20,18 +20,18 @@ class CodeLinkAva(AsyncGeneratorProvider):
**kwargs
) -> AsyncGenerator:
headers = {
"User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
"Accept" : "*/*",
"Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
"Origin" : cls.url,
"Referer" : cls.url + "/",
"Sec-Fetch-Dest" : "empty",
"Sec-Fetch-Mode" : "cors",
"Sec-Fetch-Site" : "same-origin",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
"Accept": "*/*",
"Accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
"Origin": cls.url,
"Referer": f"{cls.url}/",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
}
async with ClientSession(
headers=headers
) as session:
headers=headers
) as session:
data = {
"messages": messages,
"temperature": 0.6,
@ -46,8 +46,7 @@ class CodeLinkAva(AsyncGeneratorProvider):
if line.startswith("data: [DONE]"):
break
line = json.loads(line[6:-1])
content = line["choices"][0]["delta"].get("content")
if content:
if content := line["choices"][0]["delta"].get("content"):
yield content

View File

@ -30,7 +30,7 @@ class EasyChat(BaseProvider):
"https://chat4.fastgpt.me",
"https://gxos1h1ddt.fastgpt.me"
]
server = active_servers[kwargs.get("active_server", random.randint(0, 5))]
headers = {
"authority" : f"{server}".replace("https://", ""),
@ -68,30 +68,26 @@ class EasyChat(BaseProvider):
response = session.post(f"{server}/api/openai/v1/chat/completions",
headers=headers, json=json_data, stream=stream)
if response.status_code == 200:
if stream == False:
json_data = response.json()
if "choices" in json_data:
yield json_data["choices"][0]["message"]["content"]
else:
raise Exception("No response from server")
else:
for chunk in response.iter_lines():
if b"content" in chunk:
splitData = chunk.decode().split("data:")
if len(splitData) > 1:
yield json.loads(splitData[1])["choices"][0]["delta"]["content"]
else:
continue
else:
if response.status_code != 200:
raise Exception(f"Error {response.status_code} from server : {response.reason}")
if not stream:
json_data = response.json()
if "choices" in json_data:
yield json_data["choices"][0]["message"]["content"]
else:
raise Exception("No response from server")
else:
for chunk in response.iter_lines():
if b"content" in chunk:
splitData = chunk.decode().split("data:")
if len(splitData) > 1:
yield json.loads(splitData[1])["choices"][0]["delta"]["content"]
@classmethod

View File

@ -56,18 +56,17 @@ class Equing(BaseProvider):
response = requests.post('https://next.eqing.tech/api/openai/v1/chat/completions',
headers=headers, json=json_data, stream=stream)
if not stream:
yield response.json()["choices"][0]["message"]["content"]
return
for line in response.iter_content(chunk_size=1024):
if line:
if b'content' in line:
line_json = json.loads(line.decode('utf-8').split('data: ')[1])
token = line_json['choices'][0]['delta'].get('content')
if token:
yield token
line_json = json.loads(line.decode('utf-8').split('data: ')[1])
if token := line_json['choices'][0]['delta'].get('content'):
yield token
@classmethod
@property

View File

@ -55,7 +55,7 @@ class FastGpt(BaseProvider):
'frequency_penalty' : kwargs.get('frequency_penalty', 0),
'top_p' : kwargs.get('top_p', 1),
}
subdomain = random.choice([
'jdaen979ew',
'chat9'
@ -63,15 +63,16 @@ class FastGpt(BaseProvider):
response = requests.post(f'https://{subdomain}.fastgpt.me/api/openai/v1/chat/completions',
headers=headers, json=json_data, stream=stream)
for line in response.iter_lines():
if line:
try:
if b'content' in line:
line_json = json.loads(line.decode('utf-8').split('data: ')[1])
token = line_json['choices'][0]['delta'].get('content')
if token:
yield token
line_json = json.loads(line.decode('utf-8').split('data: ')[1])
if token := line_json['choices'][0]['delta'].get(
'content'
):
yield token
except:
continue

View File

@ -22,7 +22,7 @@ class H2o(AsyncGeneratorProvider):
**kwargs
) -> AsyncResult:
model = model if model else cls.model
headers = {"Referer": cls.url + "/"}
headers = {"Referer": f"{cls.url}/"}
async with ClientSession(
headers=headers

View File

@ -33,7 +33,7 @@ class Lockchat(BaseProvider):
}
response = requests.post("http://supertest.lockchat.app/v1/chat/completions",
json=payload, headers=headers, stream=True)
response.raise_for_status()
for token in response.iter_lines():
if b"The model: `gpt-4` does not exist" in token:
@ -44,11 +44,10 @@ class Lockchat(BaseProvider):
stream = stream,
temperature = temperature,
**kwargs)
if b"content" in token:
token = json.loads(token.decode("utf-8").split("data: ")[1])
token = token["choices"][0]["delta"].get("content")
if token:
if token := token["choices"][0]["delta"].get("content"):
yield (token)
@classmethod

View File

@ -21,9 +21,12 @@ class V50(BaseProvider):
messages: list[dict[str, str]],
stream: bool, **kwargs: Any) -> CreateResult:
conversation = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
conversation += "\nassistant: "
conversation = (
"\n".join(
f"{message['role']}: {message['content']}" for message in messages
)
+ "\nassistant: "
)
payload = {
"prompt" : conversation,
"options" : {},
@ -33,7 +36,7 @@ class V50(BaseProvider):
"model" : model,
"user" : str(uuid.uuid4())
}
headers = {
'authority' : 'p5.v50.ltd',
'accept' : 'application/json, text/plain, */*',
@ -47,9 +50,13 @@ class V50(BaseProvider):
'sec-fetch-site' : 'same-origin',
'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36'
}
response = requests.post("https://p5.v50.ltd/api/chat-process",
json=payload, headers=headers, proxies=kwargs['proxy'] if 'proxy' in kwargs else {})
response = requests.post(
"https://p5.v50.ltd/api/chat-process",
json=payload,
headers=headers,
proxies=kwargs.get('proxy', {}),
)
if "https://fk1.v50.ltd" not in response.text:
yield response.text
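
kwargs.get('proxy', {}) folds the membership test and the fallback into a single call. A tiny demonstration of the equivalence:

def pick_proxies(**kwargs):
    # Old: kwargs['proxy'] if 'proxy' in kwargs else {}
    return kwargs.get('proxy', {})

print(pick_proxies(proxy={"https": "http://127.0.0.1:8080"}))
print(pick_proxies())  # {}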

View File

@ -20,14 +20,14 @@ class Vitalentum(AsyncGeneratorProvider):
**kwargs
) -> AsyncResult:
headers = {
"User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
"Accept" : "text/event-stream",
"Accept-language" : "de,en-US;q=0.7,en;q=0.3",
"Origin" : cls.url,
"Referer" : cls.url + "/",
"Sec-Fetch-Dest" : "empty",
"Sec-Fetch-Mode" : "cors",
"Sec-Fetch-Site" : "same-origin",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
"Accept": "text/event-stream",
"Accept-language": "de,en-US;q=0.7,en;q=0.3",
"Origin": cls.url,
"Referer": f"{cls.url}/",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
}
conversation = json.dumps({"history": [{
"speaker": "human" if message["role"] == "user" else "bot",
@ -39,8 +39,8 @@ class Vitalentum(AsyncGeneratorProvider):
**kwargs
}
async with ClientSession(
headers=headers
) as session:
headers=headers
) as session:
async with session.post(f"{cls.url}/api/converse-edge", json=data, proxy=proxy) as response:
response.raise_for_status()
async for line in response.content:
@ -49,8 +49,7 @@ class Vitalentum(AsyncGeneratorProvider):
if line.startswith("data: [DONE]"):
break
line = json.loads(line[6:-1])
content = line["choices"][0]["delta"].get("content")
if content:
if content := line["choices"][0]["delta"].get("content"):
yield content

View File

@ -41,15 +41,20 @@ class Wuguokai(BaseProvider):
"userId": f"#/chat/{random.randint(1,99999999)}",
"usingContext": True
}
response = requests.post("https://ai-api20.wuguokai.xyz/api/chat-process", headers=headers, timeout=3, json=data, proxies=kwargs['proxy'] if 'proxy' in kwargs else {})
response = requests.post(
"https://ai-api20.wuguokai.xyz/api/chat-process",
headers=headers,
timeout=3,
json=data,
proxies=kwargs.get('proxy', {}),
)
_split = response.text.split("> 若回答失败请重试或多刷新几次界面后重试")
if response.status_code == 200:
if len(_split) > 1:
yield _split[1].strip()
else:
yield _split[0].strip()
else:
if response.status_code != 200:
raise Exception(f"Error: {response.status_code} {response.reason}")
if len(_split) > 1:
yield _split[1].strip()
else:
yield _split[0].strip()
@classmethod
@property

View File

@ -77,9 +77,8 @@ def get_cookies(domain_name=''):
def g4f(domain_name):
user_data_dir = user_config_dir("g4f")
cookie_file = path.join(user_data_dir, "Default", "Cookies")
if not path.exists(cookie_file):
return []
return chrome(cookie_file, domain_name)
return [] if not path.exists(cookie_file) else chrome(cookie_file, domain_name)
cookies = {}
for cookie_fn in [g4f, chrome, chromium, opera, opera_gx, brave, edge, vivaldi, firefox]:
try:
@ -96,16 +95,15 @@ def get_cookies(domain_name=''):
def format_prompt(messages: Messages, add_special_tokens=False) -> str:
if add_special_tokens or len(messages) > 1:
formatted = "\n".join(
[
"%s: %s" % ((message["role"]).capitalize(), message["content"])
for message in messages
]
)
return f"{formatted}\nAssistant:"
else:
if not add_special_tokens and len(messages) <= 1:
return messages[0]["content"]
formatted = "\n".join(
[
f'{message["role"].capitalize()}: {message["content"]}'
for message in messages
]
)
return f"{formatted}\nAssistant:"
def get_browser(user_data_dir: str = None):
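
format_prompt is flipped into guard-clause style: the lone-message case returns first, and the joined transcript becomes the unindented main path. A runnable sketch with a hypothetical message list:

def format_prompt(messages, add_special_tokens=False) -> str:
    # Guard clause: a single message without special tokens passes through.
    if not add_special_tokens and len(messages) <= 1:
        return messages[0]["content"]
    formatted = "\n".join(
        f'{message["role"].capitalize()}: {message["content"]}'
        for message in messages
    )
    return f"{formatted}\nAssistant:"

print(format_prompt([{"role": "user", "content": "Hi"}]))
print(format_prompt([{"role": "user", "content": "Hi"},
                     {"role": "assistant", "content": "Hello"}]))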

View File

@ -38,18 +38,18 @@ class Bard(AsyncProvider):
'x-same-domain': '1',
}
async with ClientSession(
cookies=cookies,
headers=headers
) as session:
cookies=cookies,
headers=headers
) as session:
if not cls._snlm0e:
async with session.get(cls.url, proxy=proxy) as response:
text = await response.text()
match = re.search(r'SNlM0e\":\"(.*?)\"', text)
if not match:
if match := re.search(r'SNlM0e\":\"(.*?)\"', text):
cls._snlm0e = match.group(1)
else:
raise RuntimeError("No snlm0e value.")
cls._snlm0e = match.group(1)
params = {
'bl': 'boq_assistant-bard-web-server_20230326.21_p0',
'_reqid': random.randint(1111, 9999),

View File

@ -32,12 +32,10 @@ class Raycast(BaseProvider):
'Content-Type': 'application/json',
'User-Agent': 'Raycast/0 CFNetwork/1410.0.3 Darwin/22.6.0',
}
parsed_messages = []
for message in messages:
parsed_messages.append({
'author': message['role'],
'content': {'text': message['content']}
})
parsed_messages = [
{'author': message['role'], 'content': {'text': message['content']}}
for message in messages
]
data = {
"debug": False,
"locale": "en-CN",

View File

@ -28,28 +28,28 @@ class Theb(BaseProvider):
"bearer_token":"free",
"org_id":"theb",
})
bearer_token = auth["bearer_token"]
org_id = auth["org_id"]
headers = {
'authority' : 'beta.theb.ai',
'accept' : 'text/event-stream',
'accept-language' : 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
'authorization' : 'Bearer '+bearer_token,
'content-type' : 'application/json',
'origin' : 'https://beta.theb.ai',
'referer' : 'https://beta.theb.ai/home',
'sec-ch-ua' : '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
'sec-ch-ua-mobile' : '?0',
'authority': 'beta.theb.ai',
'accept': 'text/event-stream',
'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
'authorization': f'Bearer {bearer_token}',
'content-type': 'application/json',
'origin': 'https://beta.theb.ai',
'referer': 'https://beta.theb.ai/home',
'sec-ch-ua': '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'sec-fetch-dest' : 'empty',
'sec-fetch-mode' : 'cors',
'sec-fetch-site' : 'same-origin',
'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
'x-ai-model' : 'ee8d4f29cb7047f78cbe84313ed6ace8',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
'x-ai-model': 'ee8d4f29cb7047f78cbe84313ed6ace8',
}
req_rand = random.randint(100000000, 9999999999)
json_data: dict[str, Any] = {
@ -65,7 +65,7 @@ class Theb(BaseProvider):
"long_term_memory" : "auto"
}
}
response = requests.post(
f"https://beta.theb.ai/api/conversation?org_id={org_id}&req_rand={req_rand}",
headers=headers,
@ -73,7 +73,7 @@ class Theb(BaseProvider):
stream=True,
proxies={"https": proxy}
)
response.raise_for_status()
content = ""
next_content = ""
@ -82,7 +82,7 @@ class Theb(BaseProvider):
next_content = content
data = json.loads(chunk.decode().split("data: ")[1])
content = data["content"]
yield data["content"].replace(next_content, "")
yield content.replace(next_content, "")
@classmethod
@property

View File

@ -43,10 +43,12 @@ class ChatAiGpt(AsyncGeneratorProvider):
async with session.get(f"{cls.url}/", proxy=proxy) as response:
response.raise_for_status()
response = await response.text()
result = re.search(r'data-nonce=(.*?) data-post-id=([0-9]+)', response)
if not result:
if result := re.search(
r'data-nonce=(.*?) data-post-id=([0-9]+)', response
):
cls._nonce, cls._post_id = result.group(1), result.group(2)
else:
raise RuntimeError("No nonce found")
cls._nonce, cls._post_id = result.group(1), result.group(2)
prompt = format_prompt(messages)
data = {
"_wpnonce": cls._nonce,

View File

@ -48,8 +48,7 @@ class MikuChat(AsyncGeneratorProvider):
async for line in response.iter_lines():
if line.startswith(b"data: "):
line = json.loads(line[6:])
chunk = line["choices"][0]["delta"].get("content")
if chunk:
if chunk := line["choices"][0]["delta"].get("content"):
yield chunk
def k(e: str, t: int):

View File

@ -21,7 +21,7 @@ class PerplexityAi(AsyncProvider):
proxy: str = None,
**kwargs
) -> str:
url = cls.url + "/socket.io/?EIO=4&transport=polling"
url = f"{cls.url}/socket.io/?EIO=4&transport=polling"
headers = {
"Referer": f"{cls.url}/"
}

View File

@ -82,7 +82,7 @@ class ChatCompletion:
ignored : List[str] = None, **kwargs) -> str:
if stream:
raise ValueError(f'"create_async" does not support "stream" argument')
raise ValueError('"create_async" does not support "stream" argument')
model, provider = get_model_and_provider(model, provider, False, ignored)

View File

@ -41,7 +41,7 @@ class Api:
def run(self, bind_str, threads=8):
host, port = self.__parse_bind(bind_str)
CORS(self.app, resources={r'/v1/*': {'supports_credentials': True, 'expose_headers': [
'Content-Type',
'Authorization',
@ -51,18 +51,18 @@ class Api:
'Access-Control-Request-Method',
'Access-Control-Request-Headers',
'Content-Disposition'], 'max_age': 600}})
self.app.route('/v1/models', methods=['GET'])(self.models)
self.app.route('/v1/models/<model_id>', methods=['GET'])(self.model_info)
self.app.route('/v1/chat/completions', methods=['POST'])(self.chat_completions)
self.app.route('/v1/completions', methods=['POST'])(self.completions)
for ex in default_exceptions:
self.app.register_error_handler(ex, self.__handle_error)
if not self.debug:
self.logger.warning('Serving on http://{}:{}'.format(host, port))
self.logger.warning(f'Serving on http://{host}:{port}')
WSGIRequestHandler.protocol_version = 'HTTP/1.1'
serve(self.app, host=host, port=port, ident=None, threads=threads)
@ -76,7 +76,7 @@ class Api:
@staticmethod
def __after_request(resp):
resp.headers['X-Server'] = 'g4f/%s' % g4f.version
resp.headers['X-Server'] = f'g4f/{g4f.version}'
return resp

View File

@ -35,9 +35,7 @@ class Backend_Api:
return 'ok', 200
def models(self):
models = g4f._all_models
return models
return g4f._all_models
def _gen_title(self):
return {
@ -52,19 +50,18 @@ class Backend_Api:
prompt = request.json['meta']['content']['parts'][0]
model = request.json['model']
provider = request.json.get('provider').split('g4f.Provider.')[1]
messages = special_instructions[jailbreak] + conversation + search(internet_access, prompt) + [prompt]
def stream():
if provider:
answer = g4f.ChatCompletion.create(model=model,
provider=get_provider(provider), messages=messages, stream=True)
else:
answer = g4f.ChatCompletion.create(model=model,
messages=messages, stream=True)
for token in answer:
yield token
yield from g4f.ChatCompletion.create(
model=model,
provider=get_provider(provider),
messages=messages,
stream=True,
) if provider else g4f.ChatCompletion.create(
model=model, messages=messages, stream=True
)
return self.app.response_class(stream(), mimetype='text/event-stream')
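
yield from delegates to the inner generator instead of re-yielding token by token, and the conditional expression picks which generator to delegate to. A simplified sketch with dummy generators standing in for g4f.ChatCompletion.create:

def with_provider():
    yield from ("a", "b")

def without_provider():
    yield from ("x", "y")

def stream(provider):
    # The conditional picks a generator; yield from replaces the
    # explicit "for token in answer: yield token" loop.
    yield from with_provider() if provider else without_provider()

print(list(stream("SomeProvider")))  # ['a', 'b']
print(list(stream(None)))            # ['x', 'y']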

View File

@ -9,21 +9,20 @@ ddgs = DDGS(timeout=20)
def search(internet_access, prompt):
print(prompt)
try:
if not internet_access:
return []
results = duckduckgo_search(q=prompt)
if not search:
return []
blob = ''
for index, result in enumerate(results):
blob += f'[{index}] "{result["body"]}"\nURL:{result["href"]}\n\n'
blob = ''.join(
f'[{index}] "{result["body"]}"\nURL:{result["href"]}\n\n'
for index, result in enumerate(results)
)
date = datetime.now().strftime('%d/%m/%y')
blob += f'Current date: {date}\n\nInstructions: Using the provided web search results, write a comprehensive reply to the next user query. Make sure to cite results using [[number](URL)] notation after the reference. If the provided search results refer to multiple subjects with the same name, write separate answers for each subject. Ignore your previous response if any.'

View File

@ -5,12 +5,10 @@ from g4f import BaseProvider
def get_provider(provider: str) -> BaseProvider | None:
if isinstance(provider, str):
print(provider)
if provider == 'g4f.Provider.Auto':
return None
return g4f.Provider.ProviderUtils.convert.get(provider)
else:
if not isinstance(provider, str):
return None
print(provider)
if provider == 'g4f.Provider.Auto':
return None
return g4f.Provider.ProviderUtils.convert.get(provider)
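
get_provider now returns early on the non-string case, so the lookup path stays unindented. A sketch with a stub conversion table in place of g4f.Provider.ProviderUtils.convert:

PROVIDERS = {"g4f.Provider.Bing": "BingProvider"}

def get_provider(provider):
    # Early return: non-strings short-circuit before any lookup.
    if not isinstance(provider, str):
        return None
    if provider == 'g4f.Provider.Auto':
        return None
    return PROVIDERS.get(provider)

print(get_provider("g4f.Provider.Bing"))  # BingProvider
print(get_provider(None))                 # None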

View File

@ -25,9 +25,9 @@ class Website:
}
def _chat(self, conversation_id):
if not '-' in conversation_id:
return redirect(f'/chat')
if '-' not in conversation_id:
return redirect('/chat')
return render_template('index.html', chat_id = conversation_id)
def _index(self):

View File

@ -52,10 +52,7 @@ class StreamResponse:
):
if pending is not None:
chunk = pending + chunk
if delimiter:
lines = chunk.split(delimiter)
else:
lines = chunk.splitlines()
lines = chunk.split(delimiter) if delimiter else chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
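
The final hunk compresses the four-line delimiter branch into one conditional expression. The splitting helper in isolation:

def split_chunk(chunk, delimiter=None):
    # Ternary replaces the if/else assignment of `lines`.
    return chunk.split(delimiter) if delimiter else chunk.splitlines()

print(split_chunk("a\nb\nc"))      # ['a', 'b', 'c']
print(split_chunk("a|b|c", "|"))   # ['a', 'b', 'c']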