Merge pull request #1261 from hlohaus/any

Add Poe Provider, Update AItianhuSpace Provider
H Lohaus 2023-11-17 03:21:50 +01:00 committed by GitHub
commit f57fa3a43f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 249 additions and 86 deletions


@@ -1,95 +1,128 @@
 from __future__ import annotations
-import random, json
+import time
+import random
+from ..typing import CreateResult, Messages
+from .base_provider import BaseProvider
+from .helper import WebDriver, format_prompt, get_browser
 from .. import debug
-from ..typing import AsyncResult, Messages
-from ..requests import StreamSession
-from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
-domains = {
-    "gpt-3.5-turbo": "aitianhu.space",
-    "gpt-4": "aitianhu.website",
-}
-class AItianhuSpace(AsyncGeneratorProvider):
+class AItianhuSpace(BaseProvider):
     url = "https://chat3.aiyunos.top/"
     working = True
     supports_gpt_35_turbo = True
+    _domains = ["aitianhu.com", "aitianhu1.top"]
     @classmethod
-    async def create_async_generator(cls,
-                                     model: str,
-                                     messages: Messages,
-                                     proxy: str = None,
-                                     domain: str = None,
-                                     cookies: dict = None,
-                                     timeout: int = 10, **kwargs) -> AsyncResult:
+    def create_completion(
+        cls,
+        model: str,
+        messages: Messages,
+        stream: bool,
+        domain: str = None,
+        proxy: str = None,
+        timeout: int = 120,
+        browser: WebDriver = None,
+        hidden_display: bool = True,
+        **kwargs
+    ) -> CreateResult:
         if not model:
             model = "gpt-3.5-turbo"
-        elif model not in domains:
-            raise ValueError(f"Model are not supported: {model}")
         if not domain:
            chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
            rand = ''.join(random.choice(chars) for _ in range(6))
-            domain = f"{rand}.{domains[model]}"
+            domain = random.choice(cls._domains)
+            domain = f"{rand}.{domain}"
         if debug.logging:
            print(f"AItianhuSpace | using domain: {domain}")
-        if not cookies:
-            cookies = get_cookies('.aitianhu.space')
-            if not cookies:
-                raise RuntimeError(f"g4f.provider.{cls.__name__} requires cookies [refresh https://{domain} on chrome]")
-        url = f'https://{domain}'
-        async with StreamSession(proxies={"https": proxy},
-                cookies=cookies, timeout=timeout, impersonate="chrome110", verify=False) as session:
-            data = {
-                "prompt": format_prompt(messages),
-                "options": {},
-                "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
-                "temperature": 0.8,
-                "top_p": 1,
-                **kwargs
-            }
-            headers = {
-                "Authority": url,
-                "Accept": "application/json, text/plain, */*",
-                "Origin": url,
-                "Referer": f"{url}/"
-            }
-            async with session.post(f"{url}/api/chat-process", json=data, headers=headers) as response:
-                response.raise_for_status()
-                async for line in response.iter_lines():
-                    if line == b"<script>":
-                        raise RuntimeError("Solve challenge and pass cookies and a fixed domain")
-                    if b"platform's risk control" in line:
-                        raise RuntimeError("Platform's Risk Control")
-                    line = json.loads(line)
-                    if "detail" in line:
-                        if content := line["detail"]["choices"][0]["delta"].get(
-                            "content"
-                        ):
-                            yield content
-                    elif "message" in line and "AI-4接口非常昂贵" in line["message"]:
-                        raise RuntimeError("Rate limit for GPT 4 reached")
-                    else:
-                        raise RuntimeError(f"Response: {line}")
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-            ("temperature", "float"),
-            ("top_p", "int"),
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
+        url = f"https://{domain}"
+        prompt = format_prompt(messages)
+        if browser:
+            driver = browser
+        else:
+            if hidden_display:
+                driver, display = get_browser("", True, proxy)
+            else:
+                driver = get_browser("", False, proxy)
+        from selenium.webdriver.common.by import By
+        from selenium.webdriver.support.ui import WebDriverWait
+        from selenium.webdriver.support import expected_conditions as EC
+        wait = WebDriverWait(driver, timeout)
+        # Bypass devtools detection
+        driver.get("https://blank.page/")
+        wait.until(EC.visibility_of_element_located((By.ID, "sheet")))
+        driver.execute_script(f"""
+document.getElementById('sheet').addEventListener('click', () => {{
+    window.open('{url}', '_blank');
+}});
+""")
+        driver.find_element(By.ID, "sheet").click()
+        time.sleep(10)
+        original_window = driver.current_window_handle
+        for window_handle in driver.window_handles:
+            if window_handle != original_window:
+                driver.switch_to.window(window_handle)
+                break
+        wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "textarea.n-input__textarea-el")))
+        try:
+            # Add hook in XMLHttpRequest
+            script = """
+const _http_request_open = XMLHttpRequest.prototype.open;
+window._last_message = window._message = "";
+window._loadend = false;
+XMLHttpRequest.prototype.open = function(method, url) {
+    if (url == "/api/chat-process") {
+        this.addEventListener("progress", (event) => {
+            const lines = this.responseText.split("\\n");
+            try {
+                window._message = JSON.parse(lines[lines.length-1])["text"];
+            } catch(e) { }
+        });
+        this.addEventListener("loadend", (event) => {
+            window._loadend = true;
+        });
+    }
+    return _http_request_open.call(this, method, url);
+}
+"""
+            driver.execute_script(script)
+            # Input and submit prompt
+            driver.find_element(By.CSS_SELECTOR, "textarea.n-input__textarea-el").send_keys(prompt)
+            driver.find_element(By.CSS_SELECTOR, "button.n-button.n-button--primary-type.n-button--medium-type").click()
+            # Yield response
+            while True:
+                chunk = driver.execute_script("""
+if (window._message && window._message != window._last_message) {
+    try {
+        return window._message.substring(window._last_message.length);
+    } finally {
+        window._last_message = window._message;
+    }
+}
+if (window._loadend) {
+    return null;
+}
+return "";
+""")
+                if chunk:
+                    yield chunk
+                elif chunk != "":
+                    break
+                else:
+                    time.sleep(0.1)
+        finally:
+            driver.close()
+            if not browser:
+                time.sleep(0.1)
+                driver.quit()
+                if hidden_display:
+                    display.stop()
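A minimal usage sketch for the reworked provider (not part of the commit; it assumes selenium and a local Chrome driver are installed, that the class is re-exported as g4f.Provider.AItianhuSpace, and uses a placeholder prompt):

# Hedged usage sketch, not from this commit.
# Assumptions: selenium + Chrome are available locally and the class is importable
# from g4f.Provider; hidden_display defaults to True, so a virtual display is used.
from g4f.Provider import AItianhuSpace

for chunk in AItianhuSpace.create_completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello, who are you?"}],
    stream=True,
):
    # chunks arrive as the XMLHttpRequest hook observes new response text
    print(chunk, end="", flush=True)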


@@ -38,11 +38,11 @@ class MyShell(BaseProvider):
         driver.get(cls.url)
         try:
-            # Wait for page load
+            # Wait for page load and cloudflare validation
             WebDriverWait(driver, timeout).until(
                 EC.presence_of_element_located((By.CSS_SELECTOR, "body:not(.no-js)"))
             )
-            # Send message
+            # Send request with message
             script = """
 response = await fetch("https://api.myshell.ai/v1/bot/chat/send_message", {
     "headers": {
@@ -66,7 +66,7 @@ window.reader = response.body.getReader();
             script = """
 chunk = await window.reader.read();
 if (chunk['done']) return null;
-text = (new TextDecoder ()).decode(chunk['value']);
+text = (new TextDecoder()).decode(chunk['value']);
 content = '';
 text.split('\\n').forEach((line, index) => {
     if (line.startsWith('data: ')) {
@@ -81,7 +81,7 @@ text.split('\\n').forEach((line, index) => {
 return content;
 """
             while True:
-                chunk = driver.execute_script(script):
+                chunk = driver.execute_script(script)
                 if chunk:
                     yield chunk
                 elif chunk != "":


@@ -19,7 +19,7 @@ from browser_cookie3 import (
 )
 try:
     from selenium.webdriver.remote.webdriver import WebDriver
 except ImportError:
     class WebDriver():
         pass
 try:


@@ -0,0 +1,129 @@
+from __future__ import annotations
+
+import time
+
+from ...typing import CreateResult, Messages
+from ..base_provider import BaseProvider
+from ..helper import WebDriver, format_prompt, get_browser
+
+models = {
+    "meta-llama/Llama-2-7b-chat-hf": {"name": "Llama-2-7b"},
+    "meta-llama/Llama-2-13b-chat-hf": {"name": "Llama-2-13b"},
+    "meta-llama/Llama-2-70b-chat-hf": {"name": "Llama-2-70b"},
+    "codellama/CodeLlama-7b-Instruct-hf": {"name": "Code-Llama-7b"},
+    "codellama/CodeLlama-13b-Instruct-hf": {"name": "Code-Llama-13b"},
+    "codellama/CodeLlama-34b-Instruct-hf": {"name": "Code-Llama-34b"},
+    "gpt-3.5-turbo": {"name": "GPT-3.5-Turbo"},
+    "gpt-3.5-turbo-instruct": {"name": "GPT-3.5-Turbo-Instruct"},
+    "gpt-4": {"name": "GPT-4"},
+    "palm": {"name": "Google-PaLM"},
+}
+
+class Poe(BaseProvider):
+    url = "https://poe.com"
+    working = True
+    supports_gpt_35_turbo = True
+    supports_stream = True
+
+    @classmethod
+    def create_completion(
+        cls,
+        model: str,
+        messages: Messages,
+        stream: bool,
+        proxy: str = None,
+        browser: WebDriver = None,
+        hidden_display: bool = True,
+        **kwargs
+    ) -> CreateResult:
+        if not model:
+            model = "gpt-3.5-turbo"
+        elif model not in models:
+            raise ValueError(f"Model are not supported: {model}")
+        prompt = format_prompt(messages)
+        if browser:
+            driver = browser
+        else:
+            if hidden_display:
+                driver, display = get_browser(None, True, proxy)
+            else:
+                driver = get_browser(None, False, proxy)
+
+        script = """
+window._message = window._last_message = "";
+window._message_finished = false;
+class ProxiedWebSocket extends WebSocket {
+    constructor(url, options) {
+        super(url, options);
+        this.addEventListener("message", (e) => {
+            const data = JSON.parse(JSON.parse(e.data)["messages"][0])["payload"]["data"];
+            if ("messageAdded" in data) {
+                if (data["messageAdded"]["author"] != "human") {
+                    window._message = data["messageAdded"]["text"];
+                    if (data["messageAdded"]["state"] == "complete") {
+                        window._message_finished = true;
+                    }
+                }
+            }
+        });
+    }
+}
+window.WebSocket = ProxiedWebSocket;
+"""
+        driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
+            "source": script
+        })
+
+        from selenium.webdriver.common.by import By
+        from selenium.webdriver.support.ui import WebDriverWait
+        from selenium.webdriver.support import expected_conditions as EC
+
+        try:
+            driver.get(f"{cls.url}/{models[model]['name']}")
+            wait = WebDriverWait(driver, 10 if hidden_display else 240)
+            wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "textarea[class^='GrowingTextArea']")))
+        except:
+            # Reopen browser for login
+            if not browser:
+                driver.quit()
+                if hidden_display:
+                    display.stop()
+                driver = get_browser(None, False, proxy)
+                driver.get(f"{cls.url}/{models[model]['name']}")
+                wait = WebDriverWait(driver, 240)
+                wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "textarea[class^='GrowingTextArea']")))
+            else:
+                raise RuntimeError("Prompt textarea not found. You may not be logged in.")
+
+        driver.find_element(By.CSS_SELECTOR, "footer textarea[class^='GrowingTextArea']").send_keys(prompt)
+        driver.find_element(By.CSS_SELECTOR, "footer button[class*='ChatMessageSendButton']").click()
+
+        try:
+            script = """
+if(window._message && window._message != window._last_message) {
+    try {
+        return window._message.substring(window._last_message.length);
+    } finally {
+        window._last_message = window._message;
+    }
+} else if(window._message_finished) {
+    return null;
+} else {
+    return '';
+}
+"""
+            while True:
+                chunk = driver.execute_script(script)
+                if chunk:
+                    yield chunk
+                elif chunk != "":
+                    break
+                else:
+                    time.sleep(0.1)
+        finally:
+            driver.close()
+            if not browser:
+                time.sleep(0.1)
+                driver.quit()
+                if hidden_display:
+                    display.stop()
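Poe requires an authenticated poe.com session in the controlled browser. A hedged usage sketch follows (not part of the commit; the import path and the choice of keeping the window visible for login are assumptions drawn from the signature above):

# Hedged usage sketch, not from this commit.
# Assumption: Poe is re-exported from the package whose __init__.py is patched in the
# final hunk below; adjust the import to wherever the class actually lives.
from g4f.Provider.needs_auth import Poe

for chunk in Poe.create_completion(
    model="gpt-4",
    messages=[{"role": "user", "content": "Write a haiku about code review."}],
    stream=True,
    hidden_display=False,  # keep the window visible so the poe.com login can be completed
):
    print(chunk, end="", flush=True)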


@@ -4,3 +4,4 @@ from .Theb import Theb
 from .HuggingChat import HuggingChat
 from .OpenaiChat import OpenaiChat
 from .OpenAssistant import OpenAssistant
+from .Poe import Poe