Merge pull request #1033 from Commenter123321/main

add testing for all gpt models, enhance the gui a bit
Tekky 2023-10-10 15:29:51 +01:00 committed by GitHub
commit 26cd71c7f4
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
13 changed files with 206 additions and 88 deletions

etc/testing/test_all.py Normal file

@@ -0,0 +1,67 @@
import asyncio
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).parent.parent.parent))
import g4f


async def test(model: g4f.Model):
try:
try:
for response in g4f.ChatCompletion.create(
model=model,
messages=[{"role": "user", "content": "write a poem about a tree"}],
temperature=0.1,
stream=True
):
print(response, end="")
print()
        except:
            # the streaming sync call failed; retry once with the async API
for response in await g4f.ChatCompletion.create_async(
model=model,
messages=[{"role": "user", "content": "write a poem about a tree"}],
temperature=0.1,
stream=True
):
print(response, end="")
print()
return True
except Exception as e:
print(model.name, "not working:", e)
print(e.__traceback__.tb_next)
        return False


async def start_test():
models_to_test = [
# GPT-3.5 4K Context
g4f.models.gpt_35_turbo,
g4f.models.gpt_35_turbo_0613,
# GPT-3.5 16K Context
g4f.models.gpt_35_turbo_16k,
g4f.models.gpt_35_turbo_16k_0613,
# GPT-4 8K Context
g4f.models.gpt_4,
g4f.models.gpt_4_0613,
# GPT-4 32K Context
g4f.models.gpt_4_32k,
g4f.models.gpt_4_32k_0613,
]
models_working = []
for model in models_to_test:
if await test(model):
models_working.append(model.name)
print("working models:", models_working)
asyncio.run(start_test())
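test_all.py tries the synchronous streaming call first and falls back to create_async. For a one-off spot check outside the suite, the same client API can be called directly; a minimal sketch (model choice arbitrary, and availability depends on which providers are currently up):

import asyncio
import g4f

async def spot_check():
    # non-streaming async call: returns the whole completion as one string
    response = await g4f.ChatCompletion.create_async(
        model=g4f.models.gpt_35_turbo,
        messages=[{"role": "user", "content": "write a poem about a tree"}],
    )
    print(response)

asyncio.run(spot_check())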

etc/testing/test_chat_completion.py

@@ -7,10 +7,10 @@ import g4f, asyncio
print("create:", end=" ", flush=True)
for response in g4f.ChatCompletion.create(
-    model=g4f.models.default,
-    provider=g4f.Provider.GptForLove,
-    messages=[{"role": "user", "content": "send a bunch of emojis. i want to test something"}],
-    temperature=0.0,
+    model=g4f.models.gpt_4_32k_0613,
+    provider=g4f.Provider.Aivvm,
+    messages=[{"role": "user", "content": "write a poem about a tree"}],
+    temperature=0.1,
stream=True
):
print(response, end="", flush=True)

etc/testing/test_gui.py Normal file

@@ -0,0 +1,6 @@
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).parent.parent.parent))
from g4f.gui import run_gui

run_gui()

g4f/Provider/Aivvm.py

@@ -1,8 +1,9 @@
from __future__ import annotations
+import requests
-from ..requests import StreamSession
-from .base_provider import AsyncGeneratorProvider
-from ..typing import AsyncResult, Messages
+from .base_provider import BaseProvider
+from ..typing import CreateResult
+from json import dumps
# to recreate this easily, send a post request to https://chat.aivvm.com/api/models
models = {
@@ -16,22 +17,20 @@ models = {
'gpt-4-32k-0613': {'id': 'gpt-4-32k-0613', 'name': 'GPT-4-32K-0613'},
}
-class Aivvm(AsyncGeneratorProvider):
+class Aivvm(BaseProvider):
url = 'https://chat.aivvm.com'
supports_stream = True
-    working = True
supports_gpt_35_turbo = True
supports_gpt_4 = True
+    working = True
@classmethod
-    async def create_async_generator(
-        cls,
+    def create_completion(cls,
model: str,
-        messages: Messages,
+        messages: list[dict[str, str]],
+        stream: bool,
-        proxy: str = None,
-        timeout: int = 120,
**kwargs
-    ) -> AsyncResult:
+    ) -> CreateResult:
if not model:
model = "gpt-3.5-turbo"
elif model not in models:
@@ -44,24 +43,33 @@ class Aivvm(AsyncGeneratorProvider):
"prompt" : kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown."),
"temperature" : kwargs.get("temperature", 0.7)
}
+        data = dumps(json_data)
headers = {
"Accept": "*/*",
"Origin": cls.url,
"Referer": f"{cls.url}/",
"accept" : "text/event-stream",
"accept-language" : "en-US,en;q=0.9",
"content-type" : "application/json",
"content-length" : str(len(data)),
"sec-ch-ua" : "\"Chrome\";v=\"117\", \"Not;A=Brand\";v=\"8\", \"Chromium\";v=\"117\"",
"sec-ch-ua-mobile" : "?0",
"sec-ch-ua-platform": "\"Windows\"",
"sec-fetch-dest" : "empty",
"sec-fetch-mode" : "cors",
"sec-fetch-site" : "same-origin",
"sec-gpc" : "1",
"referrer" : "https://chat.aivvm.com/",
"user-agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"
}
-        async with StreamSession(
-            impersonate="chrome107",
-            headers=headers,
-            proxies={"https": proxy},
-            timeout=timeout
-        ) as session:
-            async with session.post(f"{cls.url}/api/chat", json=json_data) as response:
-                response.raise_for_status()
-                async for chunk in response.iter_content():
-                    if b'Access denied | chat.aivvm.com used Cloudflare' in chunk:
-                        raise ValueError("Rate Limit | use another provider")
-                    yield chunk.decode()
+        response = requests.post("https://chat.aivvm.com/api/chat", headers=headers, data=data, stream=True)
+        response.raise_for_status()
+        for chunk in response.iter_content():
+            try:
+                yield chunk.decode("utf-8")
+            except UnicodeDecodeError:
+                yield chunk.decode("unicode-escape")
@classmethod
@property
@@ -73,4 +81,4 @@
('temperature', 'float'),
]
param = ', '.join([': '.join(p) for p in params])
-        return f'g4f.provider.{cls.__name__} supports: ({param})'
+        return f'g4f.provider.{cls.__name__} supports: ({param})'
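Aivvm is now a synchronous BaseProvider built on requests, so it can be driven through the regular blocking API. A minimal usage sketch mirroring the test file above (model ids come from the models dict; per the comment in the provider, the live list can supposedly be recreated by POSTing to https://chat.aivvm.com/api/models):

import g4f
from g4f.Provider import Aivvm

# stream a completion through the rewritten synchronous provider
for chunk in g4f.ChatCompletion.create(
    model="gpt-4-32k-0613",
    provider=Aivvm,
    messages=[{"role": "user", "content": "write a poem about a tree"}],
    stream=True,
):
    print(chunk, end="", flush=True)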


@@ -1,7 +1,6 @@
from __future__ import annotations
import random
import uuid
import json
import os
import uuid

g4f/Provider/DeepAi.py

@@ -22,29 +22,29 @@ class DeepAi(AsyncGeneratorProvider):
proxy: str = None,
**kwargs
) -> AsyncResult:
agent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36"
token_js = """
-var agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36'
+var agent = '""" + agent + """'
var a, b, c, d, e, h, f, l, g, k, m, n, r, x, C, E, N, F, T, O, P, w, D, G, Q, R, W, I, aa, fa, na, oa, ha, ba, X, ia, ja, ka, J, la, K, L, ca, S, U, M, ma, B, da, V, Y;
h = Math.round(1E11 * Math.random()) + "";
-f = function () {
-for (var p = [], q = 0; 64 > q;) p[q] = 0 | 4294967296 * Math.sin(++q % Math.PI);
-return function (t) {
-var v, y, H, ea = [v = 1732584193, y = 4023233417, ~v, ~y],
-Z = [],
-A = unescape(encodeURI(t)) + "\u0080",
-z = A.length;
-t = --z / 4 + 2 | 15;
-for (Z[--t] = 8 * z; ~z;) Z[z >> 2] |= A.charCodeAt(z) << 8 * z--;
-for (q = A = 0; q < t; q += 16) {
-for (z = ea; 64 > A; z = [H = z[3], v + ((H = z[0] + [v & y | ~v & H, H & v | ~H & y, v ^ y ^ H, y ^ (v | ~H)][z = A >> 4] + p[A] + ~~Z[q | [A, 5 * A + 1, 3 * A + 5, 7 * A][z] & 15]) << (z = [7, 12, 17, 22, 5, 9, 14, 20, 4, 11, 16, 23, 6, 10, 15, 21][4 * z + A++ % 4]) | H >>> -z), v, y]) v = z[1] | 0, y = z[2];
-for (A = 4; A;) ea[--A] += z[A]
-}
-for (t = ""; 32 > A;) t += (ea[A >> 3] >> 4 * (1 ^ A++) & 15).toString(16);
-return t.split("").reverse().join("")
-}
-}();
+f = function() {
+for (var p = [], r = 0; 64 > r;) p[r] = 0 | 4294967296 * Math.sin(++r % Math.PI);
+return function(z) {
+var B, G, H, ca = [B = 1732584193, G = 4023233417, ~B, ~G],
+X = [],
+x = unescape(encodeURI(z)) + "\u0080",
+v = x.length;
+z = --v / 4 + 2 | 15;
+for (X[--z] = 8 * v; ~v;) X[v >> 2] |= x.charCodeAt(v) << 8 * v--;
+for (r = x = 0; r < z; r += 16) {
+for (v = ca; 64 > x; v = [H = v[3], B + ((H = v[0] + [B & G | ~B & H, H & B | ~H & G, B ^ G ^ H, G ^ (B | ~H)][v = x >> 4] + p[x] + ~~X[r | [x, 5 * x + 1, 3 * x + 5, 7 * x][v] & 15]) << (v = [7, 12, 17, 22, 5, 9, 14, 20, 4, 11, 16, 23, 6, 10, 15, 21][4 * v + x++ % 4]) | H >>> -v), B, G]) B = v[1] | 0, G = v[2];
+for (x = 4; x;) ca[--x] += v[x]
+}
+for (z = ""; 32 > x;) z += (ca[x >> 3] >> 4 * (1 ^ x++) & 15).toString(16);
+return z.split("").reverse().join("")
+}
+}();
"tryit-" + h + "-" + f(agent + f(agent + f(agent + h + "x")));
"""
@@ -53,7 +53,7 @@ f = function () {
api_key = js2py.eval_js(token_js)
headers = {
"api-key": api_key,
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36",
"User-Agent": agent,
**kwargs.get("headers", {})
}
async with ClientSession(
@@ -65,7 +65,10 @@ f = function () {
response.raise_for_status()
async for stream in response.content.iter_any():
if stream:
-                        yield stream.decode()
+                        try:
+                            yield stream.decode("utf-8")
+                        except UnicodeDecodeError:
+                            yield stream.decode("unicode-escape")
def get_api_key(user_agent: str):
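The new decode fallback exists because iter_any() can hand back a chunk that splits a multi-byte UTF-8 sequence, in which case strict decoding raises; unicode-escape is a lossy last resort. The same policy as a standalone helper (a sketch, not part of the commit):

def decode_chunk(chunk: bytes) -> str:
    # strict UTF-8 first; fall back if the chunk was cut mid-codepoint
    try:
        return chunk.decode("utf-8")
    except UnicodeDecodeError:
        return chunk.decode("unicode-escape")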

g4f/gui/client/html/index.html

@@ -79,7 +79,7 @@
<span>Clear Conversations</span>
</button>
<div class="info">
<i class="fa-brands fa-discord"></i>
<i class="fa-brands fa-telegram"></i>
<span class="convo-title">telegram: <a href="https://t.me/g4f_official">@g4f_official</a><br>
</span>
</div>
@@ -118,9 +118,13 @@
<div class="field">
<select name="model" id="model">
<option value="gpt-3.5-turbo" selected>gpt-3.5</option>
<option value="gpt-4">gpt-4</option>
<option value="gpt-3.5-turbo-0613">gpt-3.5 fast</option>
<option value="gpt-3.5-turbo-16k">gpt-3.5 16k</option>
<option value="gpt-3.5-turbo-16k-0613">gpt-3.5 16k fast</option>
<option value="gpt-4">gpt-4</option>
<option value="gpt-4-0613">gpt-4 fast</option>
<option value="gpt-4-32k">gpt-4 32k</option>
<option value="gpt-4-32k-0613">gpt-4 32k fast</option>
</select>
</div>
<div class="field">

g4f/gui/client/js/chat.v1.js

@@ -144,7 +144,7 @@ const ask_gpt = async (message) => {
chunk = new TextDecoder().decode(value);
-if (chunk.includes(`<form id="challenge-form" action="/backend-api/v2/conversation?`)) {
+if (chunk.includes('<form id="challenge-form" action="/backend-api/v2/conversation?"')) {
chunk = `cloudflare token expired, please refresh the page.`;
}
@@ -161,7 +161,7 @@ const ask_gpt = async (message) => {
}
// if text contains :
-if (text.includes(`instead. Maintaining this website and API costs a lot of money`)) {
+if (text.includes("instead. Maintaining this website and API costs a lot of money")) {
document.getElementById(`gpt_${window.token}`).innerHTML = "An error occured, please reload / refresh cache and try again or use a differnet browser";
}
@@ -547,7 +547,7 @@ colorThemes.forEach((themeOption) => {
setTimeout(() => {
ads_div = document.querySelector('.ads')
-if (ads_div.getElementsByTagName("iframe").length == 0) {
+if (ads_div != null && ads_div.getElementsByTagName("iframe").length == 0) {
ads_div.removeChild(ads_div.querySelector('.sorry'))
ads_div.innerHTML += `

g4f/gui/server/backend.py

@@ -33,17 +33,17 @@ class Backend_Api:
conversation = request.json['meta']['content']['conversation']
prompt = request.json['meta']['content']['parts'][0]
model = request.json['model']
-        provider = get_provider(request.json.get('provider'))
+        provider = request.json.get('provider').split("g4f.Provider.")[1]
messages = special_instructions[jailbreak] + conversation + search(internet_access, prompt) + [prompt]
def stream():
if provider:
answer = g4f.ChatCompletion.create(model=model,
-                    provider=provider, messages=messages, stream=True)
+                    provider=get_provider(provider), messages=messages, stream=True)
else:
answer = g4f.ChatCompletion.create(model=model,
-                        messages=messages, stream=True)
+                    messages=messages, stream=True)
for token in answer:
yield token
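Note the new parsing assumes the client always sends a fully qualified name such as g4f.Provider.DeepAi; a missing field or a bare name would raise AttributeError or IndexError. A more defensive variant (hypothetical helper, not in the commit) might look like:

def parse_provider_name(raw) -> str | None:
    # hypothetical: tolerate a missing field, 'g4f.Provider.Auto',
    # and names sent without the 'g4f.Provider.' prefix
    if not raw or raw == "g4f.Provider.Auto":
        return None
    return raw.split("g4f.Provider.")[-1]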

g4f/gui/server/internet.py

@@ -1,28 +1,56 @@
-from requests import get
from datetime import datetime
+from duckduckgo_search import DDGS
+
+ddgs = DDGS(timeout=20)
def search(internet_access, prompt):
print(prompt)
try:
-        if internet_access == False:
+        if not internet_access:
return []
-        search = get('https://ddg-api.herokuapp.com/search', params={
-            'query': prompt['content'],
-            'limit': 3
-        })
+        results = duckduckgo_search(q=prompt)
if not search:
return []
blob = ''
-        for index, result in enumerate(search.json()):
-            blob += f'[{index}] "{result["snippet"]}"\nURL:{result["link"]}\n\n'
+        for index, result in enumerate(results):
+            blob += f'[{index}] "{result["body"]}"\nURL:{result["href"]}\n\n'
date = datetime.now().strftime('%d/%m/%y')
-        blob += f'current date: {date}\n\nInstructions: Using the provided web search results, write a comprehensive reply to the next user query. Make sure to cite results using [[number](URL)] notation after the reference. If the provided search results refer to multiple subjects with the same name, write separate answers for each subject. Ignore your previous response if any.'
+        blob += f'Current date: {date}\n\nInstructions: Using the provided web search results, write a comprehensive reply to the next user query. Make sure to cite results using [[number](URL)] notation after the reference. If the provided search results refer to multiple subjects with the same name, write separate answers for each subject. Ignore your previous response if any.'
return [{'role': 'user', 'content': blob}]
except Exception as e:
-        return []
+        print("Couldn't search DuckDuckGo:", e)
+        print(e.__traceback__.tb_next)
+        return []
+def duckduckgo_search(q: str, max_results: int = 3, safesearch: str = "moderate", region: str = "us-en") -> list | None:
+    if region is None:
+        region = "us-en"
+    if safesearch is None:
+        safesearch = "moderate"
+    if q is None:
+        return None
+    results = []
+    try:
+        for r in ddgs.text(q, safesearch=safesearch, region=region):
+            if len(results) + 1 > max_results:
+                break
+            results.append(r)
+    except Exception as e:
+        print(e)
+    return results
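DDGS.text yields result dicts whose body and href keys the new formatter consumes. A quick standalone smoke test of that contract (a sketch; results depend on the live index):

from duckduckgo_search import DDGS

ddgs = DDGS(timeout=20)
for r in ddgs.text("gpt4free github", safesearch="moderate", region="us-en"):
    print(r["href"], "-", r["body"][:60])
    break  # one result is enough to confirm the keys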

g4f/gui/server/provider.py

@@ -1,17 +1,14 @@
import g4f
+from g4f import BaseProvider
-def get_provider(provider: str) -> g4f.Provider.BaseProvider:
+def get_provider(provider: str) -> BaseProvider | None:
if isinstance(provider, str):
print(provider)
if provider == 'g4f.Provider.Auto':
return None
-        if provider in g4f.Provider.ProviderUtils.convert:
-            return g4f.Provider.ProviderUtils.convert[provider]
-        else:
-            return None
+        return g4f.Provider.ProviderUtils.convert.get(provider)
else:
return None
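Since backend.py now strips the g4f.Provider. prefix before calling in, get_provider receives bare class names and resolves them via ProviderUtils.convert, a name-to-class dict. A minimal sketch of the lookup (assuming the module path g4f/gui/server/provider.py shown above):

from g4f.gui.server.provider import get_provider

print(get_provider("DeepAi"))             # provider class if registered
print(get_provider("g4f.Provider.Auto"))  # None: let g4f choose
print(get_provider(None))                 # None: not a string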

g4f/models.py

@@ -65,14 +65,16 @@ gpt_35_turbo = Model(
name = 'gpt-3.5-turbo',
base_provider = 'openai',
best_provider = RetryProvider([
-        DeepAi, ChatgptLogin, ChatgptAi, GptGo, AItianhu, Aichat, AItianhuSpace, Myshell, Aibn, FreeGpt, Ylokh
+        DeepAi, Aivvm, ChatgptLogin, ChatgptAi, GptGo, AItianhu, Aichat, AItianhuSpace, Myshell, Aibn, FreeGpt, Ylokh
])
)
gpt_4 = Model(
name = 'gpt-4',
base_provider = 'openai',
-    best_provider = Bing
+    best_provider = RetryProvider([
+        Aivvm, Bing
+    ])
)
# Bard
@@ -165,27 +167,32 @@ gpt_35_turbo_16k = Model(
gpt_35_turbo_16k_0613 = Model(
name = 'gpt-3.5-turbo-16k-0613',
-    base_provider = 'openai')
+    base_provider = 'openai',
+    best_provider = Aivvm
+)
gpt_35_turbo_0613 = Model(
name = 'gpt-3.5-turbo-0613',
base_provider = 'openai',
-    best_provider=Aivvm
+    best_provider = Aivvm
)
gpt_4_0613 = Model(
name = 'gpt-4-0613',
-    base_provider = 'openai'
+    base_provider = 'openai',
+    best_provider = Aivvm
)
gpt_4_32k = Model(
name = 'gpt-4-32k',
-    base_provider = 'openai'
+    base_provider = 'openai',
+    best_provider = Aivvm
)
gpt_4_32k_0613 = Model(
name = 'gpt-4-32k-0613',
-    base_provider = 'openai'
+    base_provider = 'openai',
+    best_provider = Aivvm
)
text_ada_001 = Model(
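With RetryProvider, a gpt-4 request now rotates through Aivvm and Bing until one succeeds rather than depending on Bing alone; an exception surfaces only if every listed provider fails. A sketch of the effect:

import g4f

# RetryProvider tries each listed provider and raises only if all fail
response = g4f.ChatCompletion.create(
    model=g4f.models.gpt_4,
    messages=[{"role": "user", "content": "say hello"}],
)
print(response)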

requirements.txt

@@ -10,5 +10,4 @@ flask
flask-cors
typing-extensions
PyExecJS
-flask
-flask-cors
+duckduckgo-search