From 58c45522ea4f94f90c3ebbf350eba0b5715848ae Mon Sep 17 00:00:00 2001
From: Commenter123321 <36051603+Commenter123321@users.noreply.github.com>
Date: Mon, 9 Oct 2023 20:53:31 +0200
Subject: [PATCH] add cool testing for gpt-3.5 and gpt-4

---
 etc/testing/test_all.py             | 67 +++++++++++++++++++++++++++++
 etc/testing/test_chat_completion.py |  8 ++--
 g4f/Provider/Aivvm.py               | 42 ++++++++++--------
 g4f/Provider/DeepAi.py              |  5 ++-
 g4f/models.py                       | 20 ++++++---
 5 files changed, 112 insertions(+), 30 deletions(-)
 create mode 100644 etc/testing/test_all.py

diff --git a/etc/testing/test_all.py b/etc/testing/test_all.py
new file mode 100644
index 00000000..73134e3f
--- /dev/null
+++ b/etc/testing/test_all.py
@@ -0,0 +1,67 @@
+import asyncio
+import sys
+from pathlib import Path
+sys.path.append(str(Path(__file__).parent.parent.parent))
+
+import g4f
+
+
+async def test(model: g4f.Model):
+    try:
+        try:
+            for response in g4f.ChatCompletion.create(
+                model=model,
+                messages=[{"role": "user", "content": "write a poem about a tree"}],
+                temperature=0.1,
+                stream=True
+            ):
+                print(response, end="")
+
+            print()
+        except:
+            for response in await g4f.ChatCompletion.create_async(
+                model=model,
+                messages=[{"role": "user", "content": "write a poem about a tree"}],
+                temperature=0.1,
+                stream=True
+            ):
+                print(response, end="")
+
+            print()
+
+        return True
+    except Exception as e:
+        print(model.name, "not working:", e)
+        print(e.__traceback__.tb_next)
+        return False
+
+
+async def start_test():
+    models_to_test = [
+        # GPT-3.5 4K Context
+        g4f.models.gpt_35_turbo,
+        g4f.models.gpt_35_turbo_0613,
+
+        # GPT-3.5 16K Context
+        g4f.models.gpt_35_turbo_16k,
+        g4f.models.gpt_35_turbo_16k_0613,
+
+        # GPT-4 8K Context
+        g4f.models.gpt_4,
+        g4f.models.gpt_4_0613,
+
+        # GPT-4 32K Context
+        g4f.models.gpt_4_32k,
+        g4f.models.gpt_4_32k_0613,
+    ]
+
+    models_working = []
+
+    for model in models_to_test:
+        if await test(model):
+            models_working.append(model.name)
+
+    print("working models:", models_working)
+
+
+asyncio.run(start_test())
diff --git a/etc/testing/test_chat_completion.py b/etc/testing/test_chat_completion.py
index ee523b86..7058ab4c 100644
--- a/etc/testing/test_chat_completion.py
+++ b/etc/testing/test_chat_completion.py
@@ -7,10 +7,10 @@ import g4f, asyncio
 
 print("create:", end=" ", flush=True)
 for response in g4f.ChatCompletion.create(
-    model=g4f.models.default,
-    provider=g4f.Provider.GptForLove,
-    messages=[{"role": "user", "content": "send a bunch of emojis. i want to test something"}],
-    temperature=0.0,
+    model=g4f.models.gpt_4_32k_0613,
+    provider=g4f.Provider.Aivvm,
+    messages=[{"role": "user", "content": "write a poem about a tree"}],
+    temperature=0.1,
     stream=True
 ):
     print(response, end="", flush=True)
diff --git a/g4f/Provider/Aivvm.py b/g4f/Provider/Aivvm.py
index 1ba6d6f1..ac15ac16 100644
--- a/g4f/Provider/Aivvm.py
+++ b/g4f/Provider/Aivvm.py
@@ -3,6 +3,7 @@ import requests
 
 from .base_provider import BaseProvider
 from ..typing import CreateResult
+from json import dumps
 
 # to recreate this easily, send a post request to https://chat.aivvm.com/api/models
 models = {
@@ -35,20 +36,6 @@ class Aivvm(BaseProvider):
         elif model not in models:
             raise ValueError(f"Model is not supported: {model}")
 
-        headers = {
-            "accept"            : "*/*",
-            "accept-language"   : "hu-HU,hu;q=0.9,en-US;q=0.8,en;q=0.7",
-            "content-type"      : "application/json",
-            "sec-ch-ua"         : "\"Kuki\";v=\"116\", \"Not)A;Brand\";v=\"24\", \"Pici Pocoro\";v=\"102\"",
-            "sec-ch-ua-mobile"  : "?0",
-            "sec-ch-ua-platform": "\"Bandóz\"",
-            "sec-fetch-dest"    : "empty",
-            "sec-fetch-mode"    : "cors",
-            "sec-fetch-site"    : "same-origin",
-            "Referer"           : "https://chat.aivvm.com/",
-            "Referrer-Policy"   : "same-origin",
-        }
-
         json_data = {
             "model"       : models[model],
             "messages"    : messages,
@@ -57,12 +44,29 @@ class Aivvm(BaseProvider):
             "temperature" : kwargs.get("temperature", 0.7)
         }
 
-        response = requests.post(
-            "https://chat.aivvm.com/api/chat", headers=headers, json=json_data, stream=True)
+        headers = {
+            "accept"            : "text/event-stream",
+            "accept-language"   : "en-US,en;q=0.9",
+            "content-type"      : "application/json",
+            "content-length"    : str(len(dumps(json_data))),
+            "sec-ch-ua"         : "\"Chrome\";v=\"117\", \"Not;A=Brand\";v=\"8\", \"Chromium\";v=\"117\"",
+            "sec-ch-ua-mobile"  : "?0",
+            "sec-ch-ua-platform": "\"Windows\"",
+            "sec-fetch-dest"    : "empty",
+            "sec-fetch-mode"    : "cors",
+            "sec-fetch-site"    : "same-origin",
+            "sec-gpc"           : "1",
+            "referrer"          : "https://chat.aivvm.com/"
+        }
+
+        response = requests.post("https://chat.aivvm.com/api/chat", headers=headers, json=json_data, stream=True)
         response.raise_for_status()
 
-        for chunk in response.iter_content(chunk_size=None):
-            yield chunk.decode('utf-8')
+        for chunk in response.iter_content():
+            try:
+                yield chunk.decode("utf-8")
+            except UnicodeDecodeError:
+                yield chunk.decode("unicode-escape")
 
     @classmethod
     @property
@@ -74,4 +78,4 @@ class Aivvm(BaseProvider):
             ('temperature', 'float'),
         ]
         param = ', '.join([': '.join(p) for p in params])
-        return f'g4f.provider.{cls.__name__} supports: ({param})'
\ No newline at end of file
+        return f'g4f.provider.{cls.__name__} supports: ({param})'
diff --git a/g4f/Provider/DeepAi.py b/g4f/Provider/DeepAi.py
index bac3e3fe..9a4f922c 100644
--- a/g4f/Provider/DeepAi.py
+++ b/g4f/Provider/DeepAi.py
@@ -65,7 +65,10 @@ f = function () {
                 response.raise_for_status()
                 async for stream in response.content.iter_any():
                     if stream:
-                        yield stream.decode()
+                        try:
+                            yield stream.decode("utf-8")
+                        except UnicodeDecodeError:
+                            yield stream.decode("unicode-escape")
 
 
 def get_api_key(user_agent: str):
diff --git a/g4f/models.py b/g4f/models.py
index b4247703..aa2b3bd6 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -23,6 +23,7 @@ from .Provider import (
     GptGod,
     AiAsk,
     GptGo,
+    Aivvm,
     Ylokh,
     Bard,
     Aibn,
@@ -72,7 +73,9 @@ gpt_35_turbo = Model(
 gpt_4 = Model(
     name          = 'gpt-4',
     base_provider = 'openai',
-    best_provider = Bing
+    best_provider = RetryProvider([
+        Aivvm, Bing
+    ])
 )
 
 # Bard
@@ -165,26 +168,31 @@ gpt_35_turbo_16k = Model(
 
 gpt_35_turbo_16k_0613 = Model(
     name          = 'gpt-3.5-turbo-16k-0613',
-    base_provider = 'openai')
+    base_provider = 'openai',
+    best_provider = Aivvm)
 
 gpt_35_turbo_0613 = Model(
     name          = 'gpt-3.5-turbo-0613',
-    base_provider = 'openai'
+    base_provider = 'openai',
+    best_provider = Aivvm
 )
 
 gpt_4_0613 = Model(
     name          = 'gpt-4-0613',
-    base_provider = 'openai'
+    base_provider = 'openai',
+    best_provider = Aivvm
 )
 
 gpt_4_32k = Model(
     name          = 'gpt-4-32k',
-    base_provider = 'openai'
+    base_provider = 'openai',
+    best_provider = Aivvm
 )
 
 gpt_4_32k_0613 = Model(
     name          = 'gpt-4-32k-0613',
-    base_provider = 'openai'
+    base_provider = 'openai',
+    best_provider = Aivvm
 )
 
 text_ada_001 = Model(
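
For a quick manual check, the Aivvm-backed models wired up above can be exercised directly, mirroring the patched etc/testing/test_chat_completion.py. This is a minimal sketch, not part of the patch: the model and provider names come from this diff, while the surrounding script is illustrative.

import g4f

# Stream a short completion through Aivvm, which this patch registers as the
# best_provider for the gpt-4 and gpt-3.5 date-stamped variants.
for chunk in g4f.ChatCompletion.create(
    model=g4f.models.gpt_4_32k_0613,
    provider=g4f.Provider.Aivvm,
    messages=[{"role": "user", "content": "write a poem about a tree"}],
    temperature=0.1,
    stream=True,
):
    print(chunk, end="", flush=True)
print()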