mirror of https://github.com/xtekky/gpt4free.git (synced 2024-11-27 13:42:19 +03:00)

Merge branch 'main' of https://github.com/xtekky/gpt4free

commit 83443ee912
@@ -4,6 +4,7 @@ import requests

 from .base_provider import BaseProvider
+from ..typing import CreateResult

 # to recreate this easily, send a post request to https://chat.aivvm.com/api/models
 models = {
     'gpt-3.5-turbo': {'id': 'gpt-3.5-turbo', 'name': 'GPT-3.5'},
     'gpt-3.5-turbo-0613': {'id': 'gpt-3.5-turbo-0613', 'name': 'GPT-3.5-0613'},
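
Note: as the comment above says, the models map can be regenerated by posting to https://chat.aivvm.com/api/models. A minimal sketch, assuming the endpoint accepts an empty JSON body and returns a list of {id, name} objects (both assumptions, unverified):

import requests

# Hypothetical regeneration helper for the `models` dict above; adjust the
# parsing if the endpoint's response shape differs.
resp = requests.post("https://chat.aivvm.com/api/models", json={})
resp.raise_for_status()
models = {m["id"]: {"id": m["id"], "name": m["name"]} for m in resp.json()}
print(models)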
@@ -32,29 +33,27 @@ class Aivvm(BaseProvider):
         if not model:
             model = "gpt-3.5-turbo"
         elif model not in models:
-            raise ValueError(f"Model are not supported: {model}")
+            raise ValueError(f"Model is not supported: {model}")

         headers = {
-            "authority"          : "chat.aivvm.com",
-            "accept"             : "*/*",
-            "accept-language"    : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
-            "content-type"       : "application/json",
-            "origin"             : "https://chat.aivvm.com",
-            "referer"            : "https://chat.aivvm.com/",
-            "sec-ch-ua"          : '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
-            "sec-ch-ua-mobile"   : "?0",
-            "sec-ch-ua-platform" : '"macOS"',
-            "sec-fetch-dest"     : "empty",
-            "sec-fetch-mode"     : "cors",
-            "sec-fetch-site"     : "same-origin",
-            "user-agent"         : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36",
+            "accept"            : "*/*",
+            "accept-language"   : "hu-HU,hu;q=0.9,en-US;q=0.8,en;q=0.7",
+            "content-type"      : "application/json",
+            "sec-ch-ua"         : "\"Kuki\";v=\"116\", \"Not)A;Brand\";v=\"24\", \"Pici Pocoro\";v=\"102\"",
+            "sec-ch-ua-mobile"  : "?0",
+            "sec-ch-ua-platform": "\"Bandóz\"",
+            "sec-fetch-dest"    : "empty",
+            "sec-fetch-mode"    : "cors",
+            "sec-fetch-site"    : "same-origin",
+            "Referer"           : "https://chat.aivvm.com/",
+            "Referrer-Policy"   : "same-origin",
         }

         json_data = {
             "model"       : models[model],
             "messages"    : messages,
             "key"         : "",
-            "prompt"      : "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
+            "prompt"      : kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown."),
             "temperature" : kwargs.get("temperature", 0.7)
         }
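
Note: with the prompt change above, callers can now override Aivvm's system prompt per request through a system_message keyword, which g4f forwards to the provider via **kwargs. A usage sketch (model and provider names come from this diff; the system_message string is illustrative):

import g4f

# system_message lands in json_data["prompt"]; temperature defaults to 0.7
# when not supplied.
response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    provider=g4f.Provider.Aivvm,
    messages=[{"role": "user", "content": "hello!"}],
    system_message="You are a terse assistant. Answer in one sentence.",
    temperature=0.2,
)
print(response)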
@@ -3,25 +3,25 @@ from dataclasses import dataclass
 from .typing import Union
 from .Provider import BaseProvider, RetryProvider
 from .Provider import (
+    AItianhuSpace,
     ChatgptLogin,
-    ChatgptAi,
-    ChatBase,
-    Vercel,
-    DeepAi,
-    Aivvm,
-    Bard,
-    H2o,
-    GptGo,
-    Bing,
     PerplexityAi,
-    ChatgptDuo,
+    ChatgptAi,
+    ChatBase,
+    AItianhu,
     Wewordle,
     Yqcloud,
-    AItianhu,
-    AItianhuSpace,
-    Aichat,
     Myshell,
+    Vercel,
+    DeepAi,
+    Aichat,
+    Aivvm,
+    GptGo,
+    Bard,
+    Aibn,
+    ChatgptDuo,
+    Bing,
+    H2o,
 )

 @dataclass(unsafe_hash=True)
@@ -166,7 +166,17 @@ gpt_35_turbo_0613 = Model(
 gpt_4_0613 = Model(
     name          = 'gpt-4-0613',
     base_provider = 'openai',
-    best_provider = Vercel)
+    best_provider = Aivvm)
+
+gpt_4_32k = Model(
+    name          = 'gpt-4-32k',
+    base_provider = 'openai',
+    best_provider = Aivvm)
+
+gpt_4_32k_0613 = Model(
+    name          = 'gpt-4-32k-0613',
+    base_provider = 'openai',
+    best_provider = Aivvm)

 text_ada_001 = Model(
     name          = 'text-ada-001',
@@ -206,13 +216,17 @@ llama7b_v2_chat = Model(

 class ModelUtils:
     convert: dict[str, Model] = {
-        # gpt-3.5 / gpt-4
+        # gpt-3.5
         'gpt-3.5-turbo'          : gpt_35_turbo,
         'gpt-3.5-turbo-16k'      : gpt_35_turbo_16k,
-        'gpt-4'                  : gpt_4,
-        'gpt-4-0613'             : gpt_4_0613,
         'gpt-3.5-turbo-16k-0613' : gpt_35_turbo_16k_0613,
+
+        # gpt-4
+        'gpt-4'          : gpt_4,
+        'gpt-4-0613'     : gpt_4_0613,
+        'gpt-4-32k'      : gpt_4_32k,
+        'gpt-4-32k-0613' : gpt_4_32k_0613,

         # Bard
         'palm2' : palm,
         'palm'  : palm,
@@ -247,4 +261,4 @@ class ModelUtils:
         'oasst-sft-1-pythia-12b'           : oasst_sft_1_pythia_12b,
         'oasst-sft-4-pythia-12b-epoch-3.5' : oasst_sft_4_pythia_12b_epoch_35,
         'command-light-nightly'            : command_light_nightly,
-}
+    }
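
Note: ModelUtils.convert is the name-to-Model lookup used when a model is passed as a string; after this hunk the new gpt-4 variants resolve by name. A quick check (field names as defined in the Model dataclass above):

from g4f.models import ModelUtils

# The entries added in this diff are now resolvable by their string names.
model = ModelUtils.convert['gpt-4-32k-0613']
print(model.name, model.base_provider, model.best_provider)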
@@ -7,19 +7,21 @@ import g4f, asyncio

 print("create:", end=" ", flush=True)
 for response in g4f.ChatCompletion.create(
-    model=g4f.models.gpt_35_turbo,
-    provider=g4f.Provider.GptGo,
-    messages=[{"role": "user", "content": "hello!"}],
+    model=g4f.models.gpt_4_32k_0613,
+    provider=g4f.Provider.Aivvm,
+    messages=[{"role": "user", "content": "send a bunch of emojis. i want to test something"}],
+    temperature=0.0,
     stream=True
 ):
     print(response, end="", flush=True)
 print()

 async def run_async():
     response = await g4f.ChatCompletion.create_async(
-        model=g4f.models.gpt_35_turbo,
-        provider=g4f.Provider.GptGo,
+        model=g4f.models.gpt_35_turbo_16k_0613,
+        provider=g4f.Provider.Aivvm,
         messages=[{"role": "user", "content": "hello!"}],
     )
     print("create_async:", response)

-asyncio.run(run_async())
+# asyncio.run(run_async())
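
Note: the async smoke test is now commented out, so only the streaming call runs by default. A sketch for keeping it runnable without executing on import, using only what the script already defines:

if __name__ == "__main__":
    asyncio.run(run_async())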