Mirror of https://github.com/xtekky/gpt4free.git, synced 2024-11-22 15:05:57 +03:00
A few small fixes in GUI and Providers (#1861)
This commit is contained in:
parent 5fd118f3c9
commit 81cf5d7c77
g4f/Provider/Llama.py

@@ -16,12 +16,12 @@ class Llama(AsyncGeneratorProvider, ProviderModelMixin):
         "meta/llama-2-7b-chat",
         "meta/llama-2-13b-chat",
         "meta/llama-2-70b-chat",
-        "meta/llama-3-8b-chat",
-        "meta/llama-3-70b-chat",
+        "meta/meta-llama-3-8b-instruct",
+        "meta/meta-llama-3-70b-instruct",
     ]
     model_aliases = {
-        "meta-llama/Meta-Llama-3-8b": "meta/llama-3-8b-chat",
-        "meta-llama/Meta-Llama-3-70b": "meta/llama-3-70b-chat",
+        "meta-llama/Meta-Llama-3-8b-instruct": "meta/meta-llama-3-8b-instruct",
+        "meta-llama/Meta-Llama-3-70b-instruct": "meta/meta-llama-3-70b-instruct",
         "meta-llama/Llama-2-7b-chat-hf": "meta/llama-2-7b-chat",
         "meta-llama/Llama-2-13b-chat-hf": "meta/llama-2-13b-chat",
         "meta-llama/Llama-2-70b-chat-hf": "meta/llama-2-70b-chat",
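For context on why the alias keys change together with the model list: providers that mix in ProviderModelMixin accept either a provider-side name from models or a Hugging Face-style alias, and model_aliases maps the latter onto the former. A minimal sketch of that resolution, assuming this common pattern (illustrative only, not g4f's exact get_model code):

# Illustrative sketch of alias resolution; g4f's actual implementation may differ.
models = [
    "meta/llama-2-70b-chat",
    "meta/meta-llama-3-70b-instruct",
]
model_aliases = {
    "meta-llama/Meta-Llama-3-70b-instruct": "meta/meta-llama-3-70b-instruct",
    "meta-llama/Llama-2-70b-chat-hf": "meta/llama-2-70b-chat",
}

def resolve_model(model: str, default: str = "meta/llama-2-70b-chat") -> str:
    """Translate a Hugging Face-style alias to the provider-side name."""
    if not model:
        return default
    if model in model_aliases:
        return model_aliases[model]
    if model in models:
        return model
    raise ValueError(f"Unknown model: {model}")

assert resolve_model("meta-llama/Meta-Llama-3-70b-instruct") == "meta/meta-llama-3-70b-instruct"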
g4f/Provider/PerplexityLabs.py

@@ -15,10 +15,7 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
     working = True
     default_model = "mixtral-8x7b-instruct"
     models = [
-        "sonar-small-online", "sonar-medium-online", "sonar-small-chat", "sonar-medium-chat", "mistral-7b-instruct",
-        "codellama-70b-instruct", "llava-v1.5-7b-wrapper", "llava-v1.6-34b", "mixtral-8x7b-instruct",
-        "gemma-2b-it", "gemma-7b-it"
-        "mistral-medium", "related", "dbrx-instruct"
+        "sonar-small-online", "sonar-medium-online", "sonar-small-chat", "sonar-medium-chat", "dbrx-instruct", "claude-3-haiku-20240307", "llama-3-8b-instruct", "llama-3-70b-instruct", "codellama-70b-instruct", "mistral-7b-instruct", "llava-v1.5-7b-wrapper", "llava-v1.6-34b", "mixtral-8x7b-instruct", "mixtral-8x22b-instruct", "mistral-medium", "gemma-2b-it", "gemma-7b-it", "related"
     ]
     model_aliases = {
         "mistralai/Mistral-7B-Instruct-v0.1": "mistral-7b-instruct",
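The old list was more than untidy: the missing comma after "gemma-7b-it" triggered Python's implicit concatenation of adjacent string literals, so the list silently contained the bogus entry "gemma-7b-itmistral-medium" and neither real model name. A short demonstration of that pitfall:

# Adjacent string literals are concatenated at compile time, so a missing
# comma quietly merges two model names into one invalid entry.
models = [
    "gemma-2b-it", "gemma-7b-it"
    "mistral-medium", "related", "dbrx-instruct"
]
print(models)
# -> ['gemma-2b-it', 'gemma-7b-itmistral-medium', 'related', 'dbrx-instruct']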
@@ -93,4 +90,4 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
                 if data["final"]:
                     break
             except:
-                    raise RuntimeError(f"Message: {message}")
+                raise RuntimeError(f"Message: {message}")
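An aside on the handler itself, which this commit only re-indents: the bare except re-raises every failure as a RuntimeError built from the raw message, which can obscure the original error type. If an explicit error chain is wanted, the usual idiom looks like the sketch below (not part of this commit; the message[2:] slicing is an assumption about the socket.io-style framing, which this hunk does not show):

import json

def parse_event(message: str) -> dict:
    # Assumed framing: a numeric socket.io prefix before the JSON payload.
    try:
        return json.loads(message[2:])
    except Exception as e:
        # "from e" keeps the underlying JSONDecodeError/KeyError as __cause__
        # instead of discarding it.
        raise RuntimeError(f"Message: {message}") from e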
g4f/gui/client/index.html

@@ -220,7 +220,7 @@
                 <option value="gpt-4">gpt-4</option>
                 <option value="gpt-3.5-turbo">gpt-3.5-turbo</option>
                 <option value="llama2-70b">llama2-70b</option>
-                <option value="llama3-70b">llama2-70b</option>
+                <option value="llama3-70b-instruct">llama3-70b-instruct</option>
                 <option value="gemini-pro">gemini-pro</option>
                 <option value="">----</option>
             </select>
g4f/models.py

@@ -4,28 +4,33 @@ from dataclasses import dataclass
 
 from .Provider import RetryProvider, ProviderType
 from .Provider import (
+    Aichatos,
+    Bing,
+    Blackbox,
     Chatgpt4Online,
-    PerplexityLabs,
-    GeminiProChat,
+    ChatgptAi,
     ChatgptNext,
+    Cohere,
+    Cnote,
+    DeepInfra,
+    Feedough,
+    FreeGpt,
+    Gemini,
+    GeminiProChat,
+    GigaChat,
     HuggingChat,
     HuggingFace,
-    OpenaiChat,
-    ChatgptAi,
-    DeepInfra,
-    GigaChat,
-    Liaobots,
-    FreeGpt,
-    Llama,
-    Vercel,
-    Gemini,
     Koala,
-    Cohere,
-    Bing,
-    You,
+    Liaobots,
+    Llama,
+    OpenaiChat,
+    PerplexityLabs,
     Pi,
+    Vercel,
+    You,
 )
 
 
 @dataclass(unsafe_hash=True)
 class Model:
     """
@@ -79,6 +84,9 @@ gpt_35_turbo = Model(
         ChatgptNext,
         Koala,
         OpenaiChat,
+        Aichatos,
+        Cnote,
+        Feedough,
     ])
 )
 
@@ -132,14 +140,14 @@ llama2_70b = Model(
     best_provider = RetryProvider([Llama, DeepInfra, HuggingChat])
 )
 
-llama3_8b = Model(
-    name = "meta-llama/Meta-Llama-3-8b",
+llama3_8b_instruct = Model(
+    name = "meta-llama/Meta-Llama-3-8b-instruct",
     base_provider = "meta",
     best_provider = RetryProvider([Llama])
 )
 
-llama3_70b = Model(
-    name = "meta-llama/Meta-Llama-3-70b",
+llama3_70b_instruct = Model(
+    name = "meta-llama/Meta-Llama-3-70b-instruct",
     base_provider = "meta",
     best_provider = RetryProvider([Llama, HuggingChat])
 )
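Throughout models.py, best_provider = RetryProvider([...]) declares a fallback chain rather than a single backend. A simplified synchronous sketch of that behavior, assuming providers are tried in turn until one succeeds (g4f's real RetryProvider is async, supports streaming, and may shuffle the list):

from typing import Callable, List

def complete_with_fallback(providers: List[Callable[[str], str]], prompt: str) -> str:
    """Try each provider in order; collect failures and raise if none succeed."""
    errors = {}
    for provider in providers:
        try:
            return provider(prompt)
        except Exception as e:
            errors[getattr(provider, "__name__", repr(provider))] = e
    raise RuntimeError(f"All providers failed: {errors}")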
@@ -291,6 +299,12 @@ command_r_plus = Model(
     best_provider = RetryProvider([HuggingChat, Cohere])
 )
 
+blackbox = Model(
+    name = 'blackbox',
+    base_provider = 'blackbox',
+    best_provider = Blackbox
+)
+
 class ModelUtils:
     """
     Utility class for mapping string identifiers to Model instances.
@@ -314,12 +328,12 @@ class ModelUtils:
         'gpt-4-32k-0613' : gpt_4_32k_0613,
         'gpt-4-turbo' : gpt_4_turbo,
 
-        # Llama 2
+        # Llama
         'llama2-7b' : llama2_7b,
         'llama2-13b': llama2_13b,
         'llama2-70b': llama2_70b,
-        'llama3-8b' : llama3_8b,
-        'llama3-70b': llama3_70b,
+        'llama3-8b-instruct' : llama3_8b_instruct,
+        'llama3-70b-instruct': llama3_70b_instruct,
         'codellama-34b-instruct': codellama_34b_instruct,
         'codellama-70b-instruct': codellama_70b_instruct,
 
@@ -345,6 +359,7 @@ class ModelUtils:
         'claude-3-sonnet': claude_3_sonnet,
 
         # other
+        'blackbox': blackbox,
         'command-r+': command_r_plus,
         'dbrx-instruct': dbrx_instruct,
         'lzlv-70b': lzlv_70b,
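With the registry renamed, only the instruct spellings resolve for Llama 3. A quick lookup example, assuming an installed g4f where ModelUtils.convert is the string-to-Model mapping edited above (the attribute name is not shown in this hunk):

from g4f.models import ModelUtils  # assumes g4f is installed

model = ModelUtils.convert["llama3-70b-instruct"]
print(model.name)           # meta-llama/Meta-Llama-3-70b-instruct
print(model.base_provider)  # meta

# The plain keys were removed by this commit, so this now raises KeyError:
# ModelUtils.convert["llama3-70b"]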