mirror of https://github.com/xtekky/gpt4free.git

commit 002a4a1d7f
parent 6e064adb09

    Fix typo in model list
@@ -9,13 +9,14 @@ class DeepInfra(Openai):
     label = "DeepInfra"
     url = "https://deepinfra.com"
     working = True
+    needs_auth = False
     has_auth = True
     supports_stream = True
     supports_message_history = True
     default_model = "meta-llama/Meta-Llama-3-70b-instruct"
     default_vision_model = "llava-hf/llava-1.5-7b-hf"
     model_aliases = {
-        'mixtral-8x22b': 'HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1'
+        'dbrx-instruct': 'databricks/dbrx-instruct',
     }
 
     @classmethod
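For context on what the alias swap above means in practice: provider classes resolve short alias names to full repo ids through ProviderModelMixin. A minimal sketch, assuming get_model is the mixin's standard alias lookup (the snippet is illustrative and not part of this commit):

# Hedged sketch, not from the commit: after the hunk above, requesting the
# short alias should resolve to the full DeepInfra repo id. Assumes
# ProviderModelMixin.get_model performs the model_aliases lookup.
from g4f.Provider import DeepInfra

print(DeepInfra.get_model("dbrx-instruct"))  # expected: "databricks/dbrx-instruct"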
@@ -6,12 +6,14 @@ from aiohttp import ClientSession, BaseConnector
 
 from ..typing import AsyncResult, Messages
 from ..requests.raise_for_status import raise_for_status
+from ..providers.conversation import BaseConversation
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt, get_connector
+from .helper import format_prompt, get_connector, get_cookies
 
 class HuggingChat(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://huggingface.co/chat"
     working = True
+    needs_auth = True
     default_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
     models = [
         "HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
@@ -22,9 +24,6 @@ class HuggingChat(AsyncGeneratorProvider, ProviderModelMixin):
         'mistralai/Mistral-7B-Instruct-v0.2',
         'meta-llama/Meta-Llama-3-70B-Instruct'
     ]
-    model_aliases = {
-        "openchat/openchat_3.5": "openchat/openchat-3.5-0106",
-    }
 
     @classmethod
     def get_models(cls):
@@ -45,9 +44,16 @@ class HuggingChat(AsyncGeneratorProvider, ProviderModelMixin):
         connector: BaseConnector = None,
         web_search: bool = False,
         cookies: dict = None,
+        conversation: Conversation = None,
+        return_conversation: bool = False,
+        delete_conversation: bool = True,
         **kwargs
     ) -> AsyncResult:
         options = {"model": cls.get_model(model)}
+        if cookies is None:
+            cookies = get_cookies("huggingface.co", False)
+        if return_conversation:
+            delete_conversation = False
 
         system_prompt = "\n".join([message["content"] for message in messages if message["role"] == "system"])
         if system_prompt:
@@ -61,9 +67,14 @@ class HuggingChat(AsyncGeneratorProvider, ProviderModelMixin):
             headers=headers,
             connector=get_connector(connector, proxy)
         ) as session:
-            async with session.post(f"{cls.url}/conversation", json=options) as response:
-                await raise_for_status(response)
-                conversation_id = (await response.json())["conversationId"]
+            if conversation is None:
+                async with session.post(f"{cls.url}/conversation", json=options) as response:
+                    await raise_for_status(response)
+                    conversation_id = (await response.json())["conversationId"]
+                if return_conversation:
+                    yield Conversation(conversation_id)
+            else:
+                conversation_id = conversation.conversation_id
             async with session.get(f"{cls.url}/conversation/{conversation_id}/__data.json") as response:
                 await raise_for_status(response)
                 data: list = (await response.json())["nodes"][1]["data"]
@@ -72,7 +83,7 @@ class HuggingChat(AsyncGeneratorProvider, ProviderModelMixin):
             message_id: str = data[message_keys["id"]]
             options = {
                 "id": message_id,
-                "inputs": format_prompt(messages),
+                "inputs": format_prompt(messages) if conversation is None else messages[-1]["content"],
                 "is_continue": False,
                 "is_retry": False,
                 "web_search": web_search
@@ -92,5 +103,10 @@ class HuggingChat(AsyncGeneratorProvider, ProviderModelMixin):
                         yield token
                     elif line["type"] == "finalAnswer":
                         break
-            async with session.delete(f"{cls.url}/conversation/{conversation_id}") as response:
-                await raise_for_status(response)
+            if delete_conversation:
+                async with session.delete(f"{cls.url}/conversation/{conversation_id}") as response:
+                    await raise_for_status(response)
+
+class Conversation(BaseConversation):
+    def __init__(self, conversation_id: str) -> None:
+        self.conversation_id = conversation_id
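The hunks above add conversation reuse to HuggingChat: passing return_conversation=True yields a Conversation object before any tokens (and forces delete_conversation to False), and passing it back as conversation= makes the provider send only the last message instead of the full transcript. A hedged sketch of that flow, assuming the async generator is called directly with the usual model/messages arguments (the call shape is illustrative; only the three new parameters come from this diff):

import asyncio

from g4f.Provider import HuggingChat
from g4f.providers.conversation import BaseConversation

# Hedged sketch, not from the commit: exercises the new parameters by
# calling the provider's async generator directly.
async def main():
    conversation = None
    async for chunk in HuggingChat.create_async_generator(
        model="mistralai/Mixtral-8x7B-Instruct-v0.1",
        messages=[{"role": "user", "content": "Hello!"}],
        return_conversation=True,  # first yield is a Conversation; this
                                   # also forces delete_conversation = False
    ):
        if isinstance(chunk, BaseConversation):
            conversation = chunk   # holds the server-side conversation_id
        else:
            print(chunk, end="")

    # Second turn: with conversation set, only messages[-1]["content"] is
    # sent, per the new format_prompt(...) if conversation is None else
    # messages[-1]["content"] logic above.
    async for chunk in HuggingChat.create_async_generator(
        model="mistralai/Mixtral-8x7B-Instruct-v0.1",
        messages=[{"role": "user", "content": "Can you elaborate?"}],
        conversation=conversation,
    ):
        print(chunk, end="")

asyncio.run(main())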
@@ -11,7 +11,7 @@ class Llama(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://www.llama2.ai"
     working = True
     supports_message_history = True
-    default_model = "meta/llama-3-70b-chat"
+    default_model = "meta/meta-llama-3-70b-instruct"
     models = [
         "meta/llama-2-7b-chat",
         "meta/llama-2-13b-chat",
@@ -20,8 +20,8 @@ class Llama(AsyncGeneratorProvider, ProviderModelMixin):
         "meta/meta-llama-3-70b-instruct",
     ]
     model_aliases = {
-        "meta-llama/Meta-Llama-3-8b-instruct": "meta/meta-llama-3-8b-instruct",
-        "meta-llama/Meta-Llama-3-70b-instruct": "meta/meta-llama-3-70b-instruct",
+        "meta-llama/Meta-Llama-3-8B-Instruct": "meta/meta-llama-3-8b-instruct",
+        "meta-llama/Meta-Llama-3-70B-Instruct": "meta/meta-llama-3-70b-instruct",
         "meta-llama/Llama-2-7b-chat-hf": "meta/llama-2-7b-chat",
         "meta-llama/Llama-2-13b-chat-hf": "meta/llama-2-13b-chat",
         "meta-llama/Llama-2-70b-chat-hf": "meta/llama-2-70b-chat",
@@ -11,6 +11,9 @@ class Replicate(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://replicate.com"
     working = True
     default_model = "meta/meta-llama-3-70b-instruct"
+    model_aliases = {
+        "meta-llama/Meta-Llama-3-70B-Instruct": default_model
+    }
 
     @classmethod
     async def create_async_generator(
@@ -29,7 +29,9 @@ _cookies: Dict[str, Cookies] = {}
 DOMAINS = [
     ".bing.com",
     ".meta.ai",
-    ".google.com"
+    ".google.com",
+    "www.whiterabbitneo.com",
+    "huggingface.co"
 ]
 
 if has_browser_cookie3 and os.environ.get('DBUS_SESSION_BUS_ADDRESS') == "/dev/null":
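Adding "huggingface.co" to DOMAINS matters because HuggingChat now falls back to browser cookies when none are supplied. A small sketch of that lookup, assuming get_cookies also lives in g4f.cookies and that its second positional argument suppresses the error raised when no browser cookie store is available (an assumption about its meaning, mirroring the call added in the HuggingChat hunk):

# Hedged sketch: the same lookup HuggingChat now performs when cookies is
# None. The second argument is assumed to disable the missing-browser error.
from g4f.cookies import get_cookies

cookies = get_cookies("huggingface.co", False)
print(sorted(cookies) if cookies else "no huggingface.co cookies found")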
@@ -25,6 +25,7 @@ from .Provider import (
     Llama,
     OpenaiChat,
     PerplexityLabs,
+    Replicate,
     Pi,
     Vercel,
     You,
@@ -137,19 +138,19 @@ llama2_13b = Model(
 llama2_70b = Model(
     name = "meta-llama/Llama-2-70b-chat-hf",
     base_provider = "meta",
-    best_provider = RetryProvider([Llama, DeepInfra, HuggingChat])
+    best_provider = RetryProvider([Llama, DeepInfra])
 )
 
 llama3_8b_instruct = Model(
-    name = "meta-llama/Meta-Llama-3-8b-instruct",
+    name = "meta-llama/Meta-Llama-3-8B-Instruct",
     base_provider = "meta",
-    best_provider = RetryProvider([Llama])
+    best_provider = RetryProvider([Llama, DeepInfra, Replicate])
 )
 
 llama3_70b_instruct = Model(
-    name = "meta-llama/Meta-Llama-3-70b-instruct",
+    name = "meta-llama/Meta-Llama-3-70B-Instruct",
     base_provider = "meta",
-    best_provider = RetryProvider([Llama, HuggingChat])
+    best_provider = RetryProvider([Llama, DeepInfra])
 )
 
 codellama_34b_instruct = Model(
@@ -168,7 +169,7 @@ codellama_70b_instruct = Model(
 mixtral_8x7b = Model(
     name = "mistralai/Mixtral-8x7B-Instruct-v0.1",
     base_provider = "huggingface",
-    best_provider = RetryProvider([DeepInfra, HuggingChat, HuggingFace, PerplexityLabs])
+    best_provider = RetryProvider([DeepInfra, HuggingFace, PerplexityLabs])
 )
 
 mistral_7b = Model(
@@ -186,7 +187,7 @@ mistral_7b_v02 = Model(
 mixtral_8x22b = Model(
     name = "HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
     base_provider = "huggingface",
-    best_provider = RetryProvider([HuggingChat, DeepInfra])
+    best_provider = DeepInfra
 )
 
 # Misc models
@@ -211,7 +212,7 @@ airoboros_70b = Model(
 openchat_35 = Model(
     name = "openchat/openchat_3.5",
     base_provider = "huggingface",
-    best_provider = RetryProvider([DeepInfra, HuggingChat])
+    best_provider = DeepInfra
 )
 
 # Bard
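To see the corrected registry entries end to end: the renamed Model names mean callers must now request the capitalized ids, and RetryProvider tries the listed providers in order until one succeeds. A hedged sketch using the top-level API of this era (the entry point is an assumption, not part of the diff):

# Hedged sketch: requesting the renamed llama3 model through the top-level
# API. Per the hunk above, RetryProvider([Llama, DeepInfra]) tries Llama
# first, then falls back to DeepInfra.
import g4f

response = g4f.ChatCompletion.create(
    model="meta-llama/Meta-Llama-3-70B-Instruct",
    messages=[{"role": "user", "content": "Say hi in one word."}],
)
print(response)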