feat(g4f/Provider/HuggingChat.): add Qwen2.5-72B model and alias
This commit is contained in:
parent 0c18ed5ce2
commit a6099ba48b
@@ -154,7 +154,6 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
                 async for chunk in response.content.iter_any():
                     if chunk:
                         decoded_chunk = chunk.decode()
-                        # Strip the $@$v=v1.10-rv1$@$ prefix and similar markers
                         decoded_chunk = re.sub(r'\$@\$v=[^$]+\$@\$', '', decoded_chunk)
-                        if decoded_chunk.strip():  # Check whether any text remains after stripping the prefix
+                        if decoded_chunk.strip():
                             yield decoded_chunk
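Note: the removed comments only documented the regex above, so behavior is unchanged. A self-contained sketch of what that substitution does; the sample chunk value is made up, not real Blackbox output:

import re

# Hypothetical raw chunk in the shape Blackbox streams: version marker, then text.
raw_chunk = "$@$v=v1.10-rv1$@$Hello, world!"

# Same pattern as in the diff: drop anything of the form $@$v=...$@$.
cleaned = re.sub(r'\$@\$v=[^$]+\$@\$', '', raw_chunk)

print(cleaned)                 # Hello, world!
print(bool(cleaned.strip()))   # True, so the provider would yield this chunk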
@@ -16,6 +16,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
     models = [
         'meta-llama/Meta-Llama-3.1-70B-Instruct',
         'CohereForAI/c4ai-command-r-plus-08-2024',
+        'Qwen/Qwen2.5-72B-Instruct',
         'mistralai/Mixtral-8x7B-Instruct-v0.1',
         'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
         'mistralai/Mistral-7B-Instruct-v0.3',
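With 'Qwen/Qwen2.5-72B-Instruct' in the model list, the provider accepts it by full repository id. A rough usage sketch, assuming the g4f API as of this commit; the prompt is illustrative:

import g4f
from g4f.Provider import HuggingChat

# Request the newly listed model through the HuggingChat provider (sketch only).
response = g4f.ChatCompletion.create(
    model="Qwen/Qwen2.5-72B-Instruct",
    provider=HuggingChat,
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
)
print(response)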
@@ -25,6 +26,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
     model_aliases = {
         "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct",
         "command-r-plus": "CohereForAI/c4ai-command-r-plus-08-2024",
+        "qwen-2-72b": "Qwen/Qwen2.5-72B-Instruct",
         "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
         "mixtral-8x7b-dpo": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
         "mistral-7b": "mistralai/Mistral-7B-Instruct-v0.3",
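The alias lets callers pass the short name "qwen-2-72b" and still reach the full repository id. A minimal sketch of such a lookup; resolve_model is a hypothetical helper, not g4f's actual resolution code in ProviderModelMixin:

# Hypothetical illustration; g4f's real alias handling may differ.
model_aliases = {
    "qwen-2-72b": "Qwen/Qwen2.5-72B-Instruct",
    "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct",
}

def resolve_model(requested: str) -> str:
    # Fall back to the requested name when no alias is registered.
    return model_aliases.get(requested, requested)

assert resolve_model("qwen-2-72b") == "Qwen/Qwen2.5-72B-Instruct"
assert resolve_model("Qwen/Qwen2.5-72B-Instruct") == "Qwen/Qwen2.5-72B-Instruct"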
@@ -443,7 +443,7 @@ qwen_1_5_110b = Model(
 qwen_2_72b = Model(
     name = 'qwen-2-72b',
     base_provider = 'Qwen',
-    best_provider = IterListProvider([DeepInfraChat, Airforce])
+    best_provider = IterListProvider([DeepInfraChat, HuggingChat, Airforce, HuggingFace])
 )
 
 qwen_turbo = Model(
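Adding HuggingChat and HuggingFace to the qwen-2-72b provider list gives the model more fallback options. A simplified, hypothetical sketch of the fallback idea; g4f's actual IterListProvider is async and more involved:

class SimpleIterListProvider:
    # Try each wrapped provider in order until one succeeds (illustration only).
    def __init__(self, providers):
        self.providers = providers

    def create(self, model, messages):
        last_error = None
        for provider in self.providers:
            try:
                return provider.create(model, messages)
            except Exception as error:  # move on to the next provider
                last_error = error
        raise RuntimeError("all providers failed") from last_error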