feat(g4f/Provider/HuggingChat): add Qwen2.5-72B model and alias

kqlio67 2024-09-25 08:01:39 +03:00
parent 0c18ed5ce2
commit a6099ba48b
3 changed files with 4 additions and 3 deletions

g4f/Provider/Blackbox.py

@@ -154,7 +154,6 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
             async for chunk in response.content.iter_any():
                 if chunk:
                     decoded_chunk = chunk.decode()
-                    # Видаляємо префікс $@$v=v1.10-rv1$@$ та інші подібні
                     decoded_chunk = re.sub(r'\$@\$v=[^$]+\$@\$', '', decoded_chunk)
-                    if decoded_chunk.strip():  # Перевіряємо, чи залишився якийсь текст після видалення префікса
+                    if decoded_chunk.strip():
                         yield decoded_chunk
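
The dropped comments do not change behaviour: the provider still strips Blackbox's version marker before yielding text. A minimal standalone sketch of what that regex does, with a made-up sample chunk (the marker format comes from the removed comment; the payload text is invented):

import re

# Hypothetical chunk as it might arrive from the Blackbox stream,
# prefixed with a version marker such as "$@$v=v1.10-rv1$@$".
chunk = "$@$v=v1.10-rv1$@$Hello from the model"

# Same pattern as in the hunk above: remove any "$@$v=...$@$" marker.
cleaned = re.sub(r'\$@\$v=[^$]+\$@\$', '', chunk)

# Only forward non-empty text, mirroring the `if decoded_chunk.strip()` check.
if cleaned.strip():
    print(cleaned)  # prints: Hello from the model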

g4f/Provider/HuggingChat.py

@@ -16,6 +16,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
     models = [
         'meta-llama/Meta-Llama-3.1-70B-Instruct',
         'CohereForAI/c4ai-command-r-plus-08-2024',
+        'Qwen/Qwen2.5-72B-Instruct',
         'mistralai/Mixtral-8x7B-Instruct-v0.1',
         'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
         'mistralai/Mistral-7B-Instruct-v0.3',
@@ -25,6 +26,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
     model_aliases = {
         "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct",
         "command-r-plus": "CohereForAI/c4ai-command-r-plus-08-2024",
+        "qwen-2-72b": "Qwen/Qwen2.5-72B-Instruct",
         "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
         "mixtral-8x7b-dpo": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
         "mistral-7b": "mistralai/Mistral-7B-Instruct-v0.3",

g4f/models.py

@@ -443,7 +443,7 @@ qwen_1_5_110b = Model(
 qwen_2_72b = Model(
     name = 'qwen-2-72b',
     base_provider = 'Qwen',
-    best_provider = IterListProvider([DeepInfraChat, Airforce])
+    best_provider = IterListProvider([DeepInfraChat, HuggingChat, Airforce, HuggingFace])
 )
 
 qwen_turbo = Model(
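
Because qwen_2_72b now lists HuggingChat and HuggingFace in its IterListProvider, a call that names only the model should be able to fall back across those providers. A sketch under the assumption that g4f picks providers from this registry when none is given explicitly (provider order and retry behaviour are not guaranteed here):

import g4f

# No provider specified: g4f consults the qwen_2_72b entry in g4f/models.py
# and tries the providers from its IterListProvider (DeepInfraChat,
# HuggingChat, Airforce, HuggingFace) until one returns a response.
response = g4f.ChatCompletion.create(
    model="qwen-2-72b",
    messages=[{"role": "user", "content": "What model are you?"}],
)
print(response)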