Updated g4f/models.py g4f/Provider/Liaobots.py g4f/Provider/__init__.py g4f/Provider/DeepInfraChat.py

This commit is contained in:
kqlio67 2024-09-27 00:24:44 +03:00
parent 9ff384978c
commit cfe5acc152
4 changed files with 27 additions and 65 deletions

View File

@@ -39,7 +39,7 @@ class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin):
model_aliases = {
"llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct",
"llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct",
"Llama-3.1-8B": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"llama-3.1-8B": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"mixtral-8x22b": "mistralai/Mixtral-8x22B-Instruct-v0.1",
"mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
"wizardlm-2-8x22b": "microsoft/WizardLM-2-8x22B",
@@ -47,9 +47,9 @@ class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin):
"qwen-2-72b": "Qwen/Qwen2-72B-Instruct",
"phi-3-medium-4k": "microsoft/Phi-3-medium-4k-instruct",
"gemma-2b-27b": "google/gemma-2-27b-it",
"minicpm-llama-3-v2.5": "openbmb/MiniCPM-Llama3-V-2_5",
"minicpm-llama-3-v2.5": "openbmb/MiniCPM-Llama3-V-2_5", # Image upload is available
"mistral-7b": "mistralai/Mistral-7B-Instruct-v0.3",
"lzlv_70b": "lizpreciatior/lzlv_70b_fp16_hf",
"lzlv-70b": "lizpreciatior/lzlv_70b_fp16_hf",
"openchat-3.6-8b": "openchat/openchat-3.6-8b",
"phind-codellama-34b-v2": "Phind/Phind-CodeLlama-34B-v2",
"dolphin-2.9.1-llama-3-70b": "cognitivecomputations/dolphin-2.9.1-llama-3-70b",

View File

@@ -36,36 +36,18 @@ models = {
"tokenLimit": 7800,
"context": "8K",
},
"o1-preview": {
"id": "o1-preview",
"name": "o1-preview",
"model": "o1",
"provider": "OpenAI",
"maxLength": 400000,
"tokenLimit": 100000,
"context": "128K",
},
"o1-mini": {
"id": "o1-mini",
"name": "o1-mini",
"model": "o1",
"provider": "OpenAI",
"maxLength": 400000,
"tokenLimit": 100000,
"context": "128K",
},
"gpt-4-turbo-2024-04-09": {
"id": "gpt-4-turbo-2024-04-09",
"name": "GPT-4-Turbo",
"gpt-4o-2024-08-06": {
"id": "gpt-4o-2024-08-06",
"name": "GPT-4o",
"model": "ChatGPT",
"provider": "OpenAI",
"maxLength": 260000,
"tokenLimit": 126000,
"context": "128K",
},
"gpt-4o-2024-08-06": {
"id": "gpt-4o-2024-08-06",
"name": "GPT-4o",
"gpt-4-turbo-2024-04-09": {
"id": "gpt-4-turbo-2024-04-09",
"name": "GPT-4-Turbo",
"model": "ChatGPT",
"provider": "OpenAI",
"maxLength": 260000,
@@ -117,18 +99,18 @@ models = {
"tokenLimit": 200000,
"context": "200K",
},
"claude-3-sonnet-20240229": {
"id": "claude-3-sonnet-20240229",
"name": "Claude-3-Sonnet",
"claude-3-5-sonnet-20240620": {
"id": "claude-3-5-sonnet-20240620",
"name": "Claude-3.5-Sonnet",
"model": "Claude",
"provider": "Anthropic",
"maxLength": 800000,
"tokenLimit": 200000,
"context": "200K",
},
"claude-3-5-sonnet-20240620": {
"id": "claude-3-5-sonnet-20240620",
"name": "Claude-3.5-Sonnet",
"claude-3-sonnet-20240229": {
"id": "claude-3-sonnet-20240229",
"name": "Claude-3-Sonnet",
"model": "Claude",
"provider": "Anthropic",
"maxLength": 800000,
@@ -153,8 +135,8 @@ models = {
"tokenLimit": 200000,
"context": "200K",
},
"gemini-1.5-flash-exp-0827": {
"id": "gemini-1.5-flash-exp-0827",
"gemini-1.5-flash-002": {
"id": "gemini-1.5-flash-002",
"name": "Gemini-1.5-Flash-1M",
"model": "Gemini",
"provider": "Google",
@@ -162,8 +144,8 @@ models = {
"tokenLimit": 1000000,
"context": "1024K",
},
"gemini-1.5-pro-exp-0827": {
"id": "gemini-1.5-pro-exp-0827",
"gemini-1.5-pro-002": {
"id": "gemini-1.5-pro-002",
"name": "Gemini-1.5-Pro-1M",
"model": "Gemini",
"provider": "Google",
@@ -186,11 +168,9 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
model_aliases = {
"gpt-4o-mini": "gpt-4o-mini-free",
"gpt-4o": "gpt-4o-free",
"o1": "o1-preview",
"gpt-4o": "gpt-4o-2024-08-06",
"gpt-4-turbo": "gpt-4-turbo-2024-04-09",
"gpt-4o": "gpt-4o-2024-08-06",
"gpt-4": "gpt-4-0613",
"claude-3-opus": "claude-3-opus-20240229",
@@ -201,8 +181,8 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
"claude-3-haiku": "claude-3-haiku-20240307",
"claude-2.1": "claude-2.1",
"gemini-flash": "gemini-1.5-flash-exp-0827",
"gemini-pro": "gemini-1.5-pro-exp-0827",
"gemini-flash": "gemini-1.5-flash-002",
"gemini-pro": "gemini-1.5-pro-002",
}
_auth_code = ""

View File

@@ -10,6 +10,7 @@ from .selenium import *
from .needs_auth import *
from .AI365VIP import AI365VIP
from .AIChatFree import AIChatFree
from .Allyfy import Allyfy
from .AiChatOnline import AiChatOnline
from .AiChats import AiChats

View File

@@ -4,6 +4,7 @@ from dataclasses import dataclass
from .Provider import IterListProvider, ProviderType
from .Provider import (
AIChatFree,
Airforce,
Allyfy,
Bing,
@@ -146,22 +147,6 @@ gpt_4 = Model(
])
)
# o1
o1 = Model(
name = 'o1',
base_provider = 'OpenAI',
best_provider = IterListProvider([
Liaobots
])
)
o1_mini = Model(
name = 'o1-mini',
base_provider = 'OpenAI',
best_provider = IterListProvider([
Liaobots
])
)
### GigaChat ###
gigachat = Model(
@@ -288,7 +273,7 @@ phi_3_5_mini = Model(
gemini_pro = Model(
name = 'gemini-pro',
base_provider = 'Google DeepMind',
best_provider = IterListProvider([GeminiPro, LiteIcoding, Blackbox, Liaobots, Airforce])
best_provider = IterListProvider([GeminiPro, LiteIcoding, Blackbox, AIChatFree, Liaobots, Airforce])
)
gemini_flash = Model(
@@ -785,10 +770,6 @@ class ModelUtils:
'gpt-4': gpt_4,
'gpt-4-turbo': gpt_4_turbo,
# o1
'o1': o1,
'o1-mini': o1_mini,
### Meta ###
"meta-ai": meta,