Mirror of https://github.com/xtekky/gpt4free.git, synced 2024-12-22 18:41:41 +03:00
Provider removed (g4f/Provider/ChatHub.py)
This commit is contained in:
parent ed1b339726
commit 75549df2ba
@@ -37,7 +37,6 @@ This document provides an overview of various AI providers and models, including
 |[chatgpt4online.org](https://chatgpt4online.org)|`g4f.Provider.Chatgpt4Online`|`gpt-4`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
 |[chatgpt4o.one](https://chatgpt4o.one)|`g4f.Provider.Chatgpt4o`|✔|❌|❌|❌|![Disabled](https://img.shields.io/badge/Disabled-red)![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
 |[chatgptfree.ai](https://chatgptfree.ai)|`g4f.Provider.ChatgptFree`|`gpt-4o-mini`|❌|❌|?|![Disabled](https://img.shields.io/badge/Disabled-red)![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
-|[app.chathub.gg](https://app.chathub.gg)|`g4f.Provider.ChatHub`|`llama-3.1-8b, mixtral-8x7b, gemma-2, sonar-online`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
 |[chatify-ai.vercel.app](https://chatify-ai.vercel.app)|`g4f.Provider.ChatifyAI`|`llama-3.1-8b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
 |[playground.ai.cloudflare.com](https://playground.ai.cloudflare.com)|`g4f.Provider.Cloudflare`|`german-7b, gemma-7b, llama-2-7b, llama-3-8b, llama-3.1-8b, llama-3.2-11b, llama-3.2-1b, llama-3.2-3b, mistral-7b, openchat-3.5, phi-2, qwen-1.5-0.5b, qwen-1.5-1.8b, qwen-1.5-14b, qwen-1.5-7b, tinyllama-1.1b, cybertron-7b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
 |[aiuncensored.info](https://www.aiuncensored.info)|`g4f.Provider.DarkAI`|`gpt-4o, gpt-3.5-turbo, llama-3-70b, llama-3-405b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
@@ -144,7 +143,6 @@ This document provides an overview of various AI providers and models, including
 |gemma-2b-9b|Google|1+ Providers|[huggingface.co](https://huggingface.co/google/gemma-2-9b)|
 |gemma-2b-27b|Google|2+ Providers|[huggingface.co](https://huggingface.co/google/gemma-2-27b)|
 |gemma-7b|Google|1+ Providers|[huggingface.co](https://huggingface.co/google/gemma-7b)|
-|gemma-2|Google|2+ Providers|[huggingface.co](https://huggingface.co/blog/gemma2)|
 |gemma_2_27b|Google|1+ Providers|[huggingface.co](https://huggingface.co/blog/gemma2)|
 |claude-2.1|Anthropic|1+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-2)|
 |claude-3-haiku|Anthropic|4+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-haiku)|
g4f/Provider/ChatHub.py
@@ -1,84 +0,0 @@
-from __future__ import annotations
-
-import json
-from aiohttp import ClientSession
-
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-
-class ChatHub(AsyncGeneratorProvider, ProviderModelMixin):
-    label = "ChatHub"
-    url = "https://app.chathub.gg"
-    api_endpoint = "https://app.chathub.gg/api/v3/chat/completions"
-    working = True
-    supports_stream = True
-    supports_system_message = True
-    supports_message_history = True
-
-    default_model = 'meta/llama3.1-8b'
-    models = [
-        'meta/llama3.1-8b',
-        'mistral/mixtral-8x7b',
-        'google/gemma-2',
-        'perplexity/sonar-online',
-    ]
-
-    model_aliases = {
-        "llama-3.1-8b": "meta/llama3.1-8b",
-        "mixtral-8x7b": "mistral/mixtral-8x7b",
-        "gemma-2": "google/gemma-2",
-        "sonar-online": "perplexity/sonar-online",
-    }
-
-    @classmethod
-    def get_model(cls, model: str) -> str:
-        if model in cls.models:
-            return model
-        elif model in cls.model_aliases:
-            return cls.model_aliases[model]
-        else:
-            return cls.default_model
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        model = cls.get_model(model)
-
-        headers = {
-            'accept': '*/*',
-            'accept-language': 'en-US,en;q=0.9',
-            'content-type': 'application/json',
-            'origin': cls.url,
-            'referer': f"{cls.url}/chat/cloud-llama3.1-8b",
-            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36',
-            'x-app-id': 'web'
-        }
-
-        async with ClientSession(headers=headers) as session:
-            prompt = format_prompt(messages)
-            data = {
-                "model": model,
-                "messages": [{"role": "user", "content": prompt}],
-                "tools": []
-            }
-
-            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
-                response.raise_for_status()
-                async for line in response.content:
-                    if line:
-                        decoded_line = line.decode('utf-8')
-                        if decoded_line.startswith('data:'):
-                            try:
-                                data = json.loads(decoded_line[5:])
-                                if data['type'] == 'text-delta':
-                                    yield data['textDelta']
-                                elif data['type'] == 'done':
-                                    break
-                            except json.JSONDecodeError:
-                                continue
@@ -31,7 +31,6 @@ from .Chatgpt4Online import Chatgpt4Online
 from .Chatgpt4o import Chatgpt4o
 from .ChatGptEs import ChatGptEs
 from .ChatgptFree import ChatgptFree
-from .ChatHub import ChatHub
 from .ChatifyAI import ChatifyAI
 from .Cloudflare import Cloudflare
 from .DarkAI import DarkAI
@@ -16,7 +16,6 @@ from .Provider import (
     Chatgpt4Online,
     ChatGptEs,
     ChatgptFree,
-    ChatHub,
     ChatifyAI,
     Cloudflare,
     DarkAI,
@@ -101,9 +100,7 @@ default = Model(
         MagickPen,
         DeepInfraChat,
         Airforce,
-        ChatHub,
         ChatGptEs,
-        ChatHub,
         ChatifyAI,
         Cloudflare,
         Editee,
@@ -217,7 +214,7 @@ llama_3_70b = Model(
 llama_3_1_8b = Model(
     name = "llama-3.1-8b",
     base_provider = "Meta Llama",
-    best_provider = IterListProvider([Blackbox, DeepInfraChat, ChatHub, Cloudflare, Airforce, PerplexityLabs])
+    best_provider = IterListProvider([Blackbox, DeepInfraChat, Cloudflare, Airforce, PerplexityLabs])
 )

 llama_3_1_70b = Model(
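This hunk and the ones that follow make the same kind of change: ChatHub is dropped from each model's `IterListProvider` fallback chain. The sketch below is illustrative only (it is not g4f's actual `IterListProvider`/`RetryProvider` code); it shows the fallback idea, where removing one provider simply shortens the chain the remaining entries fall through.

```python
from typing import Callable, Iterable

def complete_with_fallback(providers: Iterable[Callable[[str], str]], prompt: str) -> str:
    """Try each provider in order and return the first successful completion."""
    last_error: Exception | None = None
    for provider in providers:
        try:
            return provider(prompt)
        except Exception as exc:  # e.g. provider offline or rate-limited
            last_error = exc
    raise RuntimeError("all providers in the chain failed") from last_error
```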
@@ -294,7 +291,7 @@ mistral_7b = Model(
 mixtral_8x7b = Model(
     name = "mixtral-8x7b",
     base_provider = "Mistral",
-    best_provider = IterListProvider([DDG, ReplicateHome, DeepInfraChat, ChatHub, Airforce, DeepInfra])
+    best_provider = IterListProvider([DDG, ReplicateHome, DeepInfraChat, Airforce, DeepInfra])
 )

 mixtral_8x22b = Model(
@@ -395,12 +392,6 @@ gemma_7b = Model(
 )

-# gemma 2
-gemma_2 = Model(
-    name = 'gemma-2',
-    base_provider = 'Google',
-    best_provider = ChatHub
-)

 gemma_2_9b = Model(
     name = 'gemma-2-9b',
     base_provider = 'Google',
@@ -674,7 +665,7 @@ grok_2_mini = Model(
 sonar_online = Model(
     name = 'sonar-online',
     base_provider = 'Perplexity AI',
-    best_provider = IterListProvider([ChatHub, PerplexityLabs])
+    best_provider = IterListProvider([PerplexityLabs])
 )

 sonar_chat = Model(
@@ -992,7 +983,6 @@ class ModelUtils:
         'gemma-7b': gemma_7b,

         # gemma-2
-        'gemma-2': gemma_2,
         'gemma-2-9b': gemma_2_9b,

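With the `gemma_2` model definition gone, its registry entry is dropped here as well. As a hedged sketch of how this mapping is consumed (assuming the dict inside `ModelUtils` is named `convert`, as elsewhere in this file), model-name strings are simply looked up in it, so `'gemma-2'` no longer resolves after this commit:

```python
from g4f.models import ModelUtils

# Names still registered above resolve to Model objects...
model = ModelUtils.convert['gemma-2-9b']
print(model.name, model.base_provider)    # -> gemma-2-9b Google

# ...while the removed 'gemma-2' key now yields nothing.
print(ModelUtils.convert.get('gemma-2'))  # -> None after this commit
```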