Adding new providers: AiChats.py, Nexra.py, Snova.py

kqlio67 2024-09-04 02:09:29 +03:00
parent 21c94f221d
commit 7483a7c310
5 changed files with 380 additions and 9 deletions

g4f/Provider/AiChats.py Normal file

@@ -0,0 +1,66 @@
from __future__ import annotations

from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt


class AiChats(AsyncGeneratorProvider):
    url = "https://ai-chats.org"
    api_endpoint = "https://ai-chats.org/chat/send2/"
    working = True
    supports_gpt_4 = True
    supports_message_history = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        headers = {
            "accept": "application/json, text/event-stream",
            "accept-language": "en-US,en;q=0.9",
            "cache-control": "no-cache",
            "content-type": "application/json",
            "origin": cls.url,
            "pragma": "no-cache",
            "referer": f"{cls.url}/chat/",
            "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Linux"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
        }
        async with ClientSession(headers=headers) as session:
            prompt = format_prompt(messages)
            data = {
                "type": "chat",
                "messagesHistory": [
                    {
                        "from": "you",
                        "content": prompt
                    }
                ]
            }
            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                response.raise_for_status()
                full_response = await response.text()

                # The endpoint replies with an SSE-style body; stitch the
                # payload of every non-empty "data: " line into one message.
                message = ""
                for line in full_response.split('\n'):
                    if line.startswith('data: ') and line != 'data: ':
                        message += line[6:]

                message = message.strip()
                yield message
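
For context, a minimal sketch of how a provider like this is typically consumed through its async generator interface. The model value below is only a placeholder: AiChats ignores the model argument and always talks to the same endpoint.

    import asyncio
    from g4f.Provider import AiChats

    async def main():
        # The generator yields the assembled response text.
        async for message in AiChats.create_async_generator(
            model="gpt-4",  # placeholder; this provider does not route by model
            messages=[{"role": "user", "content": "Hello!"}],
        ):
            print(message)

    asyncio.run(main())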

g4f/Provider/Nexra.py Normal file

@@ -0,0 +1,111 @@
from __future__ import annotations

import json

from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt


class Nexra(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://nexra.aryahcr.cc"
    api_endpoint = "https://nexra.aryahcr.cc/api/chat/gpt"
    working = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = 'gpt-3.5-turbo'
    models = [
        # Working with text
        'gpt-4',
        'gpt-4-0613',
        'gpt-4-32k',
        'gpt-4-0314',
        'gpt-4-32k-0314',
        'gpt-3.5-turbo',
        'gpt-3.5-turbo-16k',
        'gpt-3.5-turbo-0613',
        'gpt-3.5-turbo-16k-0613',
        'gpt-3.5-turbo-0301',
        'gpt-3',
        'text-davinci-003',
        'text-davinci-002',
        'code-davinci-002',
        'text-curie-001',
        'text-babbage-001',
        'text-ada-001',
        'davinci',
        'curie',
        'babbage',
        'ada',
        'babbage-002',
        'davinci-002',
    ]

    model_aliases = {
        # Dict keys must be unique: each family alias resolves to a single
        # canonical variant (requests for models listed above bypass this table).
        "gpt-4": "gpt-4-0613",
        "gpt-3.5-turbo": "gpt-3.5-turbo-16k",
        "gpt-3": "text-davinci-003",
    }

    @classmethod
    def get_model(cls, model: str) -> str:
        if model in cls.models:
            return model
        elif model in cls.model_aliases:
            return cls.model_aliases[model]
        else:
            return cls.default_model

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        model = cls.get_model(model)

        headers = {
            "Content-Type": "application/json",
        }
        async with ClientSession(headers=headers) as session:
            data = {
                "messages": messages,
                "prompt": format_prompt(messages),
                "model": model,
                "markdown": False,
                "stream": False,
            }
            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                response.raise_for_status()
                result = await response.text()
                json_result = json.loads(result)
                yield json_result["gpt"]
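
For context, a short sketch of how get_model resolves names against the table above: names listed in models are returned unchanged, known aliases map to one canonical variant, and anything unrecognized falls back to default_model.

    from g4f.Provider import Nexra

    print(Nexra.get_model('gpt-4-0613'))     # listed model, returned as-is
    print(Nexra.get_model('no-such-model'))  # unknown, falls back to 'gpt-3.5-turbo'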

g4f/Provider/Snova.py Normal file

@@ -0,0 +1,133 @@
from __future__ import annotations

import json
from typing import AsyncGenerator

from aiohttp import ClientSession

from ..typing import Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt


class Snova(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://fast.snova.ai"
    api_endpoint = "https://fast.snova.ai/api/completion"
    working = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = 'Meta-Llama-3.1-8B-Instruct'
    models = [
        'Meta-Llama-3.1-8B-Instruct',
        'Meta-Llama-3.1-70B-Instruct',
        'Meta-Llama-3.1-405B-Instruct',
        'Samba-CoE',
        'ignos/Mistral-T5-7B-v1',
        'v1olet/v1olet_merged_dpo_7B',
        'macadeliccc/WestLake-7B-v2-laser-truthy-dpo',
        'cookinai/DonutLM-v1',
    ]

    model_aliases = {
        "llama-3.1-8b": "Meta-Llama-3.1-8B-Instruct",
        "llama-3.1-70b": "Meta-Llama-3.1-70B-Instruct",
        "llama-3.1-405b": "Meta-Llama-3.1-405B-Instruct",
        "mistral-7b": "ignos/Mistral-T5-7B-v1",
        "samba-coe-v0.1": "Samba-CoE",
        "v1olet-merged-7b": "v1olet/v1olet_merged_dpo_7B",
        "westlake-7b-v2": "macadeliccc/WestLake-7B-v2-laser-truthy-dpo",
        "donutlm-v1": "cookinai/DonutLM-v1",
    }

    @classmethod
    def get_model(cls, model: str) -> str:
        if model in cls.models:
            return model
        elif model in cls.model_aliases:
            return cls.model_aliases[model]
        else:
            return cls.default_model

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncGenerator[str, None]:
        model = cls.get_model(model)

        headers = {
            "accept": "text/event-stream",
            "accept-language": "en-US,en;q=0.9",
            "cache-control": "no-cache",
            "content-type": "application/json",
            "origin": cls.url,
            "pragma": "no-cache",
            "priority": "u=1, i",
            "referer": f"{cls.url}/",
            "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Linux"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
        }
        async with ClientSession(headers=headers) as session:
            data = {
                "body": {
                    "messages": [
                        {
                            "role": "system",
                            "content": "You are a helpful assistant."
                        },
                        {
                            "role": "user",
                            "content": format_prompt(messages),
                            "id": "1-id",
                            "ref": "1-ref",
                            "revision": 1,
                            "draft": False,
                            "status": "done",
                            "enableRealTimeChat": False,
                            "meta": None
                        }
                    ],
                    "max_tokens": 1000,
                    "stop": ["<|eot_id|>"],
                    "stream": True,
                    "stream_options": {"include_usage": True},
                    "model": model
                },
                "env_type": "tp16"
            }
            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                response.raise_for_status()
                full_response = ""
                # The API answers with an OpenAI-style SSE stream; collect
                # each delta's content and yield the full text once at the end.
                async for line in response.content:
                    line = line.decode().strip()
                    if line.startswith("data: "):
                        chunk = line[6:]
                        if chunk == "[DONE]":
                            break
                        try:
                            json_data = json.loads(chunk)
                            choices = json_data.get("choices", [])
                            if choices:
                                delta = choices[0].get("delta", {})
                                content = delta.get("content", "")
                                full_response += content
                        except json.JSONDecodeError:
                            continue
                        except Exception as e:
                            print(f"Error processing chunk: {e}")
                            print(f"Problematic data: {chunk}")
                            continue
                yield full_response.strip()
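
The loop above assumes an OpenAI-style SSE stream. For context, a self-contained sketch of the same parsing logic over representative (hypothetical) chunks:

    import json

    sample_lines = [
        'data: {"choices": [{"delta": {"content": "Hel"}}]}',
        'data: {"choices": [{"delta": {"content": "lo"}}]}',
        'data: [DONE]',
    ]

    full_response = ""
    for line in sample_lines:
        chunk = line[len("data: "):]
        if chunk == "[DONE]":
            break
        delta = json.loads(chunk).get("choices", [{}])[0].get("delta", {})
        full_response += delta.get("content", "")

    print(full_response)  # -> Hello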

g4f/Provider/__init__.py

@@ -12,6 +12,7 @@ from .needs_auth import *
from .AI365VIP import AI365VIP
from .Allyfy import Allyfy
from .AiChatOnline import AiChatOnline
+from .AiChats import AiChats
from .Aura import Aura
from .Bing import Bing
from .BingCreateImages import BingCreateImages
@@ -42,11 +43,13 @@ from .MagickPenAsk import MagickPenAsk
from .MagickPenChat import MagickPenChat
from .MetaAI import MetaAI
from .MetaAIAccount import MetaAIAccount
+from .Nexra import Nexra
from .Ollama import Ollama
from .PerplexityLabs import PerplexityLabs
from .Pi import Pi
from .Pizzagpt import Pizzagpt
from .Reka import Reka
+from .Snova import Snova
from .Replicate import Replicate
from .ReplicateHome import ReplicateHome
from .Rocks import Rocks

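With these re-exports in place, the new providers are importable directly from the package namespace, e.g.:

    from g4f.Provider import AiChats, Nexra, Snova
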
g4f/models.py

@@ -29,6 +29,7 @@ from .Provider import (
    MagickPenAsk,
    MagickPenChat,
    MetaAI,
+    Nexra,
    OpenaiChat,
    PerplexityLabs,
    Pi,
@@ -36,6 +37,7 @@ from .Provider import (
    Reka,
    Replicate,
    ReplicateHome,
+    Snova,
    TeachAnything,
    TwitterBio,
    Upstage,
@@ -86,20 +88,28 @@ default = Model(
############
### OpenAI ###
### GPT-3.5 / GPT-4 ###
+# gpt-3
+gpt_3 = Model(
+    name = 'gpt-3',
+    base_provider = 'OpenAI',
+    best_provider = IterListProvider([
+        Nexra,
+    ])
+)
+
# gpt-3.5
gpt_35_turbo = Model(
    name = 'gpt-3.5-turbo',
-    base_provider = 'openai',
+    base_provider = 'OpenAI',
    best_provider = IterListProvider([
-        Allyfy, TwitterBio,
+        Allyfy, TwitterBio, Nexra,
    ])
)

# gpt-4
gpt_4o = Model(
    name = 'gpt-4o',
-    base_provider = 'openai',
+    base_provider = 'OpenAI',
    best_provider = IterListProvider([
        Liaobots, Chatgpt4o, OpenaiChat,
    ])
@@ -107,7 +117,7 @@ gpt_4o = Model(
gpt_4o_mini = Model(
    name = 'gpt-4o-mini',
-    base_provider = 'openai',
+    base_provider = 'OpenAI',
    best_provider = IterListProvider([
        DDG, Liaobots, You, FreeNetfly, MagickPenAsk, MagickPenChat, Pizzagpt, ChatgptFree, AiChatOnline, OpenaiChat, Koala,
    ])
@@ -115,17 +125,17 @@ gpt_4o_mini = Model(
gpt_4_turbo = Model(
    name = 'gpt-4-turbo',
-    base_provider = 'openai',
+    base_provider = 'OpenAI',
    best_provider = IterListProvider([
-        Liaobots, Bing
+        Nexra, Liaobots, Bing
    ])
)

gpt_4 = Model(
    name = 'gpt-4',
-    base_provider = 'openai',
+    base_provider = 'OpenAI',
    best_provider = IterListProvider([
-        Chatgpt4Online, Bing,
+        Chatgpt4Online, Nexra, Bing,
        gpt_4_turbo.best_provider, gpt_4o.best_provider, gpt_4o_mini.best_provider
    ])
)
@@ -358,6 +368,35 @@ pi = Model(
    best_provider = Pi
)

+### SambaNova ###
+samba_coe_v0_1 = Model(
+    name = 'samba-coe-v0.1',
+    base_provider = 'SambaNova',
+    best_provider = Snova
+)
+
+### Trong-Hieu Nguyen-Mau ###
+v1olet_merged_7b = Model(
+    name = 'v1olet-merged-7b',
+    base_provider = 'Trong-Hieu Nguyen-Mau',
+    best_provider = Snova
+)
+
+### Macadeliccc ###
+westlake_7b_v2 = Model(
+    name = 'westlake-7b-v2',
+    base_provider = 'Macadeliccc',
+    best_provider = Snova
+)
+
+### CookinAI ###
+donutlm_v1 = Model(
+    name = 'donutlm-v1',
+    base_provider = 'CookinAI',
+    best_provider = Snova
+)

#############
### Image ###
@@ -436,6 +475,9 @@ class ModelUtils:
        ############
        ### OpenAI ###
+        # gpt-3
+        'gpt-3': gpt_3,
+
        # gpt-3.5
        'gpt-3.5-turbo': gpt_35_turbo,
@@ -538,6 +580,22 @@ class ModelUtils:
        'pi': pi,

+        ### SambaNova ###
+        'samba-coe-v0.1': samba_coe_v0_1,
+
+        ### Trong-Hieu Nguyen-Mau ###
+        'v1olet-merged-7b': v1olet_merged_7b,
+
+        ### Macadeliccc ###
+        'westlake-7b-v2': westlake_7b_v2,
+
+        ### CookinAI ###
+        'donutlm-v1': donutlm_v1,
+
        #############
        ### Image ###
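
For context, a minimal sketch of how these registry entries are resolved at runtime through ModelUtils.convert, the name-to-Model mapping that the keys above are added to:

    from g4f.models import ModelUtils

    model = ModelUtils.convert['samba-coe-v0.1']
    print(model.name, model.base_provider)  # samba-coe-v0.1 SambaNova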