Fix MyShell Provider

This commit is contained in:
Heiner Lohaus 2023-10-22 08:57:31 +02:00
parent a3af9fac3e
commit 13e89d6ab9
10 changed files with 115 additions and 30 deletions

View File

@ -83,8 +83,8 @@ class FakeGpt(AsyncGeneratorProvider):
line = json.loads(line)
if line["message"]["metadata"]["message_type"] == "next":
new_message = line["message"]["content"]["parts"][0]
yield new_message[len(last_message):]
last_message = new_message
yield new_message[len(last_message):]
last_message = new_message
except:
continue
if not last_message:

View File

@ -21,12 +21,12 @@ class GeekGpt(BaseProvider):
json_data = {
'messages': messages,
'model': model,
'temperature': kwargs.get('temperature', 0.9),
'presence_penalty': kwargs.get('presence_penalty', 0),
'top_p': kwargs.get('top_p', 1),
'frequency_penalty': kwargs.get('frequency_penalty', 0),
'stream': True
'model': model,
'temperature': kwargs.get('temperature', 0.9),
'presence_penalty': kwargs.get('presence_penalty', 0),
'top_p': kwargs.get('top_p', 1),
'frequency_penalty': kwargs.get('frequency_penalty', 0),
'stream': True
}
data = dumps(json_data, separators=(',', ':'))
@ -61,7 +61,6 @@ class GeekGpt(BaseProvider):
try:
content = json.loads(json_data)["choices"][0]["delta"].get("content")
except Exception as e:
raise RuntimeError(f'error | {e} :', json_data)

View File

@ -30,7 +30,7 @@ models = {
class Liaobots(AsyncGeneratorProvider):
url = "https://liaobots.site"
working = False
working = True
supports_gpt_35_turbo = True
supports_gpt_4 = True
_auth_code = None

89
g4f/Provider/MyShell.py Normal file
View File

@ -0,0 +1,89 @@
from __future__ import annotations
import time, random, json
from ..requests import StreamSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt
class MyShell(AsyncGeneratorProvider):
    # NOTE(review): by project convention `url` is usually the site base URL;
    # here it is the full send_message endpoint — confirm against other providers.
    url = "https://api.myshell.ai/v1/bot/chat/send_message"

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        timeout: int = 120,
        **kwargs
    ) -> AsyncResult:
        """Stream a chat reply from the MyShell bot API as text chunks.

        Sends the formatted prompt to the send_message endpoint and yields the
        ``content`` field of each SSE TEXT event until the stream-finished
        event arrives.  `model` is accepted for interface compatibility but is
        not forwarded to the API.
        """
        user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36"
        headers = {
            "User-Agent": user_agent,
            "Myshell-Service-Name": "organics-api",
            # Visitor-Id is a browser-style fingerprint derived from the UA.
            "Visitor-Id": generate_visitor_id(user_agent)
        }
        async with StreamSession(
            impersonate="chrome107",
            proxies={"https": proxy},
            timeout=timeout,
            headers=headers
        ) as session:
            payload = {
                "botId": "1",
                "conversation_scenario": 3,
                "message": format_prompt(messages),
                "messageType": 1
            }
            async with session.post(cls.url, json=payload) as response:
                response.raise_for_status()
                current_event = None
                async for raw_line in response.iter_lines():
                    # SSE framing: an "event: " line names the event that the
                    # following "data: " line(s) belong to.
                    if raw_line.startswith(b"event: "):
                        current_event = raw_line[7:]
                    elif current_event == b"MESSAGE_REPLY_SSE_ELEMENT_EVENT_NAME_TEXT" and raw_line.startswith(b"data: "):
                        yield json.loads(raw_line[6:])["content"]
                    if current_event == b"MESSAGE_REPLY_SSE_ELEMENT_EVENT_NAME_TEXT_STREAM_PUSH_FINISHED":
                        break
def xor_hash(B: str) -> str:
    """Hash *B* by XOR-folding its bytes in little-endian 4-byte words.

    Port of the JavaScript fingerprint hash used by myshell.ai: characters
    are pushed (newest first) into a buffer; every 4 characters the buffer
    is packed into a little-endian integer and XORed into the accumulator,
    with any trailing partial word folded in at the end.

    Returns the accumulator as a lowercase hex string without a ``0x``
    prefix ("0" for the empty string).

    Fix: the original inner helper iterated over its parameter's length but
    indexed the closed-over buffer instead of the parameter, and shadowed
    the outer loop names — it only worked because the argument was always
    that same buffer.  The helper now reads its own argument.
    """
    def fold(acc: int, chunk: list) -> int:
        # Pack chunk[0] as the lowest byte (little-endian), then XOR in.
        word = 0
        for idx, byte in enumerate(chunk):
            word |= byte << (8 * idx)
        return acc ^ word

    acc = 0
    pending = []
    for ch in B:
        # Newest byte goes to the front, so it ends up most significant.
        pending.insert(0, ord(ch) & 255)
        if len(pending) >= 4:
            acc = fold(acc, pending)
            pending = []
    if pending:
        acc = fold(acc, pending)
    return format(acc, "x")
def performance() -> str:
    """Emulate a browser performance timestamp for the visitor fingerprint.

    Captures the current millisecond epoch time, then busy-spins until the
    millisecond ticks over, counting iterations.  The spin count is
    timing-dependent and adds entropy.  Returns the concatenation of both
    values as bare hex strings.
    """
    start_ms = int(time.time() * 1000)
    spins = 0
    # Deliberate busy-wait: the iteration count is part of the output.
    while int(time.time() * 1000) == start_ms:
        spins += 1
    return f"{start_ms:x}{spins:x}"
def generate_visitor_id(user_agent: str) -> str:
    """Build a MyShell ``Visitor-Id`` fingerprint string.

    Joins, with dashes: a timing token (repeated first and last), a random
    hex token truncated by two trailing characters, the XOR hash of the
    user agent, and the hex of a 1920x1080 screen area.
    """
    timing = performance()
    # [2:-2] strips the "0x" prefix and the last two hex digits.
    rand_token = hex(int(random.random() * (16 ** 16)))[2:-2]
    ua_hash = xor_hash(user_agent)
    screen = hex(1920 * 1080)[2:]
    return f"{timing}-{rand_token}-{ua_hash}-{screen}-{timing}"

View File

@ -4,7 +4,6 @@ import json, base64, requests, execjs, random, uuid
from ..typing import Messages, TypedDict, CreateResult, Any
from .base_provider import BaseProvider
from abc import abstractmethod
from ..debug import logging
@ -15,12 +14,13 @@ class Vercel(BaseProvider):
supports_stream = True
@staticmethod
@abstractmethod
def create_completion(
model: str,
messages: Messages,
stream: bool,
proxy: str = None, **kwargs) -> CreateResult:
proxy: str = None,
**kwargs
) -> CreateResult:
if not model:
model = "gpt-3.5-turbo"
@ -65,7 +65,7 @@ class Vercel(BaseProvider):
headers=headers, json=json_data, stream=True, proxies={"https": proxy})
try:
response.raise_for_status()
except Exception:
except:
continue
for token in response.iter_content(chunk_size=None):
yield token.decode()

View File

@ -1,7 +1,7 @@
from __future__ import annotations
import random
from aiohttp import ClientSession
from ..requests import StreamSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, format_prompt
@ -19,13 +19,13 @@ class Yqcloud(AsyncGeneratorProvider):
proxy: str = None,
**kwargs,
) -> AsyncResult:
async with ClientSession(
headers=_create_header()
async with StreamSession(
headers=_create_header(), proxies={"https": proxy}
) as session:
payload = _create_payload(messages, **kwargs)
async with session.post("https://api.aichatos.cloud/api/generateStream", proxy=proxy, json=payload) as response:
async with session.post("https://api.aichatos.cloud/api/generateStream", json=payload) as response:
response.raise_for_status()
async for chunk in response.content.iter_any():
async for chunk in response.iter_content():
if chunk:
chunk = chunk.decode()
if "sorry, 您的ip已由于触发防滥用检测而被封禁" in chunk:
@ -38,6 +38,7 @@ def _create_header():
"accept" : "application/json, text/plain, */*",
"content-type" : "application/json",
"origin" : "https://chat9.yqcloud.top",
"referer" : "https://chat9.yqcloud.top/"
}

View File

@ -26,7 +26,7 @@ from .GptGo import GptGo
from .GptGod import GptGod
from .Liaobots import Liaobots
from .Llama2 import Llama2
from .Myshell import Myshell
from .MyShell import MyShell
from .NoowAi import NoowAi
from .Opchatgpts import Opchatgpts
from .Phind import Phind
@ -90,6 +90,7 @@ class ProviderUtils:
'Lockchat': Lockchat,
'MikuChat': MikuChat,
'Myshell': Myshell,
'MyShell': MyShell,
'NoowAi': NoowAi,
'Opchatgpts': Opchatgpts,
'OpenAssistant': OpenAssistant,
@ -159,6 +160,7 @@ __all__ = [
'Llama2',
'Lockchat',
'Myshell',
'MyShell',
'NoowAi',
'Opchatgpts',
'Raycast',

View File

@ -8,8 +8,8 @@ from aiohttp import ClientSession
from aiohttp.http import WSMsgType
import asyncio
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, format_prompt
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, format_prompt
models = {

View File

@ -12,4 +12,5 @@ from .V50 import V50
from .FastGpt import FastGpt
from .Aivvm import Aivvm
from .Vitalentum import Vitalentum
from .H2o import H2o
from .H2o import H2o
from .Myshell import Myshell

View File

@ -3,18 +3,13 @@ from dataclasses import dataclass
from .typing import Union
from .Provider import BaseProvider, RetryProvider
from .Provider import (
ChatgptLogin,
ChatgptDuo,
GptForLove,
ChatgptAi,
GptChatly,
Liaobots,
ChatgptX,
ChatBase,
Yqcloud,
GeekGpt,
FakeGpt,
Myshell,
FreeGpt,
NoowAi,
Vercel,
@ -23,9 +18,7 @@ from .Provider import (
AiAsk,
GptGo,
Phind,
Ylokh,
Bard,
Aibn,
Bing,
You,
H2o,