Mirror of https://github.com/xtekky/gpt4free.git — synced 2024-12-23 19:11:48 +03:00
Add support for all models
Add AbstractProvider class
Add ProviderType type
Add get_last_provider function
Add version module and VersionUtils
Display used provider in gui
Fix error response in api
This commit is contained in:
parent
ebc522150e
commit
c617b18d12
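The changes below consolidate every provider behind one interface: AbstractProvider, AsyncProvider, and AsyncGeneratorProvider now all derive from a new BaseProvider ABC in g4f/base_provider.py, and the resolved provider is recorded so callers can ask which one answered. A minimal usage sketch of the new surface (a hedged illustration, assuming a g4f build that contains this commit; the model string is only an example):

import g4f

# Model resolution goes through ModelUtils/ProviderUtils and records
# the chosen provider in g4f.debug.last_provider.
response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",  # illustrative model name
    messages=[{"role": "user", "content": "Hello"}],
)
print(response)

# get_last_provider(True) returns {"name": ..., "url": ...}; the GUI and
# the API response objects use it to display the provider that answered.
print(g4f.get_last_provider(True))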
@@ -4,12 +4,12 @@ import time
 import random

 from ..typing import CreateResult, Messages
-from .base_provider import BaseProvider
+from .base_provider import AbstractProvider
 from .helper import format_prompt, get_random_string
 from ..webdriver import WebDriver, WebDriverSession
 from .. import debug

-class AItianhuSpace(BaseProvider):
+class AItianhuSpace(AbstractProvider):
     url = "https://chat3.aiyunos.top/"
     working = True
     supports_stream = True
@@ -2,9 +2,9 @@ from __future__ import annotations

 import requests, json
 from ..typing import CreateResult, Messages
-from .base_provider import BaseProvider
+from .base_provider import AbstractProvider

-class DeepInfra(BaseProvider):
+class DeepInfra(AbstractProvider):
     url: str = "https://deepinfra.com"
     working: bool = True
     supports_stream: bool = True
@@ -14,8 +14,10 @@ class DeepInfra(BaseProvider):
     def create_completion(model: str,
                           messages: Messages,
                           stream: bool,
                           auth: str = None,
                           **kwargs) -> CreateResult:
+        if not model:
+            model = 'meta-llama/Llama-2-70b-chat-hf'
         headers = {
             'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
             'Cache-Control': 'no-cache',
@@ -34,9 +36,11 @@ class DeepInfra(BaseProvider):
             'sec-ch-ua-mobile': '?0',
             'sec-ch-ua-platform': '"macOS"',
         }
+        if auth:
+            headers['Authorization'] = f"bearer {auth}"

         json_data = json.dumps({
-            'model'   : 'meta-llama/Llama-2-70b-chat-hf',
+            'model'   : model,
             'messages': messages,
             'stream'  : True}, separators=(',', ':'))
@@ -45,18 +49,17 @@ class DeepInfra(BaseProvider):

         response.raise_for_status()
         first = True

-        for line in response.iter_content(chunk_size=1024):
+        for line in response.content:
             if line.startswith(b"data: [DONE]"):
                 break
             elif line.startswith(b"data: "):
-                chunk = json.loads(line[6:])["choices"][0]["delta"].get("content")
+                try:
+                    chunk = json.loads(line[6:])["choices"][0]["delta"].get("content")
+                except Exception:
+                    raise RuntimeError(f"Response: {line}")
                 if chunk:
                     if first:
                         chunk = chunk.lstrip()
                         if chunk:
                             first = False
-                    yield (chunk)
+                    yield chunk
@@ -1,12 +1,12 @@
 from __future__ import annotations
 import requests, json

-from .base_provider import BaseProvider
+from .base_provider import AbstractProvider
 from ..typing import CreateResult, Messages
 from json import dumps

-class GeekGpt(BaseProvider):
+class GeekGpt(AbstractProvider):
     url = 'https://chat.geekgpt.org'
     working = True
     supports_message_history = True
@@ -3,11 +3,11 @@ from __future__ import annotations
 import time, json

 from ..typing import CreateResult, Messages
-from .base_provider import BaseProvider
+from .base_provider import AbstractProvider
 from .helper import format_prompt
 from ..webdriver import WebDriver, WebDriverSession, bypass_cloudflare

-class MyShell(BaseProvider):
+class MyShell(AbstractProvider):
     url = "https://app.myshell.ai/chat"
     working = True
     supports_gpt_35_turbo = True
@@ -7,11 +7,11 @@ from selenium.webdriver.support import expected_conditions as EC
 from selenium.webdriver.common.keys import Keys

 from ..typing import CreateResult, Messages
-from .base_provider import BaseProvider
+from .base_provider import AbstractProvider
 from .helper import format_prompt
 from ..webdriver import WebDriver, WebDriverSession

-class PerplexityAi(BaseProvider):
+class PerplexityAi(AbstractProvider):
     url = "https://www.perplexity.ai"
     working = True
     supports_gpt_35_turbo = True
@@ -1,12 +1,12 @@
 from __future__ import annotations

-from ..typing import CreateResult, Messages
-from .base_provider import BaseProvider, format_prompt
-
 import json

+from ..typing import CreateResult, Messages
+from .base_provider import AbstractProvider, format_prompt
+from ..requests import Session, get_session_from_browser

-class Pi(BaseProvider):
+class Pi(AbstractProvider):
     url = "https://pi.ai/talk"
     working = True
     supports_stream = True
@@ -3,10 +3,10 @@ from __future__ import annotations
 import time, json, time

 from ..typing import CreateResult, Messages
-from .base_provider import BaseProvider
+from .base_provider import AbstractProvider
 from ..webdriver import WebDriver, WebDriverSession

-class TalkAi(BaseProvider):
+class TalkAi(AbstractProvider):
     url = "https://talkai.info"
     working = True
     supports_gpt_35_turbo = True
@@ -3,10 +3,10 @@ from __future__ import annotations
 import json, base64, requests, execjs, random, uuid

 from ..typing import Messages, TypedDict, CreateResult, Any
-from .base_provider import BaseProvider
+from .base_provider import AbstractProvider
 from ..debug import logging

-class Vercel(BaseProvider):
+class Vercel(AbstractProvider):
     url = 'https://sdk.vercel.ai'
     working = False
     supports_message_history = True
@@ -1,11 +1,13 @@
 from __future__ import annotations

-from .base_provider import BaseProvider, AsyncProvider, AsyncGeneratorProvider
-from .retry_provider import RetryProvider
-from .deprecated import *
-from .needs_auth import *
-from .unfinished import *
-from .selenium import *
+from ..base_provider import BaseProvider, ProviderType
+from .retry_provider import RetryProvider
+from .base_provider import AsyncProvider, AsyncGeneratorProvider
+from .deprecated import *
+from .needs_auth import *
+from .unfinished import *
+from .selenium import *

 from .Aura import Aura
 from .AiAsk import AiAsk
 from .Aichat import Aichat
@@ -59,7 +61,7 @@ __modules__: list = [
     getattr(sys.modules[__name__], provider) for provider in dir()
     if not provider.startswith("__")
 ]
-__providers__: list[type[BaseProvider]] = [
+__providers__: list[ProviderType] = [
     provider for provider in __modules__
     if isinstance(provider, type)
     and issubclass(provider, BaseProvider)
@@ -67,9 +69,9 @@ __providers__: list[type[BaseProvider]] = [
 __all__: list[str] = [
     provider.__name__ for provider in __providers__
 ]
-__map__: dict[str, type[BaseProvider]] = dict([
+__map__: dict[str, ProviderType] = dict([
     (provider.__name__, provider) for provider in __providers__
 ])

 class ProviderUtils:
-    convert: dict[str, type[BaseProvider]] = __map__
+    convert: dict[str, ProviderType] = __map__
@@ -1,12 +1,14 @@
 from __future__ import annotations

 import sys
 import asyncio
 from asyncio import AbstractEventLoop
 from concurrent.futures import ThreadPoolExecutor
-from abc import ABC, abstractmethod
+from abc import abstractmethod
 from inspect import signature, Parameter
 from .helper import get_event_loop, get_cookies, format_prompt
-from ..typing import CreateResult, AsyncResult, Messages
+from ..typing import CreateResult, AsyncResult, Messages, Union
+from ..base_provider import BaseProvider

 if sys.version_info < (3, 10):
     NoneType = type(None)
@@ -20,25 +22,7 @@ if sys.platform == 'win32':
     ):
         asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())

-class BaseProvider(ABC):
-    url: str
-    working: bool = False
-    needs_auth: bool = False
-    supports_stream: bool = False
-    supports_gpt_35_turbo: bool = False
-    supports_gpt_4: bool = False
-    supports_message_history: bool = False
-
-    @staticmethod
-    @abstractmethod
-    def create_completion(
-        model: str,
-        messages: Messages,
-        stream: bool,
-        **kwargs
-    ) -> CreateResult:
-        raise NotImplementedError()
-
+class AbstractProvider(BaseProvider):
     @classmethod
     async def create_async(
         cls,
@@ -60,9 +44,12 @@ class BaseProvider(ABC):
             **kwargs
         ))

-        return await loop.run_in_executor(
-            executor,
-            create_func
+        return await asyncio.wait_for(
+            loop.run_in_executor(
+                executor,
+                create_func
+            ),
+            timeout=kwargs.get("timeout", 0)
         )

     @classmethod
@@ -102,16 +89,19 @@ class BaseProvider(ABC):
         return f"g4f.Provider.{cls.__name__} supports: ({args}\n)"


-class AsyncProvider(BaseProvider):
+class AsyncProvider(AbstractProvider):
     @classmethod
     def create_completion(
         cls,
         model: str,
         messages: Messages,
         stream: bool = False,
+        *,
+        loop: AbstractEventLoop = None,
         **kwargs
     ) -> CreateResult:
-        loop = get_event_loop()
+        if not loop:
+            loop = get_event_loop()
         coro = cls.create_async(model, messages, **kwargs)
         yield loop.run_until_complete(coro)

@@ -134,9 +124,12 @@ class AsyncGeneratorProvider(AsyncProvider):
         model: str,
         messages: Messages,
         stream: bool = True,
+        *,
+        loop: AbstractEventLoop = None,
         **kwargs
     ) -> CreateResult:
-        loop = get_event_loop()
+        if not loop:
+            loop = get_event_loop()
         generator = cls.create_async_generator(
             model,
             messages,
@@ -171,6 +164,7 @@ class AsyncGeneratorProvider(AsyncProvider):
     def create_async_generator(
         model: str,
         messages: Messages,
+        stream: bool = True,
         **kwargs
     ) -> AsyncResult:
         raise NotImplementedError()
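Under the refactor above, a synchronous provider subclasses AbstractProvider and implements only create_completion; create_async is inherited and runs the generator in a thread pool behind asyncio.wait_for. A minimal sketch under that assumption (EchoProvider and its URL are hypothetical, not part of the commit):

from g4f.Provider.base_provider import AbstractProvider
from g4f.typing import CreateResult, Messages

class EchoProvider(AbstractProvider):  # hypothetical example provider
    url = "https://example.org"        # placeholder, not a real endpoint
    working = True
    supports_stream = True

    @classmethod
    def create_completion(cls, model: str, messages: Messages,
                          stream: bool, **kwargs) -> CreateResult:
        # Yield chunks; the inherited create_async collects them
        # in an executor with the optional timeout kwarg.
        yield messages[-1]["content"]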
@@ -3,10 +3,10 @@ from __future__ import annotations
 import requests

 from ...typing import Any, CreateResult, Messages
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider


-class AiService(BaseProvider):
+class AiService(AbstractProvider):
     url = "https://aiservice.vercel.app/"
     working = False
     supports_gpt_35_turbo = True
@@ -1,9 +1,10 @@
 from __future__ import annotations
-import requests

-from ..base_provider import BaseProvider
+import requests
+import json
+
+from ..base_provider import AbstractProvider
 from ...typing import CreateResult, Messages
-from json import dumps

 # to recreate this easily, send a post request to https://chat.aivvm.com/api/models
 models = {
@@ -17,7 +18,7 @@ models = {
     'gpt-4-32k-0613': {'id': 'gpt-4-32k-0613', 'name': 'GPT-4-32K-0613'},
 }

-class Aivvm(BaseProvider):
+class Aivvm(AbstractProvider):
     url             = 'https://chat.aivvm.com'
     supports_stream = True
     working         = False
@@ -44,7 +45,7 @@ class Aivvm(BaseProvider):
         "temperature" : kwargs.get("temperature", 0.7)
     }

-    data = dumps(json_data)
+    data = json.dumps(json_data)

     headers = {
         "accept" : "text/event-stream",
@@ -7,10 +7,10 @@ import time
 import requests

 from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider


-class DfeHub(BaseProvider):
+class DfeHub(AbstractProvider):
     url = "https://chat.dfehub.com/"
     supports_stream = True
     supports_gpt_35_turbo = True
@@ -2,14 +2,13 @@ from __future__ import annotations

 import json
 import random
-
 import requests

 from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider


-class EasyChat(BaseProvider):
+class EasyChat(AbstractProvider):
     url: str = "https://free.easychat.work"
     supports_stream = True
     supports_gpt_35_turbo = True
@@ -6,10 +6,10 @@ from abc import ABC, abstractmethod
 import requests

 from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider


-class Equing(BaseProvider):
+class Equing(AbstractProvider):
     url: str = 'https://next.eqing.tech/'
     working = False
     supports_stream = True
@@ -2,15 +2,13 @@ from __future__ import annotations

 import json
 import random
-from abc import ABC, abstractmethod
-
 import requests

 from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider


-class FastGpt(BaseProvider):
+class FastGpt(AbstractProvider):
     url: str = 'https://chat9.fastgpt.me/'
     working = False
     needs_auth = False
@@ -19,7 +17,6 @@ class FastGpt(BaseProvider):
     supports_gpt_4 = False

     @staticmethod
-    @abstractmethod
     def create_completion(
         model: str,
         messages: list[dict[str, str]],
@@ -5,10 +5,10 @@ import json
 import requests

 from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider


-class Forefront(BaseProvider):
+class Forefront(AbstractProvider):
     url = "https://forefront.com"
     supports_stream = True
     supports_gpt_35_turbo = True
@@ -11,10 +11,10 @@ except ImportError:
     from Cryptodome.Cipher import AES

 from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider


-class GetGpt(BaseProvider):
+class GetGpt(AbstractProvider):
     url = 'https://chat.getgpt.world/'
     supports_stream = True
     working = False
@@ -5,10 +5,10 @@ import json
 import requests

 from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider


-class Lockchat(BaseProvider):
+class Lockchat(AbstractProvider):
     url: str = "http://supertest.lockchat.app"
     supports_stream = True
     supports_gpt_35_turbo = True
@@ -5,10 +5,10 @@ import uuid
 import requests

 from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider


-class V50(BaseProvider):
+class V50(AbstractProvider):
     url = 'https://p5.v50.ltd'
     supports_gpt_35_turbo = True
     supports_stream = False
@@ -2,13 +2,11 @@ from __future__ import annotations

 import json
 import requests
-from .base_provider import BaseProvider
-from ..typing import Messages, CreateResult
-from .helper import get_cookies
+from ..base_provider import AbstractProvider
+from ...typing import Messages, CreateResult


-class VoiGpt(BaseProvider):
+class VoiGpt(AbstractProvider):
     """
     VoiGpt - A provider for VoiGpt.com
@@ -5,10 +5,10 @@ import random
 import requests

 from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider, format_prompt
+from ..base_provider import AbstractProvider, format_prompt


-class Wuguokai(BaseProvider):
+class Wuguokai(AbstractProvider):
     url = 'https://chat.wuguokai.xyz'
     supports_gpt_35_turbo = True
     working = False
@@ -1,6 +1,5 @@
 from __future__ import annotations

 import sys
 import asyncio
-import webbrowser
 import random
@@ -8,7 +7,7 @@ import string
 import secrets
 import os
 from os import path
-from asyncio import AbstractEventLoop
+from asyncio import AbstractEventLoop, BaseEventLoop
 from platformdirs import user_config_dir
 from browser_cookie3 import (
     chrome,
@@ -34,7 +33,8 @@ _cookies: Dict[str, Dict[str, str]] = {}
 def get_event_loop() -> AbstractEventLoop:
     try:
         loop = asyncio.get_event_loop()
-        loop._check_closed()
+        if isinstance(loop, BaseEventLoop):
+            loop._check_closed()
     except RuntimeError:
         loop = asyncio.new_event_loop()
         asyncio.set_event_loop(loop)
@@ -8,11 +8,11 @@ from selenium.webdriver.support import expected_conditions as EC
 from selenium.webdriver.common.keys import Keys

 from ...typing import CreateResult, Messages
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider
 from ..helper import format_prompt
 from ...webdriver import WebDriver, WebDriverSession

-class Bard(BaseProvider):
+class Bard(AbstractProvider):
     url = "https://bard.google.com"
     working = True
     needs_auth = True
@@ -8,6 +8,9 @@ from ...typing import AsyncResult, Messages
 from ..base_provider import AsyncGeneratorProvider
 from ..helper import format_prompt, get_cookies

+map = {
+    "openchat/openchat_3.5": "openchat/openchat-3.5-1210",
+}

 class HuggingChat(AsyncGeneratorProvider):
     url = "https://huggingface.co/chat"
@@ -25,7 +28,10 @@ class HuggingChat(AsyncGeneratorProvider):
         cookies: dict = None,
         **kwargs
     ) -> AsyncResult:
-        model = model if model else cls.model
+        if not model:
+            model = cls.model
+        elif model in map:
+            model = map[model]
         if not cookies:
             cookies = get_cookies(".huggingface.co")
@@ -3,7 +3,7 @@ from __future__ import annotations
 import time

 from ...typing import CreateResult, Messages
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider
 from ..helper import format_prompt
 from ...webdriver import WebDriver, WebDriverSession

@@ -20,7 +20,7 @@ models = {
     "palm": {"name": "Google-PaLM"},
 }

-class Poe(BaseProvider):
+class Poe(AbstractProvider):
     url = "https://poe.com"
     working = True
     needs_auth = True
@@ -5,10 +5,10 @@ import json
 import requests

 from ...typing import CreateResult, Messages
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider


-class Raycast(BaseProvider):
+class Raycast(AbstractProvider):
     url = "https://raycast.com"
     supports_gpt_35_turbo = True
     supports_gpt_4 = True
@@ -3,7 +3,7 @@ from __future__ import annotations
 import time

 from ...typing import CreateResult, Messages
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider
 from ..helper import format_prompt
 from ...webdriver import WebDriver, WebDriverSession

@@ -31,7 +31,7 @@ models = {
     "qwen-7b-chat": "Qwen 7B"
 }

-class Theb(BaseProvider):
+class Theb(AbstractProvider):
     url = "https://beta.theb.ai"
     working = True
     supports_gpt_35_turbo = True
@@ -3,7 +3,7 @@ from __future__ import annotations
 import requests

 from ...typing import Any, CreateResult, Messages
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider

 models = {
     "theb-ai": "TheB.AI",
@@ -29,7 +29,7 @@ models = {
     "qwen-7b-chat": "Qwen 7B"
 }

-class ThebApi(BaseProvider):
+class ThebApi(AbstractProvider):
     url = "https://theb.ai"
     working = True
     needs_auth = True
@@ -2,26 +2,13 @@ from __future__ import annotations

 import asyncio
 import random
-from typing import List, Type, Dict
 from ..typing import CreateResult, Messages
-from .base_provider import BaseProvider, AsyncProvider
+from ..base_provider import BaseRetryProvider
 from .. import debug
 from ..errors import RetryProviderError, RetryNoProviderError

-class RetryProvider(AsyncProvider):
-    __name__: str = "RetryProvider"
-    supports_stream: bool = True
-
-    def __init__(
-        self,
-        providers: List[Type[BaseProvider]],
-        shuffle: bool = True
-    ) -> None:
-        self.providers: List[Type[BaseProvider]] = providers
-        self.shuffle: bool = shuffle
-        self.working = True
-
+class RetryProvider(BaseRetryProvider):
     def create_completion(
         self,
         model: str,
@@ -36,20 +23,18 @@ class RetryProvider(AsyncProvider):
         if self.shuffle:
             random.shuffle(providers)

-        self.exceptions: Dict[str, Exception] = {}
+        self.exceptions = {}
         started: bool = False
         for provider in providers:
+            self.last_provider = provider
             try:
                 if debug.logging:
                     print(f"Using {provider.__name__} provider")
-
                 for token in provider.create_completion(model, messages, stream, **kwargs):
                     yield token
                     started = True
-
                 if started:
                     return
-
             except Exception as e:
                 self.exceptions[provider.__name__] = e
                 if debug.logging:
@@ -69,8 +54,9 @@ class RetryProvider(AsyncProvider):
         if self.shuffle:
             random.shuffle(providers)

-        self.exceptions: Dict[str, Exception] = {}
+        self.exceptions = {}
         for provider in providers:
+            self.last_provider = provider
             try:
                 return await asyncio.wait_for(
                     provider.create_async(model, messages, **kwargs),
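The retry loop now only orchestrates: construction, the exceptions dict, and last_provider bookkeeping live on BaseRetryProvider (added later in g4f/base_provider.py). A sketch of building and inspecting a chain (the choice of providers is illustrative):

from g4f.Provider import RetryProvider, Llama2, DeepInfra

chain = RetryProvider([Llama2, DeepInfra], shuffle=True)

# After a completion attempt, the chain exposes which provider ran last
# and any per-provider exceptions collected along the way.
print(chain.last_provider)   # provider class, or None before any call
print(chain.exceptions)      # e.g. {"Llama2": <exception instance>}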
@@ -4,11 +4,11 @@ import time
 from urllib.parse import quote

 from ...typing import CreateResult, Messages
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider
 from ..helper import format_prompt
 from ...webdriver import WebDriver, WebDriverSession

-class Phind(BaseProvider):
+class Phind(AbstractProvider):
     url = "https://www.phind.com"
     working = True
     supports_gpt_4 = True
@@ -3,11 +3,11 @@ from __future__ import annotations
 from urllib.parse import unquote

 from ...typing import AsyncResult, Messages
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider
 from ...webdriver import WebDriver
 from ...requests import Session, get_session_from_browser

-class AiChatting(BaseProvider):
+class AiChatting(AbstractProvider):
     url = "https://www.aichatting.net"
     supports_gpt_35_turbo = True
     _session: Session = None
@@ -4,49 +4,57 @@ import os

 from .errors import *
 from .models import Model, ModelUtils, _all_models
-from .Provider import BaseProvider, AsyncGeneratorProvider, RetryProvider, ProviderUtils
-from .typing import Messages, CreateResult, AsyncResult, Union, List
-from . import debug
+from .Provider import AsyncGeneratorProvider, ProviderUtils
+from .typing import Messages, CreateResult, AsyncResult, Union
+from . import debug, version
+from .base_provider import BaseRetryProvider, ProviderType

 def get_model_and_provider(model    : Union[Model, str],
-                           provider : Union[type[BaseProvider], str, None],
+                           provider : Union[ProviderType, str, None],
                            stream   : bool,
-                           ignored  : List[str] = None,
+                           ignored  : list[str] = None,
                            ignore_working: bool = False,
-                           ignore_stream: bool = False) -> tuple[Model, type[BaseProvider]]:
+                           ignore_stream: bool = False) -> tuple[str, ProviderType]:
     if debug.version_check:
         debug.version_check = False
-        debug.check_pypi_version()
+        version.utils.check_pypi_version()

     if isinstance(provider, str):
         if provider in ProviderUtils.convert:
             provider = ProviderUtils.convert[provider]
         else:
             raise ProviderNotFoundError(f'Provider not found: {provider}')

-    if isinstance(model, str):
-        if model in ModelUtils.convert:
-            model = ModelUtils.convert[model]
-        else:
-            raise ModelNotFoundError(f'The model: {model} does not exist')
-
     if not provider:
+        if isinstance(model, str):
+            if model in ModelUtils.convert:
+                model = ModelUtils.convert[model]
+            else:
+                raise ModelNotFoundError(f'Model not found: {model}')
         provider = model.best_provider

-    if isinstance(provider, RetryProvider) and ignored:
-        provider.providers = [p for p in provider.providers if p.__name__ not in ignored]
-
     if not provider:
         raise ProviderNotFoundError(f'No provider found for model: {model}')

-    if not provider.working and not ignore_working:
+    if isinstance(model, Model):
+        model = model.name
+
+    if ignored and isinstance(provider, BaseRetryProvider):
+        provider.providers = [p for p in provider.providers if p.__name__ not in ignored]
+
+    if not ignore_working and not provider.working:
         raise ProviderNotWorkingError(f'{provider.__name__} is not working')

     if not ignore_stream and not provider.supports_stream and stream:
         raise StreamNotSupportedError(f'{provider.__name__} does not support "stream" argument')

     if debug.logging:
-        print(f'Using {provider.__name__} provider')
+        if model:
+            print(f'Using {provider.__name__} provider and {model} model')
+        else:
+            print(f'Using {provider.__name__} provider')
+
+    debug.last_provider = provider

     return model, provider
@@ -54,10 +62,10 @@ class ChatCompletion:
     @staticmethod
     def create(model    : Union[Model, str],
                messages : Messages,
-               provider : Union[type[BaseProvider], str, None] = None,
+               provider : Union[ProviderType, str, None] = None,
                stream   : bool = False,
                auth     : Union[str, None] = None,
-               ignored  : List[str] = None,
+               ignored  : list[str] = None,
                ignore_working: bool = False,
                ignore_stream_and_auth: bool = False,
                **kwargs) -> Union[CreateResult, str]:
@@ -75,32 +83,33 @@ class ChatCompletion:
         if proxy:
             kwargs['proxy'] = proxy

-        result = provider.create_completion(model.name, messages, stream, **kwargs)
+        result = provider.create_completion(model, messages, stream, **kwargs)
         return result if stream else ''.join(result)

     @staticmethod
-    async def create_async(model    : Union[Model, str],
-                           messages : Messages,
-                           provider : Union[type[BaseProvider], str, None] = None,
-                           stream   : bool = False,
-                           ignored  : List[str] = None,
-                           **kwargs) -> Union[AsyncResult, str]:
+    def create_async(model    : Union[Model, str],
+                     messages : Messages,
+                     provider : Union[ProviderType, str, None] = None,
+                     stream   : bool = False,
+                     ignored  : list[str] = None,
+                     **kwargs) -> Union[AsyncResult, str]:

         model, provider = get_model_and_provider(model, provider, False, ignored)

         if stream:
             if isinstance(provider, type) and issubclass(provider, AsyncGeneratorProvider):
-                return await provider.create_async_generator(model.name, messages, **kwargs)
+                return provider.create_async_generator(model, messages, **kwargs)
             raise StreamNotSupportedError(f'{provider.__name__} does not support "stream" argument in "create_async"')

-        return await provider.create_async(model.name, messages, **kwargs)
+        return provider.create_async(model, messages, **kwargs)

 class Completion:
     @staticmethod
     def create(model    : Union[Model, str],
                prompt   : str,
-               provider : Union[type[BaseProvider], None] = None,
+               provider : Union[ProviderType, None] = None,
                stream   : bool = False,
-               ignored  : List[str] = None, **kwargs) -> Union[CreateResult, str]:
+               ignored  : list[str] = None, **kwargs) -> Union[CreateResult, str]:

         allowed_models = [
             'code-davinci-002',
@@ -111,10 +120,18 @@ class Completion:
             'text-davinci-003'
         ]
         if model not in allowed_models:
-            raise ModelNotAllowed(f'Can\'t use {model} with Completion.create()')
+            raise ModelNotAllowedError(f'Can\'t use {model} with Completion.create()')

         model, provider = get_model_and_provider(model, provider, stream, ignored)

-        result = provider.create_completion(model.name, [{"role": "user", "content": prompt}], stream, **kwargs)
+        result = provider.create_completion(model, [{"role": "user", "content": prompt}], stream, **kwargs)

         return result if stream else ''.join(result)
+
+def get_last_provider(as_dict: bool = False) -> ProviderType:
+    last = debug.last_provider
+    if isinstance(last, BaseRetryProvider):
+        last = last.last_provider
+    if last and as_dict:
+        return {"name": last.__name__, "url": last.url}
+    return last
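Note that get_model_and_provider now returns the model as a plain string and stashes the provider in debug.last_provider, which is what get_last_provider reads back. A small sketch of that round trip (assumes this commit; the model name is illustrative):

from g4f import get_model_and_provider, get_last_provider

model, provider = get_model_and_provider("gpt-3.5-turbo", None, False)
print(model)                # plain model name string, no Model object
print(provider.__name__)    # resolved provider class (or a RetryProvider chain)

print(get_last_provider(True))  # {"name": ..., "url": ...}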
@@ -1,23 +1,25 @@
 import ast
 import logging
-
-from fastapi import FastAPI, Response, Request
-from fastapi.responses import StreamingResponse
-from typing import List, Union, Any, Dict, AnyStr
-#from ._tokenizer import tokenize
-from .. import BaseProvider
-
 import time
 import json
 import random
 import string
 import uvicorn
 import nest_asyncio

+from fastapi import FastAPI, Response, Request
+from fastapi.responses import StreamingResponse
+from typing import List, Union, Any, Dict, AnyStr
+#from ._tokenizer import tokenize

 import g4f
+from .. import debug
+
+debug.logging = True

 class Api:
     def __init__(self, engine: g4f, debug: bool = True, sentry: bool = False,
-                 list_ignored_providers: List[Union[str, BaseProvider]] = None) -> None:
+                 list_ignored_providers: List[str] = None) -> None:
         self.engine = engine
         self.debug = debug
         self.sentry = sentry
@@ -75,7 +77,10 @@ class Api:
         }

         # item contains byte keys, and dict.get suppresses error
-        item_data.update({key.decode('utf-8') if isinstance(key, bytes) else key: str(value) for key, value in (item or {}).items()})
+        item_data.update({
+            key.decode('utf-8') if isinstance(key, bytes) else key: str(value)
+            for key, value in (item or {}).items()
+        })
         # messages is str, need dict
         if isinstance(item_data.get('messages'), str):
             item_data['messages'] = ast.literal_eval(item_data.get('messages'))
@@ -96,7 +101,12 @@ class Api:
             )
         except Exception as e:
             logging.exception(e)
-            return Response(content=json.dumps({"error": "An error occurred while generating the response."}, indent=4), media_type="application/json")
+            content = json.dumps({
+                "error": {"message": f"An error occurred while generating the response:\n{e}"},
+                "model": model,
+                "provider": g4f.get_last_provider(True)
+            })
+            return Response(content=content, status_code=500, media_type="application/json")
         completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28))
         completion_timestamp = int(time.time())
@@ -109,6 +119,7 @@ class Api:
             'object': 'chat.completion',
             'created': completion_timestamp,
             'model': model,
+            'provider': g4f.get_last_provider(True),
             'choices': [
                 {
                     'index': 0,
@@ -136,6 +147,7 @@ class Api:
                     'object': 'chat.completion.chunk',
                     'created': completion_timestamp,
                     'model': model,
+                    'provider': g4f.get_last_provider(True),
                     'choices': [
                         {
                             'index': 0,
@@ -147,16 +159,14 @@ class Api:
                         }
                     ],
                 }

-                content = json.dumps(completion_data, separators=(',', ':'))
-                yield f'data: {content}\n\n'
+                yield f'data: {json.dumps(completion_data)}\n\n'
                 time.sleep(0.03)

             end_completion_data = {
                 'id': f'chatcmpl-{completion_id}',
                 'object': 'chat.completion.chunk',
                 'created': completion_timestamp,
                 'model': model,
+                'provider': g4f.get_last_provider(True),
                 'choices': [
                     {
                         'index': 0,
@@ -165,15 +175,17 @@ class Api:
                     }
                 ],
             }

-            content = json.dumps(end_completion_data, separators=(',', ':'))
-            yield f'data: {content}\n\n'
+            yield f'data: {json.dumps(end_completion_data)}\n\n'
         except GeneratorExit:
             pass
         except Exception as e:
             logging.exception(e)
-            content = json.dumps({"error": "An error occurred while generating the response."}, indent=4)
-            yield f'data: {content}\n\n'
+            content = json.dumps({
+                "error": {"message": f"An error occurred while generating the response:\n{e}"},
+                "model": model,
+                "provider": g4f.get_last_provider(True),
+            })
+            yield f'data: {content}'

         return StreamingResponse(streaming(), media_type="text/event-stream")
g4f/base_provider.py (new file, +54 lines)
@@ -0,0 +1,54 @@
+from abc import ABC, abstractmethod
+from .typing import Messages, CreateResult, Union
+
+class BaseProvider(ABC):
+    url: str
+    working: bool = False
+    needs_auth: bool = False
+    supports_stream: bool = False
+    supports_gpt_35_turbo: bool = False
+    supports_gpt_4: bool = False
+    supports_message_history: bool = False
+    params: str
+
+    @classmethod
+    @abstractmethod
+    def create_completion(
+        cls,
+        model: str,
+        messages: Messages,
+        stream: bool,
+        **kwargs
+    ) -> CreateResult:
+        raise NotImplementedError()
+
+    @classmethod
+    @abstractmethod
+    async def create_async(
+        cls,
+        model: str,
+        messages: Messages,
+        **kwargs
+    ) -> str:
+        raise NotImplementedError()
+
+    @classmethod
+    def get_dict(cls):
+        return {'name': cls.__name__, 'url': cls.url}
+
+class BaseRetryProvider(BaseProvider):
+    __name__: str = "RetryProvider"
+    supports_stream: bool = True
+
+    def __init__(
+        self,
+        providers: list[type[BaseProvider]],
+        shuffle: bool = True
+    ) -> None:
+        self.providers: list[type[BaseProvider]] = providers
+        self.shuffle: bool = shuffle
+        self.working: bool = True
+        self.exceptions: dict[str, Exception] = {}
+        self.last_provider: type[BaseProvider] = None
+
+ProviderType = Union[type[BaseProvider], BaseRetryProvider]
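ProviderType is a plain Union, so annotations accept both a provider class and a BaseRetryProvider instance; both carry __name__ for display. A tiny sketch (the function name is hypothetical):

from g4f.base_provider import ProviderType

def provider_label(provider: ProviderType) -> str:
    # Provider classes and BaseRetryProvider instances both expose __name__.
    return provider.__name__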
g4f/debug.py
@@ -1,45 +1,5 @@
-from os import environ
-import requests
-from importlib.metadata import version as get_package_version, PackageNotFoundError
-from subprocess import check_output, CalledProcessError, PIPE
-from .errors import VersionNotFoundError
-
-logging = False
-version_check = True
-
-def get_version() -> str:
-    # Read from package manager
-    try:
-        return get_package_version("g4f")
-    except PackageNotFoundError:
-        pass
-    # Read from docker environment
-    current_version = environ.get("G4F_VERSION")
-    if current_version:
-        return current_version
-    # Read from git repository
-    try:
-        command = ["git", "describe", "--tags", "--abbrev=0"]
-        return check_output(command, text=True, stderr=PIPE).strip()
-    except CalledProcessError:
-        pass
-    raise VersionNotFoundError("Version not found")
-
-def get_latest_version() -> str:
-    if environ.get("G4F_VERSION"):
-        url = "https://registry.hub.docker.com/v2/repositories/"
-        url += "hlohaus789/g4f"
-        url += "/tags?page_size=2&ordering=last_updated"
-        response = requests.get(url).json()
-        return response["results"][1]["name"]
-    response = requests.get("https://pypi.org/pypi/g4f/json").json()
-    return response["info"]["version"]
-
-def check_pypi_version() -> None:
-    try:
-        version = get_version()
-        latest_version = get_latest_version()
-        if version != latest_version:
-            print(f'New pypi version: {latest_version} (current: {version}) | pip install -U g4f')
-    except Exception as e:
-        print(f'Failed to check g4f pypi version: {e}')
+from .base_provider import ProviderType
+
+logging: bool = False
+version_check: bool = True
+last_provider: ProviderType = None
@@ -13,7 +13,7 @@ class AuthenticationRequiredError(Exception):
 class ModelNotFoundError(Exception):
     pass

-class ModelNotAllowed(Exception):
+class ModelNotAllowedError(Exception):
     pass

 class RetryProviderError(Exception):
@@ -295,11 +295,12 @@ body {
     gap: 18px;
 }

-.message .content p,
-.message .content li,
-.message .content code {
+.message .content,
+.message .content a:link,
+.message .content a:visited{
     font-size: 15px;
     line-height: 1.3;
     color: var(--colour-3);
 }
 .message .content pre {
     white-space: pre-wrap;
@@ -73,7 +73,7 @@ const ask_gpt = async () => {
     provider = document.getElementById("provider");
     model = document.getElementById("model");
     prompt_lock = true;
-    window.text = ``;
+    window.text = '';

     stop_generating.classList.remove(`stop_generating-hidden`);

@@ -88,10 +88,13 @@ const ask_gpt = async () => {
                 ${gpt_image} <i class="fa-regular fa-phone-arrow-down-left"></i>
             </div>
             <div class="content" id="gpt_${window.token}">
-                <div id="cursor"></div>
+                <div class="provider"></div>
+                <div class="content_inner"><div id="cursor"></div></div>
             </div>
         </div>
     `;
+    content = document.getElementById(`gpt_${window.token}`);
+    content_inner = content.querySelector('.content_inner');

     message_box.scrollTop = message_box.scrollHeight;
     window.scrollTo(0, 0);
@@ -123,28 +126,38 @@ const ask_gpt = async () => {
         await new Promise((r) => setTimeout(r, 1000));
         window.scrollTo(0, 0);

-        const reader = response.body.getReader();
+        const reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
+        error = provider = null;
         while (true) {
             const { value, done } = await reader.read();
             if (done) break;

-            chunk = new TextDecoder().decode(value);
-
-            text += chunk;
-
-            document.getElementById(`gpt_${window.token}`).innerHTML = markdown_render(text);
-            document.querySelectorAll(`code`).forEach((el) => {
-                hljs.highlightElement(el);
-            });
+            for (const line of value.split("\n")) {
+                if (!line) continue;
+                const message = JSON.parse(line);
+                if (message["type"] == "content") {
+                    text += message["content"];
+                } else if (message["type"] == "provider") {
+                    provider = message["provider"];
+                    content.querySelector('.provider').innerHTML =
+                        '<a href="' + provider.url + '" target="_blank">' + provider.name + "</a>"
+                } else if (message["type"] == "error") {
+                    error = message["error"];
+                }
+            }
+            if (error) {
+                console.error(error);
+                content_inner.innerHTML = "An error occured, please try again, if the problem persists, please use a other model or provider";
+            } else {
+                content_inner.innerHTML = markdown_render(text);
+                document.querySelectorAll('code').forEach((el) => {
+                    hljs.highlightElement(el);
+                });
+            }

             window.scrollTo(0, 0);
             message_box.scrollTo({ top: message_box.scrollHeight, behavior: "auto" });
         }

-        if (text.includes(`G4F_ERROR`)) {
-            console.log("response", text);
-            document.getElementById(`gpt_${window.token}`).innerHTML = "An error occured, please try again, if the problem persists, please use a other model or provider";
-        }
     } catch (e) {
         console.log(e);

@@ -153,13 +166,13 @@ const ask_gpt = async () => {

         if (e.name != `AbortError`) {
             text = `oops ! something went wrong, please try again / reload. [stacktrace in console]`;
-            document.getElementById(`gpt_${window.token}`).innerHTML = text;
+            content_inner.innerHTML = text;
         } else {
-            document.getElementById(`gpt_${window.token}`).innerHTML += ` [aborted]`;
+            content_inner.innerHTML += ` [aborted]`;
             text += ` [aborted]`
         }
     }
-    add_message(window.conversation_id, "assistant", text);
+    add_message(window.conversation_id, "assistant", text, provider);
     message_box.scrollTop = message_box.scrollHeight;
     await remove_cancel_button();
     prompt_lock = false;
@@ -259,10 +272,11 @@ const load_conversation = async (conversation_id) => {
             }
             </div>
             <div class="content">
-                ${item.role == "assistant"
-                    ? markdown_render(item.content)
-                    : item.content
+                ${item.provider
+                    ? '<div class="provider"><a href="' + item.provider.url + '" target="_blank">' + item.provider.name + '</a></div>'
+                    : ''
                 }
+                <div class="content_inner">${markdown_render(item.content)}</div>
             </div>
         </div>
     `;
@@ -323,12 +337,13 @@ const remove_last_message = async (conversation_id) => {
     );
 };

-const add_message = async (conversation_id, role, content) => {
+const add_message = async (conversation_id, role, content, provider) => {
     const conversation = await get_conversation(conversation_id);

     conversation.items.push({
         role: role,
         content: content,
+        provider: provider
     });

     localStorage.setItem(
@@ -4,7 +4,7 @@ from g4f.Provider import __providers__
 import json
 from flask import request, Flask
 from .internet import get_search_message
-from g4f import debug
+from g4f import debug, version

 debug.logging = True

@@ -53,8 +53,8 @@ class Backend_Api:

     def version(self):
         return {
-            "version": debug.get_version(),
-            "lastet_version": debug.get_latest_version(),
+            "version": version.utils.current_version,
+            "lastet_version": version.utils.latest_version,
         }

     def _gen_title(self):
@@ -65,7 +65,7 @@ class Backend_Api:
     def _conversation(self):
         #jailbreak = request.json['jailbreak']
         messages = request.json['meta']['content']['parts']
-        if request.json['internet_access']:
+        if request.json.get('internet_access'):
             messages[-1]["content"] = get_search_message(messages[-1]["content"])
         model = request.json.get('model')
         model = model if model else g4f.models.default
@@ -74,20 +74,30 @@ class Backend_Api:

         def try_response():
             try:
-                yield from g4f.ChatCompletion.create(
+                first = True
+                for chunk in g4f.ChatCompletion.create(
                     model=model,
                     provider=provider,
                     messages=messages,
                     stream=True,
                     ignore_stream_and_auth=True
-                )
+                ):
+                    if first:
+                        first = False
+                        yield json.dumps({
+                            'type'    : 'provider',
+                            'provider': g4f.get_last_provider(True)
+                        }) + "\n"
+                    yield json.dumps({
+                        'type'   : 'content',
+                        'content': chunk,
+                    }) + "\n"
             except Exception as e:
                 print(e)
                 yield json.dumps({
-                    'code'   : 'G4F_ERROR',
-                    '_action': '_ask',
-                    'success': False,
-                    'error'  : f'{e.__class__.__name__}: {e}'
+                    'type' : 'error',
+                    'error': f'{e.__class__.__name__}: {e}'
                 })
                 raise e

         return self.app.response_class(try_response(), mimetype='text/event-stream')
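The backend now streams newline-delimited JSON instead of raw text: one provider message first, then content chunks, with errors reported in-band; the chat.js loop above splits each read on "\n" and dispatches on the type field. An illustrative wire trace (values are examples, not captured output):

{"type": "provider", "provider": {"name": "Phind", "url": "https://www.phind.com"}}
{"type": "content", "content": "Hello"}
{"type": "content", "content": ", world."}
{"type": "error", "error": "RuntimeError: ..."}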
@@ -1,7 +1,6 @@
 from __future__ import annotations
 from dataclasses import dataclass
-from .typing import Union
-from .Provider import BaseProvider, RetryProvider
+from .Provider import RetryProvider, ProviderType
 from .Provider import (
     Chatgpt4Online,
     ChatgptDemoAi,
@@ -36,7 +35,7 @@ from .Provider import (
 class Model:
     name: str
     base_provider: str
-    best_provider: Union[type[BaseProvider], RetryProvider] = None
+    best_provider: ProviderType = None

     @staticmethod
     def __all__() -> list[str]:
@@ -101,28 +100,39 @@ gpt_4_turbo = Model(
 llama2_7b = Model(
     name          = "meta-llama/Llama-2-7b-chat-hf",
     base_provider = 'huggingface',
-    best_provider = RetryProvider([Llama2, DeepInfra]))
+    best_provider = RetryProvider([Llama2, DeepInfra])
+)

 llama2_13b = Model(
     name          = "meta-llama/Llama-2-13b-chat-hf",
     base_provider = 'huggingface',
-    best_provider = RetryProvider([Llama2, DeepInfra]))
+    best_provider = RetryProvider([Llama2, DeepInfra])
+)

 llama2_70b = Model(
     name          = "meta-llama/Llama-2-70b-chat-hf",
     base_provider = "huggingface",
-    best_provider = RetryProvider([Llama2, DeepInfra, HuggingChat]))
+    best_provider = RetryProvider([Llama2, DeepInfra, HuggingChat])
+)

 # Mistal
 mixtral_8x7b = Model(
     name          = "mistralai/Mixtral-8x7B-Instruct-v0.1",
     base_provider = "huggingface",
-    best_provider = HuggingChat)
+    best_provider = RetryProvider([DeepInfra, HuggingChat])
+)

 mistral_7b = Model(
     name          = "mistralai/Mistral-7B-Instruct-v0.1",
     base_provider = "huggingface",
-    best_provider = HuggingChat)
+    best_provider = RetryProvider([DeepInfra, HuggingChat])
+)
+
+openchat_35 = Model(
+    name          = "openchat/openchat_3.5",
+    base_provider = "huggingface",
+    best_provider = RetryProvider([DeepInfra, HuggingChat])
+)

 # Bard
 palm = Model(
@@ -313,6 +323,7 @@ class ModelUtils:
         # Mistral
         'mixtral-8x7b': mixtral_8x7b,
         'mistral-7b': mistral_7b,
+        'openchat_3.5': openchat_35,

         # Bard
         'palm2' : palm,
@@ -1,5 +1,5 @@
 import sys
-from typing import Any, AsyncGenerator, Generator, NewType, Tuple, Union, List, Dict
+from typing import Any, AsyncGenerator, Generator, NewType, Tuple, Union, List, Dict, Type

 if sys.version_info >= (3, 8):
     from typing import TypedDict
g4f/version.py (new file, +47 lines)
@@ -0,0 +1,47 @@
+from os import environ
+import requests
+from functools import cached_property
+from importlib.metadata import version as get_package_version, PackageNotFoundError
+from subprocess import check_output, CalledProcessError, PIPE
+from .errors import VersionNotFoundError
+
+
+class VersionUtils():
+    @cached_property
+    def current_version(self) -> str:
+        # Read from package manager
+        try:
+            return get_package_version("g4f")
+        except PackageNotFoundError:
+            pass
+        # Read from docker environment
+        version = environ.get("G4F_VERSION")
+        if version:
+            return version
+        # Read from git repository
+        try:
+            command = ["git", "describe", "--tags", "--abbrev=0"]
+            return check_output(command, text=True, stderr=PIPE).strip()
+        except CalledProcessError:
+            pass
+        raise VersionNotFoundError("Version not found")
+
+    @cached_property
+    def latest_version(self) -> str:
+        try:
+            get_package_version("g4f")
+            response = requests.get("https://pypi.org/pypi/g4f/json").json()
+            return response["info"]["version"]
+        except PackageNotFoundError:
+            url = "https://api.github.com/repos/xtekky/gpt4free/releases/latest"
+            response = requests.get(url).json()
+            return response["tag_name"]
+
+    def check_pypi_version(self) -> None:
+        try:
+            if self.current_version != self.latest_version:
+                print(f'New pypi version: {self.latest_version} (current: {self.version}) | pip install -U g4f')
+        except Exception as e:
+            print(f'Failed to check g4f pypi version: {e}')
+
+utils = VersionUtils()
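Both version lookups are cached_property values, so the network and subprocess probes run at most once per process; the module-level utils instance is the intended entry point. A usage sketch (assumes this commit):

from g4f import version

print(version.utils.current_version)  # pip metadata, G4F_VERSION env var, or git tag
print(version.utils.latest_version)   # PyPI when pip-installed, else GitHub releases
version.utils.check_pypi_version()    # prints an update hint when versions differ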