Mirror of https://github.com/xtekky/gpt4free.git (synced 2024-12-22 10:31:36 +03:00)

Commit b35dfcd1b0 (parent 674ba8f2c5): Add local models to the GUI, fix the You provider, add AsyncClient
@@ -18,7 +18,7 @@ class Aura(AsyncGeneratorProvider):
messages: Messages,
proxy: str = None,
temperature: float = 0.5,
max_tokens: int = 8192.
max_tokens: int = 8192,
webdriver: WebDriver = None,
**kwargs
) -> AsyncResult:
@@ -1,42 +1,41 @@
from __future__ import annotations

import json
import requests
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..requests import StreamSession, raise_for_status
from .needs_auth.Openai import Openai

class DeepInfra(AsyncGeneratorProvider, ProviderModelMixin):
class DeepInfra(Openai):
url = "https://deepinfra.com"
working = True
needs_auth = False
supports_stream = True
supports_message_history = True
default_model = 'meta-llama/Llama-2-70b-chat-hf'

@classmethod
def get_models(cls):
if not cls.models:
url = 'https://api.deepinfra.com/models/featured'
models = requests.get(url).json()
cls.models = [model['model_name'] for model in models]
cls.models = [model['model_name'] for model in models if model["type"] == "text-generation"]
return cls.models

@classmethod
async def create_async_generator(
def create_async_generator(
cls,
model: str,
messages: Messages,
stream: bool,
proxy: str = None,
timeout: int = 120,
auth: str = None,
api_base: str = "https://api.deepinfra.com/v1/openai",
temperature: float = 0.7,
max_tokens: int = 1028,
**kwargs
) -> AsyncResult:
headers = {
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US',
'Connection': 'keep-alive',
'Content-Type': 'application/json',
'Content-Type': None,
'Origin': 'https://deepinfra.com',
'Referer': 'https://deepinfra.com/',
'Sec-Fetch-Dest': 'empty',
@@ -44,46 +43,17 @@ class DeepInfra(AsyncGeneratorProvider, ProviderModelMixin):
'Sec-Fetch-Site': 'same-site',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
'X-Deepinfra-Source': 'web-embed',
'accept': 'text/event-stream',
'Accept': None,
'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
}
if auth:
headers['Authorization'] = f"bearer {auth}"

async with StreamSession(headers=headers,
timeout=timeout,
proxies={"https": proxy},
impersonate="chrome110"
) as session:
json_data = {
'model' : cls.get_model(model),
'messages': messages,
'temperature': kwargs.get("temperature", 0.7),
'max_tokens': kwargs.get("max_tokens", 512),
'stop': kwargs.get("stop", []),
'stream' : True
}
async with session.post('https://api.deepinfra.com/v1/openai/chat/completions',
json=json_data) as response:
await raise_for_status(response)
first = True
async for line in response.iter_lines():
if not line.startswith(b"data: "):
continue
try:
json_line = json.loads(line[6:])
choices = json_line.get("choices", [{}])
finish_reason = choices[0].get("finish_reason")
if finish_reason:
break
token = choices[0].get("delta", {}).get("content")
if token:
if first:
token = token.lstrip()
if token:
first = False
yield token
except Exception:
raise RuntimeError(f"Response: {line}")
return super().create_async_generator(
model, messages,
stream=stream,
api_base=api_base,
temperature=temperature,
max_tokens=max_tokens,
headers=headers,
**kwargs
)
@@ -76,7 +76,7 @@ class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
if not response.ok:
data = await response.json()
data = data[0] if isinstance(data, list) else data
raise RuntimeError(f"Response {response.status}: {data["error"]["message"]}")
raise RuntimeError(f"Response {response.status}: {data['error']['message']}")
if stream:
lines = []
async for chunk in response.content:
42  g4f/Provider/Local.py  Normal file
@@ -0,0 +1,42 @@
from __future__ import annotations

from ..locals.models import get_models
try:
from ..locals.provider import LocalProvider
has_requirements = True
except ModuleNotFoundError:
has_requirements = False

from ..typing import Messages, CreateResult
from ..providers.base_provider import AbstractProvider, ProviderModelMixin
from ..errors import MissingRequirementsError

class Local(AbstractProvider, ProviderModelMixin):
working = True
supports_message_history = True
supports_system_message = True
supports_stream = True

@classmethod
def get_models(cls):
if not cls.models:
cls.models = list(get_models())
cls.default_model = cls.models[0]
return cls.models

@classmethod
def create_completion(
cls,
model: str,
messages: Messages,
stream: bool,
**kwargs
) -> CreateResult:
if not has_requirements:
raise MissingRequirementsError('Install "gpt4all" package | pip install -U g4f[local]')
return LocalProvider.create_completion(
cls.get_model(model),
messages,
stream,
**kwargs
)
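For context, a minimal usage sketch of the new Local provider (not part of the commit); it assumes the optional gpt4all extra is installed (pip install -U g4f[local]) and that at least one local model is available on disk:

# Example (illustrative only): using the new Local provider through the top-level API.
import g4f
from g4f.Provider import Local

response = g4f.ChatCompletion.create(
    model=Local.get_models()[0],   # first locally available model
    provider=Local,
    messages=[{"role": "user", "content": "Hello"}],
    stream=False,
)
print(response)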
@@ -17,6 +17,8 @@ from ..image import to_bytes, ImageResponse
from ..requests import StreamSession, raise_for_status
from ..errors import MissingRequirementsError

from .you.har_file import get_dfp_telemetry_id

class You(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://you.com"
working = True
@@ -45,6 +47,7 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
cls,
model: str,
messages: Messages,
stream: bool = True,
image: ImageType = None,
image_name: str = None,
proxy: str = None,
@@ -56,7 +59,7 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
if image is not None:
chat_mode = "agent"
elif not model or model == cls.default_model:
chat_mode = "default"
...
elif model.startswith("dall-e"):
chat_mode = "create"
else:
@@ -108,7 +111,7 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
data = json.loads(line[6:])
if event == "youChatToken" and event in data:
yield data[event]
elif event == "youChatUpdate" and "t" in data:
elif event == "youChatUpdate" and "t" in data and data["t"] is not None:
match = re.search(r"!\[fig\]\((.+?)\)", data["t"])
if match:
yield ImageResponse(match.group(1), messages[-1]["content"])
@@ -177,6 +180,7 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
"X-SDK-Parent-Host": cls.url
},
json={
"dfp_telemetry_id": await get_dfp_telemetry_id(),
"email": f"{user_uuid}@gmail.com",
"password": f"{user_uuid}#{user_uuid}",
"session_duration_minutes": 129600
@@ -33,6 +33,7 @@ from .HuggingFace import HuggingFace
from .Koala import Koala
from .Liaobots import Liaobots
from .Llama2 import Llama2
from .Local import Local
from .PerplexityLabs import PerplexityLabs
from .Pi import Pi
from .Vercel import Vercel
@@ -8,7 +8,6 @@ from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider
from ..helper import format_prompt, get_cookies

class OpenAssistant(AsyncGeneratorProvider):
url = "https://open-assistant.io/chat"
needs_auth = True
@@ -31,4 +31,5 @@ from .FakeGpt import FakeGpt
from .GeekGpt import GeekGpt
from .GPTalk import GPTalk
from .Hashnode import Hashnode
from .Ylokh import Ylokh
from .Ylokh import Ylokh
from .OpenAssistant import OpenAssistant
@@ -19,7 +19,7 @@ except ImportError:
from ...typing import Messages, Cookies, ImageType, AsyncResult
from ..base_provider import AsyncGeneratorProvider
from ..helper import format_prompt, get_cookies
from requests.raise_for_status import raise_for_status
from ...requests.raise_for_status import raise_for_status
from ...errors import MissingAuthError, MissingRequirementsError
from ...image import to_bytes, ImageResponse
from ...webdriver import get_browser, get_driver_cookies
@@ -3,10 +3,10 @@ from __future__ import annotations
import json

from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin, FinishReason
from ...typing import AsyncResult, Messages
from ...typing import Union, Optional, AsyncResult, Messages
from ...requests.raise_for_status import raise_for_status
from ...requests import StreamSession
from ...errors import MissingAuthError
from ...errors import MissingAuthError, ResponseError

class Openai(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://openai.com"
@@ -27,48 +27,82 @@ class Openai(AsyncGeneratorProvider, ProviderModelMixin):
temperature: float = None,
max_tokens: int = None,
top_p: float = None,
stop: str = None,
stop: Union[str, list[str]] = None,
stream: bool = False,
headers: dict = None,
extra_data: dict = {},
**kwargs
) -> AsyncResult:
if api_key is None:
if cls.needs_auth and api_key is None:
raise MissingAuthError('Add a "api_key"')
async with StreamSession(
proxies={"all": proxy},
headers=cls.get_headers(api_key),
headers=cls.get_headers(stream, api_key, headers),
timeout=timeout
) as session:
data = {
"messages": messages,
"model": cls.get_model(model),
"temperature": temperature,
"max_tokens": max_tokens,
"top_p": top_p,
"stop": stop,
"stream": stream,
}
data = filter_none(
messages=messages,
model=cls.get_model(model),
temperature=temperature,
max_tokens=max_tokens,
top_p=top_p,
stop=stop,
stream=stream,
**extra_data
)
async with session.post(f"{api_base.rstrip('/')}/chat/completions", json=data) as response:
await raise_for_status(response)
async for line in response.iter_lines():
if line.startswith(b"data: ") or not stream:
async for chunk in cls.read_line(line[6:] if stream else line, stream):
yield chunk
if not stream:
data = await response.json()
choice = data["choices"][0]
if "content" in choice["message"]:
yield choice["message"]["content"].strip()
finish = cls.read_finish_reason(choice)
if finish is not None:
yield finish
else:
first = True
async for line in response.iter_lines():
if line.startswith(b"data: "):
chunk = line[6:]
if chunk == b"[DONE]":
break
data = json.loads(chunk)
if "error_message" in data:
raise ResponseError(data["error_message"])
choice = data["choices"][0]
if "content" in choice["delta"] and choice["delta"]["content"]:
delta = choice["delta"]["content"]
if first:
delta = delta.lstrip()
if delta:
first = False
yield delta
finish = cls.read_finish_reason(choice)
if finish is not None:
yield finish

@staticmethod
async def read_line(line: str, stream: bool):
if line == b"[DONE]":
return
choice = json.loads(line)["choices"][0]
if stream and "content" in choice["delta"] and choice["delta"]["content"]:
yield choice["delta"]["content"]
elif not stream and "content" in choice["message"]:
yield choice["message"]["content"]
def read_finish_reason(choice: dict) -> Optional[FinishReason]:
if "finish_reason" in choice and choice["finish_reason"] is not None:
yield FinishReason(choice["finish_reason"])
return FinishReason(choice["finish_reason"])

@staticmethod
def get_headers(api_key: str) -> dict:
@classmethod
def get_headers(cls, stream: bool, api_key: str = None, headers: dict = None) -> dict:
return {
"Authorization": f"Bearer {api_key}",
"Accept": "text/event-stream" if stream else "application/json",
"Content-Type": "application/json",
}
**(
{"Authorization": f"Bearer {api_key}"}
if cls.needs_auth and api_key is not None
else {}
),
**({} if headers is None else headers)
}

def filter_none(**kwargs) -> dict:
return {
key: value
for key, value in kwargs.items()
if value is not None
}
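As a rough illustration (not from the diff), the reworked Openai base drives any OpenAI-compatible endpoint from api_base, merges extra_data into the body via filter_none, and only requires api_key when needs_auth is set. A hedged sketch with placeholder credentials:

# Example (illustrative only): calling an OpenAI-compatible endpoint through the
# refactored Openai base class. api_base and api_key values are placeholders.
import asyncio
from g4f.Provider.needs_auth.Openai import Openai

async def main():
    async for chunk in Openai.create_async_generator(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Say hi"}],
        stream=True,
        api_base="https://api.openai.com/v1",
        api_key="sk-...",                      # placeholder
    ):
        if isinstance(chunk, str):             # skip the trailing FinishReason object
            print(chunk, end="")

asyncio.run(main())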
@@ -1,10 +1,7 @@
from __future__ import annotations

import requests

from ...typing import Any, CreateResult, Messages
from ..base_provider import AbstractProvider, ProviderModelMixin
from ...errors import MissingAuthError
from ...typing import CreateResult, Messages
from .Openai import Openai

models = {
"theb-ai": "TheB.AI",
@@ -30,7 +27,7 @@ models = {
"qwen-7b-chat": "Qwen 7B"
}

class ThebApi(AbstractProvider, ProviderModelMixin):
class ThebApi(Openai):
url = "https://theb.ai"
working = True
needs_auth = True
@@ -38,44 +35,26 @@ class ThebApi(AbstractProvider, ProviderModelMixin):
models = list(models)

@classmethod
def create_completion(
def create_async_generator(
cls,
model: str,
messages: Messages,
stream: bool,
auth: str = None,
proxy: str = None,
api_base: str = "https://api.theb.ai/v1",
temperature: float = 1,
top_p: float = 1,
**kwargs
) -> CreateResult:
if not auth:
raise MissingAuthError("Missing auth")
headers = {
'accept': 'application/json',
'authorization': f'Bearer {auth}',
'content-type': 'application/json',
}
# response = requests.get("https://api.baizhi.ai/v1/models", headers=headers).json()["data"]
# models = dict([(m["id"], m["name"]) for m in response])
# print(json.dumps(models, indent=4))
data: dict[str, Any] = {
"model": cls.get_model(model),
"messages": messages,
"stream": False,
if "auth" in kwargs:
kwargs["api_key"] = kwargs["auth"]
system_message = "\n".join([message["content"] for message in messages if message["role"] == "system"])
if not system_message:
system_message = "You are ChatGPT, a large language model trained by OpenAI, based on the GPT-3.5 architecture."
messages = [message for message in messages if message["role"] != "system"]
data = {
"model_params": {
"system_prompt": kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI, based on the GPT-3.5 architecture."),
"temperature": 1,
"top_p": 1,
**kwargs
"system_prompt": system_message,
"temperature": temperature,
"top_p": top_p,
}
}
response = requests.post(
"https://api.theb.ai/v1/chat/completions",
headers=headers,
json=data,
proxies={"https": proxy}
)
try:
response.raise_for_status()
yield response.json()["choices"][0]["message"]["content"]
except:
raise RuntimeError(f"Response: {next(response.iter_lines()).decode()}")
return super().create_async_generator(model, messages, api_base=api_base, extra_data=data, **kwargs)
@@ -3,7 +3,6 @@ from .Raycast import Raycast
from .Theb import Theb
from .ThebApi import ThebApi
from .OpenaiChat import OpenaiChat
from .OpenAssistant import OpenAssistant
from .Poe import Poe
from .Openai import Openai
from .Groq import Groq
0  g4f/Provider/you/__init__.py  Normal file
70  g4f/Provider/you/har_file.py  Normal file
@@ -0,0 +1,70 @@
import json
import os
import random
import uuid

from ...requests import StreamSession, raise_for_status

class NoValidHarFileError(Exception):
...

class arkReq:
def __init__(self, arkURL, arkHeaders, arkBody, arkCookies, userAgent):
self.arkURL = arkURL
self.arkHeaders = arkHeaders
self.arkBody = arkBody
self.arkCookies = arkCookies
self.userAgent = userAgent

arkPreURL = "https://telemetry.stytch.com/submit"
chatArks: list = None

def readHAR():
dirPath = "./"
harPath = []
chatArks = []
for root, dirs, files in os.walk(dirPath):
for file in files:
if file.endswith(".har"):
harPath.append(os.path.join(root, file))
if harPath:
break
if not harPath:
raise NoValidHarFileError("No .har file found")
for path in harPath:
with open(path, 'rb') as file:
try:
harFile = json.load(file)
except json.JSONDecodeError:
# Error: not a HAR file!
continue
for v in harFile['log']['entries']:
if arkPreURL in v['request']['url']:
chatArks.append(parseHAREntry(v))
if not chatArks:
raise NoValidHarFileError("No telemetry in .har files found")
return chatArks

def parseHAREntry(entry) -> arkReq:
tmpArk = arkReq(
arkURL=entry['request']['url'],
arkHeaders={h['name'].lower(): h['value'] for h in entry['request']['headers'] if h['name'].lower() not in ['content-length', 'cookie'] and not h['name'].startswith(':')},
arkBody=entry['request']['postData']['text'],
arkCookies={c['name']: c['value'] for c in entry['request']['cookies']},
userAgent=""
)
tmpArk.userAgent = tmpArk.arkHeaders.get('user-agent', '')
return tmpArk

async def sendRequest(tmpArk: arkReq, proxy: str = None):
async with StreamSession(headers=tmpArk.arkHeaders, cookies=tmpArk.arkCookies, proxies={"all": proxy}) as session:
async with session.post(tmpArk.arkURL, data=tmpArk.arkBody) as response:
await raise_for_status(response)
return await response.text()

async def get_dfp_telemetry_id(proxy: str = None):
return str(uuid.uuid4())
global chatArks
if chatArks is None:
chatArks = readHAR()
return await sendRequest(random.choice(chatArks), proxy)
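A short sketch (illustrative, not in the diff) of how the You provider consumes this module when registering a trial account. Note that, as committed, get_dfp_telemetry_id returns a random UUID before the HAR-based branch is reached, so the .har lookup below the early return is effectively disabled:

# Example (illustrative only): fetching a telemetry id the way You.create_account does.
import asyncio
from g4f.Provider.you.har_file import get_dfp_telemetry_id

async def main():
    telemetry_id = await get_dfp_telemetry_id(proxy=None)
    print(telemetry_id)   # currently a random UUID, see early return above

asyncio.run(main())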
126  g4f/__init__.py
@@ -2,95 +2,14 @@ from __future__ import annotations

import os

from .errors import *
from .models import Model, ModelUtils
from .Provider import AsyncGeneratorProvider, ProviderUtils
from .typing import Messages, CreateResult, AsyncResult, Union
from .cookies import get_cookies, set_cookies
from . import debug, version
from .providers.types import BaseRetryProvider, ProviderType
from .providers.base_provider import ProviderModelMixin
from .providers.retry_provider import IterProvider

def get_model_and_provider(model : Union[Model, str],
provider : Union[ProviderType, str, None],
stream : bool,
ignored : list[str] = None,
ignore_working: bool = False,
ignore_stream: bool = False,
**kwargs) -> tuple[str, ProviderType]:
"""
Retrieves the model and provider based on input parameters.

Args:
model (Union[Model, str]): The model to use, either as an object or a string identifier.
provider (Union[ProviderType, str, None]): The provider to use, either as an object, a string identifier, or None.
stream (bool): Indicates if the operation should be performed as a stream.
ignored (list[str], optional): List of provider names to be ignored.
ignore_working (bool, optional): If True, ignores the working status of the provider.
ignore_stream (bool, optional): If True, ignores the streaming capability of the provider.

Returns:
tuple[str, ProviderType]: A tuple containing the model name and the provider type.

Raises:
ProviderNotFoundError: If the provider is not found.
ModelNotFoundError: If the model is not found.
ProviderNotWorkingError: If the provider is not working.
StreamNotSupportedError: If streaming is not supported by the provider.
"""
if debug.version_check:
debug.version_check = False
version.utils.check_version()

if isinstance(provider, str):
if " " in provider:
provider_list = [ProviderUtils.convert[p] for p in provider.split() if p in ProviderUtils.convert]
if not provider_list:
raise ProviderNotFoundError(f'Providers not found: {provider}')
provider = IterProvider(provider_list)
elif provider in ProviderUtils.convert:
provider = ProviderUtils.convert[provider]
elif provider:
raise ProviderNotFoundError(f'Provider not found: {provider}')

if isinstance(model, str):
if model in ModelUtils.convert:
model = ModelUtils.convert[model]

if not provider:
if isinstance(model, str):
raise ModelNotFoundError(f'Model not found: {model}')
provider = model.best_provider

if not provider:
raise ProviderNotFoundError(f'No provider found for model: {model}')

if isinstance(model, Model):
model = model.name

if not ignore_working and not provider.working:
raise ProviderNotWorkingError(f'{provider.__name__} is not working')

if not ignore_working and isinstance(provider, BaseRetryProvider):
provider.providers = [p for p in provider.providers if p.working]

if ignored and isinstance(provider, BaseRetryProvider):
provider.providers = [p for p in provider.providers if p.__name__ not in ignored]

if not ignore_stream and not provider.supports_stream and stream:
raise StreamNotSupportedError(f'{provider.__name__} does not support "stream" argument')

if debug.logging:
if model:
print(f'Using {provider.__name__} provider and {model} model')
else:
print(f'Using {provider.__name__} provider')

debug.last_provider = provider
debug.last_model = model

return model, provider
from . import debug, version
from .models import Model
from .typing import Messages, CreateResult, AsyncResult, Union
from .errors import StreamNotSupportedError, ModelNotAllowedError
from .cookies import get_cookies, set_cookies
from .providers.types import ProviderType
from .providers.base_provider import AsyncGeneratorProvider
from .client.service import get_model_and_provider, get_last_provider

class ChatCompletion:
@staticmethod
@@ -134,7 +53,7 @@ class ChatCompletion:
ignore_stream or kwargs.get("ignore_stream_and_auth")
)

if auth:
if auth is not None:
kwargs['auth'] = auth

if "proxy" not in kwargs:
@@ -154,6 +73,7 @@ class ChatCompletion:
provider : Union[ProviderType, str, None] = None,
stream : bool = False,
ignored : list[str] = None,
ignore_working: bool = False,
patch_provider: callable = None,
**kwargs) -> Union[AsyncResult, str]:
"""
@@ -174,7 +94,7 @@ class ChatCompletion:
Raises:
StreamNotSupportedError: If streaming is requested but not supported by the provider.
"""
model, provider = get_model_and_provider(model, provider, False, ignored)
model, provider = get_model_and_provider(model, provider, False, ignored, ignore_working)

if stream:
if isinstance(provider, type) and issubclass(provider, AsyncGeneratorProvider):
@@ -225,26 +145,4 @@ class Completion:

result = provider.create_completion(model, [{"role": "user", "content": prompt}], stream, **kwargs)

return result if stream else ''.join(result)

def get_last_provider(as_dict: bool = False) -> Union[ProviderType, dict[str, str]]:
"""
Retrieves the last used provider.

Args:
as_dict (bool, optional): If True, returns the provider information as a dictionary.

Returns:
Union[ProviderType, dict[str, str]]: The last used provider, either as an object or a dictionary.
"""
last = debug.last_provider
if isinstance(last, BaseRetryProvider):
last = last.last_provider
if last and as_dict:
return {
"name": last.__name__,
"url": last.url,
"model": debug.last_model,
"models": last.models if isinstance(last, ProviderModelMixin) else []
}
return last
return result if stream else ''.join(result)
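The public entry point is unchanged by this move; get_model_and_provider and get_last_provider now live in g4f.client.service and are re-imported here. A minimal sketch (assuming a working default provider) of the call path that still goes through ChatCompletion.create:

# Example (illustrative only): the top-level API works the same after the refactor;
# model/provider resolution is now delegated to g4f.client.service.
import g4f

answer = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
    stream=False,
)
print(answer)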
@@ -3,10 +3,13 @@ import json
import uvicorn
import nest_asyncio

from fastapi import FastAPI, Response, Request
from fastapi import FastAPI, Response, Request
from fastapi.responses import StreamingResponse, RedirectResponse, HTMLResponse, JSONResponse
from pydantic import BaseModel
from typing import List, Union
from fastapi.exceptions import RequestValidationError
from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY
from fastapi.encoders import jsonable_encoder
from pydantic import BaseModel
from typing import List, Union

import g4f
import g4f.debug
@@ -39,6 +42,25 @@ class Api:
self.app = FastAPI()

self.routes()
self.register_validation_exception_handler()

def register_validation_exception_handler(self):
@self.app.exception_handler(RequestValidationError)
async def validation_exception_handler(request: Request, exc: RequestValidationError):
details = exc.errors()
modified_details = []
for error in details:
modified_details.append(
{
"loc": error["loc"],
"message": error["msg"],
"type": error["type"],
}
)
return JSONResponse(
status_code=HTTP_422_UNPROCESSABLE_ENTITY,
content=jsonable_encoder({"detail": modified_details}),
)

def routes(self):
@self.app.get("/")
@@ -0,0 +1,3 @@
from .stubs import ChatCompletion, ChatCompletionChunk, ImagesResponse
from .client import Client
from .async_client import AsyncClient
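A minimal sketch (illustrative, assuming a provider that works without extra auth) of the new AsyncClient this commit introduces; it mirrors the synchronous Client API, but create() is awaited:

# Example (illustrative only): using the new AsyncClient added in this commit.
import asyncio
from g4f.client import AsyncClient

async def main():
    client = AsyncClient()
    completion = await client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(completion.choices[0].message.content)

asyncio.run(main())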
@@ -1,20 +1,21 @@
from __future__ import annotations

import re
import os
import time
import random
import string

from .types import Client as BaseClient
from .types import BaseProvider, ProviderType, FinishReason
from .stubs import ChatCompletion, ChatCompletionChunk, Image, ImagesResponse
from ..typing import Union, Iterator, Messages, ImageType, AsyncIerator
from .types import ProviderType, FinishReason
from .stubs import ChatCompletion, ChatCompletionChunk, ImagesResponse, Image
from .types import AsyncIterResponse, ImageProvider
from .image_models import ImageModels
from .helper import filter_json, find_stop, filter_none, cast_iter_async
from .service import get_last_provider, get_model_and_provider
from ..typing import Union, Iterator, Messages, AsyncIterator, ImageType
from ..errors import NoImageResponseError
from ..image import ImageResponse as ImageProviderResponse
from ..errors import NoImageResponseError, RateLimitError, MissingAuthError
from .. import get_model_and_provider, get_last_provider
from .helper import read_json, find_stop, filter_none
from ..providers.base_provider import AsyncGeneratorProvider

async def iter_response(
response: AsyncIterator[str],
stream: bool,
@@ -47,10 +48,10 @@ async def iter_response(
else:
if response_format is not None and "type" in response_format:
if response_format["type"] == "json_object":
content = read_json(content)
content = filter_json(content)
yield ChatCompletion(content, finish_reason, completion_id, int(time.time()))

async def iter_append_model_and_provider(response: AsyncIterResponse) -> IterResponse:
async def iter_append_model_and_provider(response: AsyncIterResponse) -> AsyncIterResponse:
last_provider = None
async for chunk in response:
last_provider = get_last_provider(True) if last_provider is None else last_provider
@@ -58,51 +59,50 @@ async def iter_append_model_and_provider(response: AsyncIterResponse) -> IterRes
chunk.provider = last_provider.get("name")
yield chunk

class Client(BaseClient):
class AsyncClient(BaseClient):
def __init__(
self,
provider: ProviderType = None,
image_provider: ImageProvider = None,
**kwargs
):
super().__init__(**kwargs)
self.chat: Chat = Chat(self, provider)
self.images: Images = Images(self, image_provider)

async def cast_iter_async(iter):
for chunk in iter:
yield chunk

def create_response(
messages: Messages,
model: str,
provider: ProviderType = None,
stream: bool = False,
response_format: dict = None,
proxy: str = None,
max_tokens: int = None,
stop: Union[list[str], str] = None,
stop: list[str] = None,
api_key: str = None,
**kwargs
):
if hasattr(provider, "create_async_generator):
has_asnyc = isinstance(provider, type) and issubclass(provider, AsyncGeneratorProvider)
if has_asnyc:
create = provider.create_async_generator
else:
create = provider.create_completion
response = create(
model, messages, stream,
**filter_none(
proxy=self.client.get_proxy(),
proxy=proxy,
max_tokens=max_tokens,
stop=stop,
api_key=self.client.api_key if api_key is None else api_key
api_key=api_key
),
**kwargs
)
if not hasattr(provider, "create_async_generator")
if not has_asnyc:
response = cast_iter_async(response)
return response

class Completions():
def __init__(self, client: Client, provider: ProviderType = None):
self.client: Client = client
def __init__(self, client: AsyncClient, provider: ProviderType = None):
self.client: AsyncClient = client
self.provider: ProviderType = provider

def create(
@@ -111,6 +111,10 @@ class Completions():
model: str,
provider: ProviderType = None,
stream: bool = False,
proxy: str = None,
max_tokens: int = None,
stop: Union[list[str], str] = None,
api_key: str = None,
response_format: dict = None,
ignored : list[str] = None,
ignore_working: bool = False,
@@ -123,11 +127,18 @@ class Completions():
stream,
ignored,
ignore_working,
ignore_stream,
**kwargs
ignore_stream
)
stop = [stop] if isinstance(stop, str) else stop
response = create_response(messages, model, provider, stream, **kwargs)
response = create_response(
messages, model,
provider, stream,
proxy=self.client.get_proxy() if proxy is None else proxy,
max_tokens=max_tokens,
stop=stop,
api_key=self.client.api_key if api_key is None else api_key
**kwargs
)
response = iter_response(response, stream, response_format, max_tokens, stop)
response = iter_append_model_and_provider(response)
return response if stream else anext(response)
@@ -135,44 +146,40 @@ class Completions():
class Chat():
completions: Completions

def __init__(self, client: Client, provider: ProviderType = None):
def __init__(self, client: AsyncClient, provider: ProviderType = None):
self.completions = Completions(client, provider)

async def iter_image_response(response: Iterator) -> Union[ImagesResponse, None]:
async for chunk in list(response):
async for chunk in response:
if isinstance(chunk, ImageProviderResponse):
return ImagesResponse([Image(image) for image in chunk.get_list()])

def create_image(client: Client, provider: ProviderType, prompt: str, model: str = "", **kwargs) -> AsyncIterator:
def create_image(client: AsyncClient, provider: ProviderType, prompt: str, model: str = "", **kwargs) -> AsyncIterator:
prompt = f"create a image with: {prompt}"
if provider.__name__ == "You":
kwargs["chat_mode"] = "create"
return provider.create_async_generator(
model,
[{"role": "user", "content": prompt}],
True,
stream=True,
proxy=client.get_proxy(),
**kwargs
)

class Images():
def __init__(self, client: Client, provider: ImageProvider = None):
self.client: Client = client
def __init__(self, client: AsyncClient, provider: ImageProvider = None):
self.client: AsyncClient = client
self.provider: ImageProvider = provider
self.models: ImageModels = ImageModels(client)

async def generate(self, prompt, model: str = None, **kwargs) -> ImagesResponse:
async def generate(self, prompt, model: str = "", **kwargs) -> ImagesResponse:
provider = self.models.get(model, self.provider)
if isinstance(provider, type) and issubclass(provider, BaseProvider):
if isinstance(provider, type) and issubclass(provider, AsyncGeneratorProvider):
response = create_image(self.client, provider, prompt, **kwargs)
else:
try:
response = list(provider.create(prompt))
except (RateLimitError, MissingAuthError) as e:
# Fallback for default provider
if self.provider is None:
response = create_image(self.client, self.models.you, prompt, model or "dall-e", **kwargs)
else:
raise e
image = iter_image_response(response)
response = await provider.create_async(prompt)
return ImagesResponse([Image(image) for image in response.get_list()])
image = await iter_image_response(response)
if image is None:
raise NoImageResponseError()
return image
@@ -180,7 +187,7 @@ class Images():
async def create_variation(self, image: ImageType, model: str = None, **kwargs):
provider = self.models.get(model, self.provider)
result = None
if isinstance(provider, type) and issubclass(provider, BaseProvider):
if isinstance(provider, type) and issubclass(provider, AsyncGeneratorProvider):
response = provider.create_async_generator(
"",
[{"role": "user", "content": "create a image like this"}],
@@ -189,10 +196,7 @@ class Images():
proxy=self.client.get_proxy(),
**kwargs
)
async for chunk in response:
if isinstance(chunk, ImageProviderResponse):
result = ([chunk.images] if isinstance(chunk.images, str) else chunk.images)
result = ImagesResponse([Image(image)for image in result])
result = iter_image_response(response)
if result is None:
raise NoImageResponseError()
return result
return result
@@ -1,40 +1,19 @@
from __future__ import annotations

import re
import os
import time
import random
import string

from ..typing import Union, Iterator, Messages, ImageType
from ..providers.types import BaseProvider, ProviderType, FinishReason
from ..image import ImageResponse as ImageProviderResponse
from ..errors import NoImageResponseError
from .stubs import ChatCompletion, ChatCompletionChunk, Image, ImagesResponse
from .typing import Union, Iterator, Messages, ImageType
from .providers.types import BaseProvider, ProviderType, FinishReason
from .image import ImageResponse as ImageProviderResponse
from .errors import NoImageResponseError, RateLimitError, MissingAuthError
from . import get_model_and_provider, get_last_provider

from .Provider.BingCreateImages import BingCreateImages
from .Provider.needs_auth import Gemini, OpenaiChat
from .Provider.You import You

ImageProvider = Union[BaseProvider, object]
Proxies = Union[dict, str]
IterResponse = Iterator[Union[ChatCompletion, ChatCompletionChunk]]

def read_json(text: str) -> dict:
"""
Parses JSON code block from a string.

Args:
text (str): A string containing a JSON code block.

Returns:
dict: A dictionary parsed from the JSON code block.
"""
match = re.search(r"```(json|)\n(?P<code>[\S\s]+?)\n```", text)
if match:
return match.group("code")
return text
from .image_models import ImageModels
from .types import IterResponse, ImageProvider
from .types import Client as BaseClient
from .service import get_model_and_provider, get_last_provider
from .helper import find_stop, filter_json, filter_none

def iter_response(
response: iter[str],
@@ -53,20 +32,7 @@ def iter_response(
content += str(chunk)
if max_tokens is not None and idx + 1 >= max_tokens:
finish_reason = "length"
first = -1
word = None
if stop is not None:
for word in list(stop):
first = content.find(word)
if first != -1:
content = content[:first]
break
if stream and first != -1:
first = chunk.find(word)
if first != -1:
chunk = chunk[:first]
else:
first = 0
first, content, chunk = find_stop(stop, content, chunk if stream else None)
if first != -1:
finish_reason = "stop"
if stream:
@@ -79,7 +45,7 @@ def iter_response(
else:
if response_format is not None and "type" in response_format:
if response_format["type"] == "json_object":
content = read_json(content)
content = filter_json(content)
yield ChatCompletion(content, finish_reason, completion_id, int(time.time()))

def iter_append_model_and_provider(response: IterResponse) -> IterResponse:
@@ -90,37 +56,17 @@ def iter_append_model_and_provider(response: IterResponse) -> IterResponse:
chunk.provider = last_provider.get("name")
yield chunk

class Client():

class Client(BaseClient):
def __init__(
self,
api_key: str = None,
proxies: Proxies = None,
provider: ProviderType = None,
image_provider: ImageProvider = None,
**kwargs
) -> None:
self.api_key: str = api_key
self.proxies: Proxies = proxies
super().__init__(**kwargs)
self.chat: Chat = Chat(self, provider)
self.images: Images = Images(self, image_provider)

def get_proxy(self) -> Union[str, None]:
if isinstance(self.proxies, str):
return self.proxies
elif self.proxies is None:
return os.environ.get("G4F_PROXY")
elif "all" in self.proxies:
return self.proxies["all"]
elif "https" in self.proxies:
return self.proxies["https"]

def filter_none(**kwargs):
for key in list(kwargs.keys()):
if kwargs[key] is None:
del kwargs[key]
return kwargs

class Completions():
def __init__(self, client: Client, provider: ProviderType = None):
self.client: Client = client
@@ -132,6 +78,7 @@ class Completions():
model: str,
provider: ProviderType = None,
stream: bool = False,
proxy: str = None,
response_format: dict = None,
max_tokens: int = None,
stop: Union[list[str], str] = None,
@@ -148,13 +95,12 @@ class Completions():
ignored,
ignore_working,
ignore_stream,
**kwargs
)
stop = [stop] if isinstance(stop, str) else stop
response = provider.create_completion(
model, messages, stream,
**filter_none(
proxy=self.client.get_proxy(),
proxy=self.client.get_proxy() if proxy is None else proxy,
max_tokens=max_tokens,
stop=stop,
api_key=self.client.api_key if api_key is None else api_key
@@ -171,18 +117,6 @@ class Chat():
def __init__(self, client: Client, provider: ProviderType = None):
self.completions = Completions(client, provider)

class ImageModels():
gemini = Gemini
openai = OpenaiChat
you = You

def __init__(self, client: Client) -> None:
self.client = client
self.default = BingCreateImages(proxy=self.client.get_proxy())

def get(self, name: str, default: ImageProvider = None) -> ImageProvider:
return getattr(self, name) if hasattr(self, name) else default or self.default

def iter_image_response(response: Iterator) -> Union[ImagesResponse, None]:
for chunk in list(response):
if isinstance(chunk, ImageProviderResponse):
@@ -190,10 +124,12 @@ def iter_image_response(response: Iterator) -> Union[ImagesResponse, None]:

def create_image(client: Client, provider: ProviderType, prompt: str, model: str = "", **kwargs) -> Iterator:
prompt = f"create a image with: {prompt}"
if provider.__name__ == "You":
kwargs["chat_mode"] = "create"
return provider.create_completion(
model,
[{"role": "user", "content": prompt}],
True,
stream=True,
proxy=client.get_proxy(),
**kwargs
)
@@ -209,14 +145,7 @@ class Images():
if isinstance(provider, type) and issubclass(provider, BaseProvider):
response = create_image(self.client, provider, prompt, **kwargs)
else:
try:
response = list(provider.create(prompt))
except (RateLimitError, MissingAuthError) as e:
# Fallback for default provider
if self.provider is None:
response = create_image(self.client, self.models.you, prompt, model or "dall-e", **kwargs)
else:
raise e
response = list(provider.create(prompt))
image = iter_image_response(response)
if image is None:
raise NoImageResponseError()
@@ -234,10 +163,7 @@ class Images():
proxy=self.client.get_proxy(),
**kwargs
)
for chunk in response:
if isinstance(chunk, ImageProviderResponse):
result = ([chunk.images] if isinstance(chunk.images, str) else chunk.images)
result = ImagesResponse([Image(image)for image in result])
result = iter_image_response(response)
if result is None:
raise NoImageResponseError()
return result
@@ -1,6 +1,9 @@
import re
from __future__ import annotations

def read_json(text: str) -> dict:
import re
from typing import Iterable, AsyncIterator

def filter_json(text: str) -> str:
"""
Parses JSON code block from a string.

@@ -15,7 +18,7 @@ def read_json(text: str) -> dict:
return match.group("code")
return text

def find_stop(stop, content: str, chunk: str):
def find_stop(stop, content: str, chunk: str = None):
first = -1
word = None
if stop is not None:
@@ -24,10 +27,21 @@ def find_stop(stop, content: str, chunk: str):
if first != -1:
content = content[:first]
break
if stream and first != -1:
if chunk is not None and first != -1:
first = chunk.find(word)
if first != -1:
chunk = chunk[:first]
else:
first = 0
return first, content, chunk

def filter_none(**kwargs) -> dict:
return {
key: value
for key, value in kwargs.items()
if value is not None
}

async def cast_iter_async(iter: Iterable) -> AsyncIterator:
for chunk in iter:
yield chunk
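For reference (not part of the diff), the shared helpers now live in the client package; a small sketch of how the extended find_stop and filter_none behave, assuming the module path g4f/client/helper.py used by the imports above:

# Example (illustrative only): find_stop cuts accumulated content at the first stop
# word and, when a chunk is passed (streaming case), truncates the chunk as well.
from g4f.client.helper import find_stop, filter_none

first, content, chunk = find_stop(["\nUser:"], "Hello\nUser: next turn", chunk=None)
print(first, repr(content))        # index of the stop word; content cut before it

print(filter_none(a=1, b=None))    # {'a': 1} - None values are dropped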
@@ -1,8 +1,10 @@
from .Provider.BingCreateImages import BingCreateImages
from .Provider.needs_auth import Gemini, OpenaiChat
from ..Provider.You import You
from __future__ import annotations

from .types import Client
from .types import Client, ImageProvider

from ..Provider.BingCreateImages import BingCreateImages
from ..Provider.needs_auth import Gemini, OpenaiChat
from ..Provider.You import You

class ImageModels():
gemini = Gemini
114  g4f/client/service.py  Normal file
@@ -0,0 +1,114 @@
from __future__ import annotations

from typing import Union

from .. import debug, version
from ..errors import ProviderNotFoundError, ModelNotFoundError, ProviderNotWorkingError, StreamNotSupportedError
from ..models import Model, ModelUtils
from ..Provider import ProviderUtils
from ..providers.types import BaseRetryProvider, ProviderType
from ..providers.retry_provider import IterProvider

def convert_to_provider(provider: str) -> ProviderType:
if " " in provider:
provider_list = [ProviderUtils.convert[p] for p in provider.split() if p in ProviderUtils.convert]
if not provider_list:
raise ProviderNotFoundError(f'Providers not found: {provider}')
provider = IterProvider(provider_list)
elif provider in ProviderUtils.convert:
provider = ProviderUtils.convert[provider]
elif provider:
raise ProviderNotFoundError(f'Provider not found: {provider}')
return provider

def get_model_and_provider(model : Union[Model, str],
provider : Union[ProviderType, str, None],
stream : bool,
ignored : list[str] = None,
ignore_working: bool = False,
ignore_stream: bool = False) -> tuple[str, ProviderType]:
"""
Retrieves the model and provider based on input parameters.

Args:
model (Union[Model, str]): The model to use, either as an object or a string identifier.
provider (Union[ProviderType, str, None]): The provider to use, either as an object, a string identifier, or None.
stream (bool): Indicates if the operation should be performed as a stream.
ignored (list[str], optional): List of provider names to be ignored.
ignore_working (bool, optional): If True, ignores the working status of the provider.
ignore_stream (bool, optional): If True, ignores the streaming capability of the provider.

Returns:
tuple[str, ProviderType]: A tuple containing the model name and the provider type.

Raises:
ProviderNotFoundError: If the provider is not found.
ModelNotFoundError: If the model is not found.
ProviderNotWorkingError: If the provider is not working.
StreamNotSupportedError: If streaming is not supported by the provider.
"""
if debug.version_check:
debug.version_check = False
version.utils.check_version()

if isinstance(provider, str):
provider = convert_to_provider(provider)

if isinstance(model, str):
if model in ModelUtils.convert:
model = ModelUtils.convert[model]

if not provider:
if isinstance(model, str):
raise ModelNotFoundError(f'Model not found: {model}')
provider = model.best_provider

if not provider:
raise ProviderNotFoundError(f'No provider found for model: {model}')

if isinstance(model, Model):
model = model.name

if not ignore_working and not provider.working:
raise ProviderNotWorkingError(f'{provider.__name__} is not working')

if not ignore_working and isinstance(provider, BaseRetryProvider):
provider.providers = [p for p in provider.providers if p.working]

if ignored and isinstance(provider, BaseRetryProvider):
provider.providers = [p for p in provider.providers if p.__name__ not in ignored]

if not ignore_stream and not provider.supports_stream and stream:
raise StreamNotSupportedError(f'{provider.__name__} does not support "stream" argument')

if debug.logging:
if model:
print(f'Using {provider.__name__} provider and {model} model')
else:
print(f'Using {provider.__name__} provider')

debug.last_provider = provider
debug.last_model = model

return model, provider

def get_last_provider(as_dict: bool = False) -> Union[ProviderType, dict[str, str]]:
"""
Retrieves the last used provider.

Args:
as_dict (bool, optional): If True, returns the provider information as a dictionary.

Returns:
Union[ProviderType, dict[str, str]]: The last used provider, either as an object or a dictionary.
"""
last = debug.last_provider
if isinstance(last, BaseRetryProvider):
last = last.last_provider
if last and as_dict:
return {
"name": last.__name__,
"url": last.url,
"model": debug.last_model,
}
return last
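A brief sketch (illustrative, not from the diff) of the resolution helpers now exposed from g4f.client.service:

# Example (illustrative only): resolving a model string to a concrete provider
# with the new g4f.client.service module.
from g4f.client.service import get_model_and_provider, get_last_provider

model, provider = get_model_and_provider("gpt-3.5-turbo", provider=None, stream=False)
print(model, provider.__name__)
print(get_last_provider(as_dict=True))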
@@ -1,9 +1,15 @@
from __future__ import annotations

import os

from .stubs import ChatCompletion, ChatCompletionChunk
from ..providers.types import BaseProvider, ProviderType, FinishReason
from typing import Union, Iterator
from typing import Union, Iterator, AsyncIterator

ImageProvider = Union[BaseProvider, object]
Proxies = Union[dict, str]
IterResponse = Iterator[Union[ChatCompletion, ChatCompletionChunk]]
AsyncIterResponse = AsyncIterator[Union[ChatCompletion, ChatCompletionChunk]]

class ClientProxyMixin():
def get_proxy(self) -> Union[str, None]:
@@ -21,9 +27,7 @@ class Client(ClientProxyMixin):
self,
api_key: str = None,
proxies: Proxies = None,
provider: ProviderType = None,
image_provider: ImageProvider = None,
**kwargs
) -> None:
self.api_key: str = api_key
self.proxies: Proxies = proxies
self.proxies: Proxies = proxies
@@ -10,7 +10,7 @@ except ImportError as e:

def run_gui(host: str = '0.0.0.0', port: int = 8080, debug: bool = False) -> None:
if import_error is not None:
raise MissingRequirementsError(f'Install "gui" requirements | pip install g4f[gui] -U\n{import_error}')
raise MissingRequirementsError(f'Install "gui" requirements | pip install -U g4f[gui]\n{import_error}')

if debug:
from g4f import debug
@@ -20,7 +20,7 @@ def run_gui(host: str = '0.0.0.0', port: int = 8080, debug: bool = False) -> Non
'port' : port,
'debug': debug
}

site = Website(app)
for route in site.routes:
app.add_url_rule(
@@ -28,7 +28,7 @@ def run_gui(host: str = '0.0.0.0', port: int = 8080, debug: bool = False) -> Non
view_func = site.routes[route]['function'],
methods = site.routes[route]['methods'],
)

backend_api = Backend_Api(app)
for route in backend_api.routes:
app.add_url_rule(
@@ -36,7 +36,7 @@ def run_gui(host: str = '0.0.0.0', port: int = 8080, debug: bool = False) -> Non
view_func = backend_api.routes[route]['function'],
methods = backend_api.routes[route]['methods'],
)

print(f"Running on port {config['port']}")
app.run(**config)
print(f"Closing port {config['port']}")
@@ -77,17 +77,35 @@
</div>
</div>
<div class="settings">
<textarea name="OpenaiChat[api_key]" class="box" placeholder="OpenaiChat: accessToken"></textarea>
<div class="field">
<input id="auto_continue" type="checkbox" name="OpenaiChat[auto_continue]" checked/>
<label for="auto_continue" title=""></label>
<span class="about">Auto Continue</span>
<div class="field box">
<label for="OpenaiChat-api_key" class="label" title="">OpenaiChat: access_token</label>
<textarea id="OpenaiChat-api_key" name="OpenaiChat[api_key]" placeholder="..."></textarea>
</div>
<div class="field">
<span class="label">OpenaiChat: Auto continue</span>
<input id="OpenaiChat-auto_continue" type="checkbox" name="OpenaiChat[auto_continue]" checked/>
<label for="OpenaiChat-auto_continue" class="toogle" title=""></label>
</div>
<div class="field box">
<label for="Bing-api_key" class="label" title="">Bing: "_U" cookie</label>
<textarea id="Bing-api_key" name="Bing[api_key]" placeholder="..."></textarea>
</div>
<div class="field box">
<label for="Gemini-api_key" class="label" title="">Gemini: Auth cookies</label>
<textarea id="Gemini-api_key" name="Gemini[api_key]" placeholder="..."></textarea>
</div>
<div class="field box">
<label for="Openai-api_key" class="label" title="">Openai: api_key</label>
<textarea id="Openai-api_key" name="Openai[api_key]" placeholder="..."></textarea>
</div>
<div class="field box">
<label for="GeminiPro-api_key" class="label" title="">GeminiPro: api_key</label>
<textarea id="GeminiPro-api_key" name="GeminiPro[api_key]" placeholder="..."></textarea>
</div>
<div class="field box">
<label for="HuggingFace-api_key" class="label" title="">HuggingFace: api_key</label>
<textarea id="HuggingFace-api_key" name="HuggingFace[api_key]" placeholder="..."></textarea>
</div>
<textarea name="Bing[api_key]" class="box" placeholder="Bing: _U cookie"></textarea>
<textarea name="Gemini[api_key]" class="box" placeholder="Gemini: Auth cookies"></textarea>
<textarea name="Openai[api_key]" class="box" placeholder="Openai: api_key"></textarea>
<textarea name="Grok[api_key]" class="box" placeholder="Grok: api_key"></textarea>
<textarea name="GeminiPro[api_key]" class="box" placeholder="GeminiPro: api_key"></textarea>
</div>
<div class="conversation">
<textarea id="systemPrompt" class="box" placeholder="System prompt"></textarea>
@@ -520,7 +520,7 @@ label[for="camera"] {
}

.buttons label,
.settings label {
.settings label.toogle {
cursor: pointer;
text-indent: -9999px;
width: 50px;
@@ -538,7 +538,7 @@ label[for="camera"] {
}

.buttons label:after,
.settings label:after {
.settings label.toogle:after {
content: "";
position: absolute;
top: 50%;
@@ -560,17 +560,13 @@ label[for="camera"] {
left: calc(100% - 5px - 20px);
}

.buttons, .settings {
.buttons {
display: flex;
align-items: center;
justify-content: left;
width: 100%;
}

.settings textarea{
height: 20px;
}

.field {
height: fit-content;
display: flex;
@@ -1017,7 +1013,7 @@ a:-webkit-any-link {
border: 1px solid #e4d4ffc9;
}

#systemPrompt {
#systemPrompt, .settings textarea {
font-size: 15px;
width: 100%;
color: var(--colour-3);
@@ -1028,6 +1024,30 @@ a:-webkit-any-link {
resize: vertical;
}

.settings {
width: 100%;
display: none;
}

.settings .field {
margin: var(--inner-gap) 0;
}

.settings textarea {
background-color: transparent;
border: none;
padding: var(--inner-gap) 0;
}

.settings .label {
font-size: 15px;
padding: var(--inner-gap) 0;
width: fit-content;
min-width: 190px;
margin-left: var(--section-gap);
white-space:nowrap;
}

::-webkit-scrollbar {
width: 10px;
}
@@ -98,7 +98,7 @@ class Api():
        if conversation_id and provider in conversations and conversation_id in conversations[provider]:
            kwargs["conversation"] = conversations[provider][conversation_id]

        model = json_data.get('model', models.default)
        model = json_data.get('model') or models.default

        return {
            "model": model,
@@ -169,4 +169,8 @@ def get_error_message(exception: Exception) -> str:
    Returns:
        str: A formatted error message string.
    """
    return f"{get_last_provider().__name__}: {type(exception).__name__}: {exception}"
    message = f"{type(exception).__name__}: {exception}"
    provider = get_last_provider()
    if provider is None:
        return message
    return f"{provider.__name__}: {message}"
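For illustration only, a minimal sketch of what the reworked get_error_message returns; the exception and call site are hypothetical, and it assumes the function is in scope in the same module:

    # If no provider has been used yet, get_last_provider() returns None and
    # only the exception itself is reported.
    try:
        raise TimeoutError("request timed out")
    except Exception as error:
        print(get_error_message(error))  # "TimeoutError: request timed out"
    # After a request went through a provider, its name is prefixed, e.g.
    # "OpenaiChat: TimeoutError: request timed out".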
@@ -31,7 +31,7 @@ def run_webview(
        f"g4f - {g4f.version.utils.current_version}",
        os.path.join(dirname, "client/index.html"),
        text_select=True,
        js_api=Api(),
        js_api=JsApi(),
    )
    if has_platformdirs and storage_path is None:
        storage_path = user_config_dir("g4f-webview")
@@ -1,17 +1,17 @@
from ..typing import Union, Iterator, Messages
from ..stubs import ChatCompletion, ChatCompletionChunk
from ._engine import LocalProvider
from ._models import models
from ..client import iter_response, filter_none, IterResponse
from ..typing import Union, Messages
from ..locals.provider import LocalProvider
from ..locals.models import get_models
from ..client.client import iter_response, filter_none
from ..client.types import IterResponse

class LocalClient():
    def __init__(self, **kwargs) -> None:
        self.chat: Chat = Chat(self)


    @staticmethod
    def list_models():
        return list(models.keys())

        return list(get_models())

class Completions():
    def __init__(self, client: LocalClient):
        self.client: LocalClient = client
@@ -25,8 +25,7 @@ class Completions():
        max_tokens: int = None,
        stop: Union[list[str], str] = None,
        **kwargs
    ) -> Union[ChatCompletion, Iterator[ChatCompletionChunk]]:

    ) -> IterResponse:
        stop = [stop] if isinstance(stop, str) else stop
        response = LocalProvider.create_completion(
            model, messages, stream,
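A short usage sketch for the reworked local client, assuming the Chat wrapper exposes the Completions object as chat.completions (as the remote client does) and that a matching GPT4All model file is available locally; the model name is a placeholder:

    from g4f.local import LocalClient

    client = LocalClient()
    print(client.list_models())  # names resolved via get_models()

    # Non-streaming call; iter_response is assumed to return a ChatCompletion here.
    response = client.chat.completions.create(
        model="mistral-7b-instruct",  # placeholder, must exist in models/
        messages=[{"role": "user", "content": "Hello"}],
        stream=False,
    )
    print(response.choices[0].message.content)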
@@ -1,42 +0,0 @@
import os

from gpt4all import GPT4All
from ._models import models

class LocalProvider:
    @staticmethod
    def create_completion(model, messages, stream, **kwargs):
        if model not in models:
            raise ValueError(f"Model '{model}' not found / not yet implemented")

        model = models[model]
        model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'models/')
        full_model_path = os.path.join(model_dir, model['path'])

        if not os.path.isfile(full_model_path):
            print(f"Model file '{full_model_path}' not found.")
            download = input(f'Do you want to download {model["path"]} ? [y/n]')

            if download in ['y', 'Y']:
                GPT4All.download_model(model['path'], model_dir)
            else:
                raise ValueError(f"Model '{model['path']}' not found.")

        model = GPT4All(model_name=model['path'],
                        #n_threads=8,
                        verbose=False,
                        allow_download=False,
                        model_path=model_dir)

        system_template = next((message['content'] for message in messages if message['role'] == 'system'),
                               'A chat between a curious user and an artificial intelligence assistant.')

        prompt_template = 'USER: {0}\nASSISTANT: '
        conversation = '\n'.join(f"{msg['role'].upper()}: {msg['content']}" for msg in messages) + "\nASSISTANT: "

        with model.chat_session(system_template, prompt_template):
            if stream:
                for token in model.generate(conversation, streaming=True):
                    yield token
            else:
                yield model.generate(conversation)
@@ -1,86 +0,0 @@
models = {
    "mistral-7b": {
        "path": "mistral-7b-openorca.gguf2.Q4_0.gguf",
        "ram": "8",
        "prompt": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n",
        "system": "<|im_start|>system\nYou are MistralOrca, a large language model trained by Alignment Lab AI. For multi-step problems, write out your reasoning for each step.\n<|im_end|>"
    },
    "mistral-7b-instruct": {
        "path": "mistral-7b-instruct-v0.1.Q4_0.gguf",
        "ram": "8",
        "prompt": "[INST] %1 [/INST]",
        "system": None
    },
    "gpt4all-falcon": {
        "path": "gpt4all-falcon-newbpe-q4_0.gguf",
        "ram": "8",
        "prompt": "### Instruction:\n%1\n### Response:\n",
        "system": None
    },
    "orca-2": {
        "path": "orca-2-13b.Q4_0.gguf",
        "ram": "16",
        "prompt": None,
        "system": None
    },
    "wizardlm-13b": {
        "path": "wizardlm-13b-v1.2.Q4_0.gguf",
        "ram": "16",
        "prompt": None,
        "system": None
    },
    "nous-hermes-llama2": {
        "path": "nous-hermes-llama2-13b.Q4_0.gguf",
        "ram": "16",
        "prompt": "### Instruction:\n%1\n### Response:\n",
        "system": None
    },
    "gpt4all-13b-snoozy": {
        "path": "gpt4all-13b-snoozy-q4_0.gguf",
        "ram": "16",
        "prompt": None,
        "system": None
    },
    "mpt-7b-chat": {
        "path": "mpt-7b-chat-newbpe-q4_0.gguf",
        "ram": "8",
        "prompt": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n",
        "system": "<|im_start|>system\n- You are a helpful assistant chatbot trained by MosaicML.\n- You answer questions.\n- You are excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user.\n- You are more than just an information source, you are also able to write poetry, short stories, and make jokes.<|im_end|>"
    },
    "orca-mini-3b": {
        "path": "orca-mini-3b-gguf2-q4_0.gguf",
        "ram": "4",
        "prompt": "### User:\n%1\n### Response:\n",
        "system": "### System:\nYou are an AI assistant that follows instruction extremely well. Help as much as you can.\n\n"
    },
    "replit-code-3b": {
        "path": "replit-code-v1_5-3b-newbpe-q4_0.gguf",
        "ram": "4",
        "prompt": "%1",
        "system": None
    },
    "starcoder": {
        "path": "starcoder-newbpe-q4_0.gguf",
        "ram": "4",
        "prompt": "%1",
        "system": None
    },
    "rift-coder-7b": {
        "path": "rift-coder-v0-7b-q4_0.gguf",
        "ram": "8",
        "prompt": "%1",
        "system": None
    },
    "all-MiniLM-L6-v2": {
        "path": "all-MiniLM-L6-v2-f16.gguf",
        "ram": "1",
        "prompt": None,
        "system": None
    },
    "mistral-7b-german": {
        "path": "em_german_mistral_v01.Q4_0.gguf",
        "ram": "8",
        "prompt": "USER: %1 ASSISTANT: ",
        "system": "Du bist ein hilfreicher Assistent. "
    }
}
@@ -1 +0,0 @@
.

0  g4f/locals/__init__.py  Normal file

50  g4f/locals/models.py  Normal file
@@ -0,0 +1,50 @@

import os
import requests
import json

from ..requests.raise_for_status import raise_for_status

def load_models():
    response = requests.get("https://gpt4all.io/models/models3.json")
    raise_for_status(response)
    return format_models(response.json())

def get_model_name(filename: str) -> str:
    name = filename.split(".", 1)[0]
    for replace in ["-v1_5", "-v1", "-q4_0", "_v01", "-v0", "-f16", "-gguf2", "-newbpe"]:
        name = name.replace(replace, "")
    return name

def format_models(models: list) -> dict:
    return {get_model_name(model["filename"]): {
        "path": model["filename"],
        "ram": model["ramrequired"],
        "prompt": model["promptTemplate"] if "promptTemplate" in model else None,
        "system": model["systemPrompt"] if "systemPrompt" in model else None,
    } for model in models}

def read_models(file_path: str):
    with open(file_path, "rb") as f:
        return json.load(f)

def save_models(file_path: str, data):
    with open(file_path, 'w') as f:
        json.dump(data, f, indent=4)

def get_model_dir() -> str:
    local_dir = os.path.dirname(os.path.abspath(__file__))
    project_dir = os.path.dirname(os.path.dirname(local_dir))
    model_dir = os.path.join(project_dir, "models")
    if os.path.exists(model_dir):
        return model_dir

def get_models() -> dict[str, dict]:
    model_dir = get_model_dir()
    file_path = os.path.join(model_dir, "models.json")
    if os.path.isfile(file_path):
        return read_models(file_path)
    else:
        models = load_models()
        save_models(file_path, models)
        return models

72  g4f/locals/provider.py  Normal file
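A brief sketch of how the new locals.models helpers might be used; it assumes a writable models/ directory exists at the project root so get_model_dir() resolves to it:

    from g4f.locals.models import get_models

    # First call fetches and caches models.json from gpt4all.io;
    # later calls read the cached file from the models/ directory.
    models = get_models()
    for name, info in models.items():
        print(f"{name}: {info['path']} (~{info['ram']} GB RAM)")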
@@ -0,0 +1,72 @@
import os

from gpt4all import GPT4All
from .models import get_models
from ..typing import Messages

MODEL_LIST: dict[str, dict] = None

def find_model_dir(model_file: str) -> str:
    local_dir = os.path.dirname(os.path.abspath(__file__))
    project_dir = os.path.dirname(os.path.dirname(local_dir))

    new_model_dir = os.path.join(project_dir, "models")
    new_model_file = os.path.join(new_model_dir, model_file)
    if os.path.isfile(new_model_file):
        return new_model_dir

    old_model_dir = os.path.join(local_dir, "models")
    old_model_file = os.path.join(old_model_dir, model_file)
    if os.path.isfile(old_model_file):
        return old_model_dir

    working_dir = "./"
    for root, dirs, files in os.walk(working_dir):
        if model_file in files:
            return root

    return new_model_dir

class LocalProvider:
    @staticmethod
    def create_completion(model: str, messages: Messages, stream: bool = False, **kwargs):
        global MODEL_LIST
        if MODEL_LIST is None:
            MODEL_LIST = get_models()
        if model not in MODEL_LIST:
            raise ValueError(f'Model "{model}" not found / not yet implemented')

        model = MODEL_LIST[model]
        model_file = model["path"]
        model_dir = find_model_dir(model_file)
        if not os.path.isfile(os.path.join(model_dir, model_file)):
            print(f'Model file "models/{model_file}" not found.')
            download = input(f"Do you want to download {model_file}? [y/n]: ")
            if download in ["y", "Y"]:
                GPT4All.download_model(model_file, model_dir)
            else:
                raise ValueError(f'Model "{model_file}" not found.')

        model = GPT4All(model_name=model_file,
                        #n_threads=8,
                        verbose=False,
                        allow_download=False,
                        model_path=model_dir)

        # Fall back to a default system prompt when no system message was supplied.
        system_message = "\n".join(message["content"] for message in messages if message["role"] == "system")
        if not system_message:
            system_message = "A chat between a curious user and an artificial intelligence assistant."

        prompt_template = "USER: {0}\nASSISTANT: "
        conversation = "\n".join(
            f"{message['role'].upper()}: {message['content']}"
            for message in messages
            if message["role"] != "system"
        ) + "\nASSISTANT: "

        with model.chat_session(system_message, prompt_template):
            if stream:
                for token in model.generate(conversation, streaming=True):
                    yield token
            else:
                yield model.generate(conversation)
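A hedged usage sketch for the new LocalProvider; the model name is a placeholder, and the corresponding .gguf file must already be present in the models/ directory (otherwise the interactive download prompt shown above is triggered):

    from g4f.locals.provider import LocalProvider

    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Say hello."},
    ]
    # create_completion is a generator; with stream=True it yields tokens one by one.
    for token in LocalProvider.create_completion("mistral-7b-instruct", messages, stream=True):
        print(token, end="", flush=True)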
@@ -96,6 +96,7 @@ class BaseRetryProvider(BaseProvider):

    __name__: str = "RetryProvider"
    supports_stream: bool = True
    last_provider: Type[BaseProvider] = None

ProviderType = Union[Type[BaseProvider], BaseRetryProvider]

0  models/.local-model-here  Normal file