Improve gradient in GUI, show only models with enabled providers (#2492)

- **Cloudflare Provider**: Added handling for a missing `nodriver` requirement when fetching request arguments; the provider now falls back to default headers instead of failing.
- **Copilot Provider**: Replaced the inline prompt-trimming logic with the new `format_prompt_max_length` helper (10,000-character cap), improving message handling.
- **PollinationsAI Provider**: Prompts are now trimmed to a maximum of 5,000 characters via the same helper.
- **GitHub Copilot Provider**: Switched to aiohttp's `ClientSession` with explicit browser-style headers for better session management.
- **CSS Updates**: Reworked the animated background gradient and theme variables in the GUI for a more visually appealing interface.
- **JavaScript Updates**: Added a search toggle button in the chat interface that stays in sync with the web-search switch.
H Lohaus 2024-12-17 21:17:40 +01:00 committed by GitHub
parent 0332d0d820
commit bbb858249b
20 changed files with 254 additions and 199 deletions

View File

@@ -287,6 +287,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
return
response_text = re.sub(r'Generated by BLACKBOX.AI, try unlimited chat https://www.blackbox.ai', '', response_text, flags=re.DOTALL)
response_text = re.sub(r'and for API requests replace https://www.blackbox.ai with https://api.blackbox.ai', '', response_text, flags=re.DOTALL)
json_match = re.search(r'\$~~~\$(.*?)\$~~~\$', response_text, re.DOTALL)
if json_match:

View File

@@ -5,8 +5,8 @@ import json
from ..typing import AsyncResult, Messages, Cookies
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin, get_running_loop
from ..requests import Session, StreamSession, get_args_from_nodriver, raise_for_status, merge_cookies
from ..errors import ResponseStatusError
from ..requests import Session, StreamSession, get_args_from_nodriver, raise_for_status, merge_cookies, DEFAULT_HEADERS
from ..errors import ResponseStatusError, MissingRequirementsError
class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
label = "Cloudflare AI"
@@ -62,7 +62,10 @@ class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
**kwargs
) -> AsyncResult:
if cls._args is None:
cls._args = await get_args_from_nodriver(cls.url, proxy, timeout, cookies)
try:
cls._args = await get_args_from_nodriver(cls.url, proxy, timeout, cookies)
except MissingRequirementsError:
cls._args = {"headers": DEFAULT_HEADERS, cookies: {}}
model = cls.get_model(model)
data = {
"messages": messages,

View File

@@ -18,7 +18,7 @@ except ImportError:
has_nodriver = False
from .base_provider import AbstractProvider, ProviderModelMixin, BaseConversation
from .helper import format_prompt
from .helper import format_prompt_max_length
from ..typing import CreateResult, Messages, ImagesType
from ..errors import MissingRequirementsError, NoValidHarFileError
from ..requests.raise_for_status import raise_for_status
@@ -120,16 +120,7 @@ class Copilot(AbstractProvider, ProviderModelMixin):
conversation_id = response.json().get("id")
if return_conversation:
yield Conversation(conversation_id)
prompt = format_prompt(messages)
if len(prompt) > 10000:
if len(messages) > 6:
prompt = format_prompt(messages[:3] + messages[-3:])
if len(prompt) > 10000:
if len(messages) > 2:
prompt = format_prompt(messages[:2] + messages[-1:])
if len(prompt) > 10000:
prompt = messages[-1]["content"]
debug.log(f"Copilot: Trim messages to: {len(prompt)}")
prompt = format_prompt_max_length(messages, 10000)
debug.log(f"Copilot: Created conversation: {conversation_id}")
else:
conversation_id = conversation.conversation_id

View File

@@ -10,7 +10,7 @@ from ..image import ImageResponse
from ..requests.raise_for_status import raise_for_status
from ..requests.aiohttp import get_connector
from .needs_auth.OpenaiAPI import OpenaiAPI
from .helper import format_prompt
from .helper import format_prompt_max_length
class PollinationsAI(OpenaiAPI):
label = "Pollinations AI"
@@ -97,7 +97,7 @@ class PollinationsAI(OpenaiAPI):
async def _generate_text(cls, model: str, messages: Messages, api_key: str = None, proxy: str = None, **kwargs):
if api_key is None:
async with ClientSession(connector=get_connector(proxy=proxy), headers=cls.headers) as session:
prompt = format_prompt(messages)
prompt = format_prompt_max_length(messages, 5000)
async with session.get(f"https://text.pollinations.ai/{quote(prompt)}?model={quote(model)}") as response:
await raise_for_status(response)
async for line in response.content.iter_any():

View File

@@ -207,7 +207,7 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
) as session:
if not cls._snlm0e:
await cls.fetch_snlm0e(session, cls._cookies) if cls._cookies else None
inner_data = json.dumps([None, params["text"], "de-DE", None, 2])
inner_data = json.dumps([None, params["text"], "en-US", None, 2])
async with session.post(
"https://gemini.google.com/_/BardChatUi/data/batchexecute",
data={
@@ -334,8 +334,11 @@ async def iter_filter_base64(response_iter: AsyncIterator[bytes]) -> AsyncIterator[bytes]:
async def iter_base64_decode(response_iter: AsyncIterator[bytes]) -> AsyncIterator[bytes]:
buffer = b""
rest = 0
async for chunk in response_iter:
chunk = buffer + chunk
rest = len(chunk) % 4
buffer = chunk[-rest:]
yield base64.b64decode(chunk[:-rest])
if rest > 0:
yield base64.b64decode(buffer+rest*b"=")
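
The new `iter_base64_decode` buffers bytes so that only complete 4-character base64 groups are decoded per chunk, with any remainder padded at the end of the stream. A hedged standalone sketch of that buffering technique (it also covers the case where a chunk boundary aligns exactly, i.e. the remainder is zero):

```python
import base64
from typing import AsyncIterator

async def decode_base64_stream(chunks: AsyncIterator[bytes]) -> AsyncIterator[bytes]:
    buffer = b""
    async for chunk in chunks:
        buffer += chunk
        rest = len(buffer) % 4
        aligned, buffer = (buffer[:-rest], buffer[-rest:]) if rest else (buffer, b"")
        if aligned:
            # Decode only complete 4-character base64 groups.
            yield base64.b64decode(aligned)
    if buffer:
        # Pad the final partial group up to a multiple of 4.
        yield base64.b64decode(buffer + b"=" * (-len(buffer) % 4))
```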

View File

@@ -1,11 +1,12 @@
from __future__ import annotations
import json
from aiohttp import ClientSession
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin, BaseConversation
from ...typing import AsyncResult, Messages, Cookies
from ...requests.raise_for_status import raise_for_status
from ...requests import StreamSession
from ...requests.aiohttp import get_connector
from ...providers.helper import format_prompt
from ...cookies import get_cookies
@@ -16,6 +17,7 @@ class Conversation(BaseConversation):
self.conversation_id = conversation_id
class GithubCopilot(AsyncGeneratorProvider, ProviderModelMixin):
label = "GitHub Copilot"
url = "https://github.com/copilot"
working = True
@@ -42,13 +44,22 @@ class GithubCopilot(AsyncGeneratorProvider, ProviderModelMixin):
if not model:
model = cls.default_model
if cookies is None:
cookies = get_cookies(".github.com")
async with StreamSession(
proxy=proxy,
impersonate="chrome",
cookies = get_cookies("github.com")
async with ClientSession(
connector=get_connector(proxy=proxy),
cookies=cookies,
headers={
"GitHub-Verified-Fetch": "true",
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:133.0) Gecko/20100101 Firefox/133.0',
'Accept-Language': 'en-US,en;q=0.5',
'Referer': 'https://github.com/copilot',
'Content-Type': 'application/json',
'GitHub-Verified-Fetch': 'true',
'X-Requested-With': 'XMLHttpRequest',
'Origin': 'https://github.com',
'Connection': 'keep-alive',
'Sec-Fetch-Dest': 'empty',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Site': 'same-origin',
}
) as session:
headers = {}
@@ -87,7 +98,7 @@ class GithubCopilot(AsyncGeneratorProvider, ProviderModelMixin):
json=json_data,
headers=headers
) as response:
async for line in response.iter_lines():
async for line in response.content:
if line.startswith(b"data: "):
data = json.loads(line[6:])
if data.get("type") == "content":

View File

@@ -352,7 +352,10 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
json={"p": get_requirements_token(RequestConfig.proof_token) if RequestConfig.proof_token else None},
headers=cls._headers
) as response:
cls._update_request_args(session)
if response.status == 401:
cls._headers = cls._api_key = None
else:
cls._update_request_args(session)
await raise_for_status(response)
chat_requirements = await response.json()
need_turnstile = chat_requirements.get("turnstile", {}).get("required", False)
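
The change above drops the cached headers and API key when the requirements endpoint answers 401, so the next attempt re-authenticates instead of replaying a stale token. The pattern in isolation (a sketch, not the OpenaiChat API):

```python
class CredentialCache:
    """Sketch: invalidate cached credentials on 401 instead of reusing them."""
    _api_key = None
    _headers = None

    @classmethod
    def on_response(cls, status: int, session_args: dict) -> None:
        if status == 401:
            cls._headers = cls._api_key = None  # force re-auth on the next call
        else:
            cls._headers = session_args  # keep the refreshed headers/cookies
```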
@@ -525,7 +528,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
cls._set_api_key(RequestConfig.access_token)
except NoValidHarFileError:
if has_nodriver:
if RequestConfig.access_token is None:
if cls._api_key is None:
await cls.nodriver_auth(proxy)
else:
raise
@@ -545,7 +548,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
if "OpenAI-Sentinel-Turnstile-Token" in event.request.headers:
RequestConfig.turnstile_token = event.request.headers["OpenAI-Sentinel-Turnstile-Token"]
if "Authorization" in event.request.headers:
RequestConfig.access_token = event.request.headers["Authorization"].split()[-1]
cls._set_api_key(event.request.headers["Authorization"].split()[-1])
elif event.request.url == arkose_url:
RequestConfig.arkose_request = arkReq(
arkURL=event.request.url,
@@ -560,13 +563,13 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
user_agent = await page.evaluate("window.navigator.userAgent")
await page.select("#prompt-textarea", 240)
while True:
if RequestConfig.access_token:
if cls._api_key is not None:
break
body = await page.evaluate("JSON.stringify(window.__remixContext)")
if body:
match = re.search(r'"accessToken":"(.*?)"', body)
if match:
RequestConfig.access_token = match.group(1)
cls._set_api_key(match.group(1))
break
await asyncio.sleep(1)
while True:
@@ -578,7 +581,6 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
RequestConfig.cookies[c.name] = c.value
await page.close()
cls._create_request_args(RequestConfig.cookies, RequestConfig.headers, user_agent=user_agent)
cls._set_api_key(RequestConfig.access_token)
@staticmethod
def get_default_headers() -> dict:

View File

@@ -42,7 +42,9 @@ class ChatCompletion:
if "proxy" not in kwargs:
proxy = os.environ.get("G4F_PROXY")
if proxy:
kwargs['proxy'] = proxy
kwargs["proxy"] = proxy
if ignore_stream:
kwargs["ignore_stream"] = True
result = provider.create_completion(model, messages, stream=stream, **kwargs)
@@ -53,9 +55,20 @@
messages : Messages,
provider : Union[ProviderType, str, None] = None,
stream : bool = False,
image : ImageType = None,
image_name: Optional[str] = None,
ignore_stream: bool = False,
ignore_working: bool = False,
**kwargs) -> Union[AsyncResult, Coroutine[str]]:
model, provider = get_model_and_provider(model, provider, False, ignore_working)
if image is not None:
kwargs["images"] = [(image, image_name)]
if "proxy" not in kwargs:
proxy = os.environ.get("G4F_PROXY")
if proxy:
kwargs["proxy"] = proxy
if ignore_stream:
kwargs["ignore_stream"] = True
if stream:
if hasattr(provider, "create_async_generator"):
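
Both `create` and `create_async` now share the same keyword preprocessing: pack a single `image` into the `images` list, pick up `G4F_PROXY` from the environment when no proxy was passed, and forward `ignore_stream`. As a standalone helper (a sketch of the repeated block, not an existing g4f function):

```python
import os
from typing import Optional

def prepare_kwargs(kwargs: dict, image=None, image_name: Optional[str] = None,
                   ignore_stream: bool = False) -> dict:
    if image is not None:
        kwargs["images"] = [(image, image_name)]
    if "proxy" not in kwargs:
        proxy = os.environ.get("G4F_PROXY")
        if proxy:
            kwargs["proxy"] = proxy
    if ignore_stream:
        kwargs["ignore_stream"] = True
    return kwargs
```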

View File

@@ -26,7 +26,6 @@ from fastapi.encoders import jsonable_encoder
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials, HTTPBasic
from fastapi.middleware.cors import CORSMiddleware
from starlette.responses import FileResponse
from pydantic import BaseModel, Field
from typing import Union, Optional, List
try:
from typing import Annotated
@@ -40,11 +39,16 @@ from g4f.client import AsyncClient, ChatCompletion, ImagesResponse, convert_to_p
from g4f.providers.response import BaseConversation
from g4f.client.helper import filter_none
from g4f.image import is_accepted_format, is_data_uri_an_image, images_dir
from g4f.typing import Messages
from g4f.errors import ProviderNotFoundError, ModelNotFoundError, MissingAuthError
from g4f.errors import ProviderNotFoundError, ModelNotFoundError, MissingAuthError, NoValidHarFileError
from g4f.cookies import read_cookie_files, get_cookies_dir
from g4f.Provider import ProviderType, ProviderUtils, __providers__
from g4f.gui import get_gui_app
from .stubs import (
ChatCompletionsConfig, ImageGenerationConfig,
ProviderResponseModel, ModelResponseModel,
ErrorResponseModel, ProviderResponseDetailModel,
FileResponseModel
)
logger = logging.getLogger(__name__)
@@ -64,18 +68,10 @@ def create_app():
api = Api(app)
if AppConfig.gui:
@app.get("/")
async def home():
return HTMLResponse(f'g4f v-{g4f.version.utils.current_version}:<br><br>'
'Start to chat: <a href="/chat/">/chat/</a><br>'
'Open Swagger UI at: '
'<a href="/docs">/docs</a>')
api.register_routes()
api.register_authorization()
api.register_validation_exception_handler()
if AppConfig.gui:
gui_app = WSGIMiddleware(get_gui_app())
app.mount("/", gui_app)
@@ -100,63 +96,6 @@ def create_app_with_gui_and_debug():
AppConfig.gui = True
return create_app()
class ChatCompletionsConfig(BaseModel):
messages: Messages = Field(examples=[[{"role": "system", "content": ""}, {"role": "user", "content": ""}]])
model: str = Field(default="")
provider: Optional[str] = None
stream: bool = False
image: Optional[str] = None
image_name: Optional[str] = None
images: Optional[list[tuple[str, str]]] = None
temperature: Optional[float] = None
max_tokens: Optional[int] = None
stop: Union[list[str], str, None] = None
api_key: Optional[str] = None
web_search: Optional[bool] = None
proxy: Optional[str] = None
conversation_id: Optional[str] = None
history_disabled: Optional[bool] = None
auto_continue: Optional[bool] = None
timeout: Optional[int] = None
class ImageGenerationConfig(BaseModel):
prompt: str
model: Optional[str] = None
provider: Optional[str] = None
response_format: Optional[str] = None
api_key: Optional[str] = None
proxy: Optional[str] = None
class ProviderResponseModel(BaseModel):
id: str
object: str = "provider"
created: int
url: Optional[str]
label: Optional[str]
class ProviderResponseDetailModel(ProviderResponseModel):
models: list[str]
image_models: list[str]
vision_models: list[str]
params: list[str]
class ModelResponseModel(BaseModel):
id: str
object: str = "model"
created: int
owned_by: Optional[str]
class ErrorResponseModel(BaseModel):
error: ErrorResponseMessageModel
model: Optional[str] = None
provider: Optional[str] = None
class ErrorResponseMessageModel(BaseModel):
message: str
class FileResponseModel(BaseModel):
filename: str
class ErrorResponse(Response):
media_type = "application/json"
@@ -198,7 +137,7 @@ class Api:
security = HTTPBearer(auto_error=False)
basic_security = HTTPBasic()
async def get_username(self, request: Request):
async def get_username(self, request: Request) -> str:
credentials = await self.basic_security(request)
current_password_bytes = credentials.password.encode()
is_correct_password = secrets.compare_digest(
@@ -222,13 +161,13 @@ class Api:
user_g4f_api_key = await self.get_g4f_api_key(request)
except HTTPException:
user_g4f_api_key = None
if request.url.path.startswith("/v1"):
path = request.url.path
if path.startswith("/v1"):
if user_g4f_api_key is None:
return ErrorResponse.from_message("G4F API key required", HTTP_401_UNAUTHORIZED)
if not secrets.compare_digest(AppConfig.g4f_api_key, user_g4f_api_key):
return ErrorResponse.from_message("Invalid G4F API key", HTTP_403_FORBIDDEN)
else:
path = request.url.path
if user_g4f_api_key is not None and path.startswith("/images/"):
if not secrets.compare_digest(AppConfig.g4f_api_key, user_g4f_api_key):
return ErrorResponse.from_message("Invalid G4F API key", HTTP_403_FORBIDDEN)
@@ -261,6 +200,8 @@ class Api:
def register_routes(self):
@self.app.get("/")
async def read_root():
if AppConfig.gui:
return RedirectResponse("/chat/", 302)
return RedirectResponse("/v1", 302)
@self.app.get("/v1")
@@ -336,6 +277,7 @@ class Api:
except ValueError as e:
example = json.dumps({"images": [["data:image/jpeg;base64,...", "filename"]]})
return ErrorResponse.from_message(f'The image you send must be a data URI. Example: {example}', status_code=HTTP_422_UNPROCESSABLE_ENTITY)
# Create the completion response
response = self.client.chat.completions.create(
**filter_none(
@@ -379,7 +321,7 @@ class Api:
except (ModelNotFoundError, ProviderNotFoundError) as e:
logger.exception(e)
return ErrorResponse.from_exception(e, config, HTTP_404_NOT_FOUND)
except MissingAuthError as e:
except (MissingAuthError, NoValidHarFileError) as e:
logger.exception(e)
return ErrorResponse.from_exception(e, config, HTTP_401_UNAUTHORIZED)
except Exception as e:
@@ -392,7 +334,6 @@ class Api:
HTTP_404_NOT_FOUND: {"model": ErrorResponseModel},
HTTP_500_INTERNAL_SERVER_ERROR: {"model": ErrorResponseModel},
}
@self.app.post("/v1/images/generate", responses=responses)
@self.app.post("/v1/images/generations", responses=responses)
async def generate_image(
@@ -564,4 +505,4 @@ def run_api(
use_colors=use_colors,
factory=True,
reload=reload
)
)

g4f/api/stubs.py (new file, 63 lines)
View File

@@ -0,0 +1,63 @@
from __future__ import annotations
from pydantic import BaseModel, Field
from typing import Union, Optional
from g4f.typing import Messages
class ChatCompletionsConfig(BaseModel):
messages: Messages = Field(examples=[[{"role": "system", "content": ""}, {"role": "user", "content": ""}]])
model: str = Field(default="")
provider: Optional[str] = None
stream: bool = False
image: Optional[str] = None
image_name: Optional[str] = None
images: Optional[list[tuple[str, str]]] = None
temperature: Optional[float] = None
max_tokens: Optional[int] = None
stop: Union[list[str], str, None] = None
api_key: Optional[str] = None
web_search: Optional[bool] = None
proxy: Optional[str] = None
conversation_id: Optional[str] = None
history_disabled: Optional[bool] = None
auto_continue: Optional[bool] = None
timeout: Optional[int] = None
class ImageGenerationConfig(BaseModel):
prompt: str
model: Optional[str] = None
provider: Optional[str] = None
response_format: Optional[str] = None
api_key: Optional[str] = None
proxy: Optional[str] = None
class ProviderResponseModel(BaseModel):
id: str
object: str = "provider"
created: int
url: Optional[str]
label: Optional[str]
class ProviderResponseDetailModel(ProviderResponseModel):
models: list[str]
image_models: list[str]
vision_models: list[str]
params: list[str]
class ModelResponseModel(BaseModel):
id: str
object: str = "model"
created: int
owned_by: Optional[str]
class ErrorResponseModel(BaseModel):
error: ErrorResponseMessageModel
model: Optional[str] = None
provider: Optional[str] = None
class ErrorResponseMessageModel(BaseModel):
message: str
class FileResponseModel(BaseModel):
filename: str
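
Moving these request/response models into `g4f/api/stubs.py` keeps the FastAPI app module focused on routing; usage stays the same. For example (assuming pydantic v2, where serialization is `model_dump`; on v1 it would be `.dict()`):

```python
from g4f.api.stubs import ChatCompletionsConfig

config = ChatCompletionsConfig(
    messages=[{"role": "user", "content": "Hello"}],
    model="gpt-4",
)
# Drop unset optionals before forwarding to the client.
print(config.model_dump(exclude_none=True))
```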

View File

@@ -16,7 +16,6 @@ from ..errors import NoImageResponseError
from ..providers.retry_provider import IterListProvider
from ..providers.asyncio import to_sync_generator, async_generator_to_list
from ..Provider.needs_auth import BingCreateImages, OpenaiAccount
from ..image import to_bytes
from .stubs import ChatCompletion, ChatCompletionChunk, Image, ImagesResponse
from .image_models import ImageModels
from .types import IterResponse, ImageProvider, Client as BaseClient
@@ -59,7 +58,7 @@ def iter_response(
elif isinstance(chunk, BaseConversation):
yield chunk
continue
elif isinstance(chunk, SynthesizeData) or chunk is None:
elif isinstance(chunk, SynthesizeData) or not chunk:
continue
chunk = str(chunk)
@@ -122,7 +121,7 @@ async def async_iter_response(
elif isinstance(chunk, BaseConversation):
yield chunk
continue
elif isinstance(chunk, SynthesizeData) or chunk is None:
elif isinstance(chunk, SynthesizeData) or not chunk:
continue
chunk = str(chunk)
@@ -214,6 +213,8 @@ class Completions:
stop = [stop] if isinstance(stop, str) else stop
if image is not None:
kwargs["images"] = [(image, image_name)]
if ignore_stream:
kwargs["ignore_stream"] = True
response = provider.create_completion(
model,
messages,
@@ -330,7 +331,6 @@ class Images:
model: str,
prompt: str,
prompt_prefix: str = "Generate a image: ",
image: ImageType = None,
**kwargs
) -> ImageResponse:
messages = [{"role": "user", "content": f"{prompt_prefix}{prompt}"}]
@@ -341,7 +341,6 @@ class Images:
messages,
stream=True,
prompt=prompt,
image=image,
**kwargs
):
if isinstance(item, ImageResponse):
@@ -353,7 +352,6 @@ class Images:
messages,
True,
prompt=prompt,
image=image,
**kwargs
):
if isinstance(item, ImageResponse):
@@ -389,20 +387,22 @@ class Images:
if proxy is None:
proxy = self.client.proxy
prompt = "create a variation of this image"
if image is not None:
kwargs["images"] = [(image, None)]
e = None
response = None
if isinstance(provider_handler, IterListProvider):
for provider in provider_handler.providers:
try:
response = await self._generate_image_response(provider, provider.__name__, model, prompt, image=image, **kwargs)
response = await self._generate_image_response(provider, provider.__name__, model, prompt, **kwargs)
if response is not None:
provider_name = provider.__name__
break
except Exception as e:
debug.log(f"Image provider {provider.__name__}: {e}")
else:
response = await self._generate_image_response(provider_handler, provider_name, model, prompt, image=image, **kwargs)
response = await self._generate_image_response(provider_handler, provider_name, model, prompt, **kwargs)
if isinstance(response, ImageResponse):
return await self._process_image_response(response, response_format, proxy, model, provider_name)
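
The image generation path applies the same retry idea as `IterListProvider`: walk the provider list, log failures, and stop at the first non-`None` response. The loop as a generic sketch (`make_request` is a placeholder callable):

```python
async def first_successful(providers, make_request):
    last_error = None
    for provider in providers:
        try:
            response = await make_request(provider)
            if response is not None:
                return provider.__name__, response  # remember who answered
        except Exception as e:
            last_error = e  # record the failure, try the next provider
    raise last_error or RuntimeError("No provider returned a response")
```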
@@ -494,6 +494,8 @@ class AsyncCompletions:
stop = [stop] if isinstance(stop, str) else stop
if image is not None:
kwargs["images"] = [(image, image_name)]
if ignore_stream:
kwargs["ignore_stream"] = True
if hasattr(provider, "create_async_generator"):
create_handler = provider.create_async_generator
else:

View File

@@ -59,6 +59,7 @@ DOMAINS = [
"chat.reka.ai",
"chatgpt.com",
".cerebras.ai",
"github.com",
]
if has_browser_cookie3 and os.environ.get('DBUS_SESSION_BUS_ADDRESS') == "/dev/null":

View File

@@ -264,6 +264,13 @@
</div>
</div>
<div class="buttons">
<div class="field">
<button id="search">
<a href="" onclick="return false;" title="Enable Web Access">
<i class="fa-solid fa-search"></i>
</a>
</button>
</div>
<div class="field">
<select name="model" id="model">
<option value="">Model: Default</option>

View File

@@ -1,36 +1,5 @@
@import url("https://fonts.googleapis.com/css2?family=Inter:wght@100;200;300;400;500;600;700;800;900&display=swap");
.adsbox {
backdrop-filter: blur(20px);
-webkit-backdrop-filter: blur(20px);
background-color: var(--blur-bg);
height: 100%;
width: 100%;
border-radius: var(--border-radius-1);
border: 1px solid var(--blur-border);
}
.ads {
align-items: center;
margin: auto;
display: flex;
flex-direction: column;
gap: var(--inner-gap);
max-width: 200px;
padding: var(--section-gap);
overflow: none;
flex-shrink: 0;
display: flex;
flex-direction: column;
justify-content: space-between;
}
@media screen and (max-width: 728px) {
.ads {
display: none;
}
}
/* :root {
--colour-1: #ffffff;
--colour-2: #000000;
@@ -55,7 +24,6 @@
--colour-6: #242424;
--accent: #8b3dff;
--gradient: var(--accent);
--blur-bg: #16101b66;
--blur-border: #84719040;
--user-input: #ac87bb;
@@ -64,9 +32,12 @@
--scrollbar: var(--colour-3);
--scrollbar-thumb: var(--blur-bg);
--button-hover: var(--colour-5);
}
:root {
--top: 50%;
--size: 70vw;
--blur: 35vw; /* Half of 70vw */
--opacity: 0.3;
--gradient: rgba(22, 16, 27, 0.4); /* Transparent dark color */
--background: #16101b; /* Background color */
--font-1: "Inter", sans-serif;
--section-gap: 25px;
--inner-gap: 15px;
@@ -88,7 +59,7 @@ body {
}
body {
background: var(--colour-1);
background: var(--background);
color: var(--colour-3);
height: 100vh;
}
@@ -98,6 +69,29 @@ body:not(.white) a:visited{
color: var(--colour-3);
}
.gradient {
position: absolute;
z-index: -1;
left: 70vw;
border-radius: 50%;
background: radial-gradient(circle at center, var(--accent), var(--gradient));
width: var(--size);
height: var(--size);
top: var(--top);
transform: translate(-50%, -50%);
filter: blur(var(--blur)) opacity(var(--opacity));
animation: zoom_gradient 6s infinite alternate;
}
@keyframes zoom_gradient {
0% {
transform: translate(-50%, -50%) scale(1);
}
100% {
transform: translate(-50%, -50%) scale(1.2);
}
}
.row {
display: flex;
gap: 10px;
@@ -158,6 +152,7 @@ body:not(.white) a:visited{
overflow: auto;
overflow-wrap: break-word;
padding-bottom: 10px;
background-color: transparent;
}
.conversation .user-input {
@@ -179,29 +174,6 @@ body:not(.white) a:visited{
color: var(--user-input)
}
.gradient:nth-child(1) {
--top: 0;
--right: 0;
--size: 70vw;
--blur: calc(0.5 * var(--size));
--opacity: 0.3;
animation: zoom_gradient 6s infinite;
}
.gradient {
position: absolute;
z-index: -1;
border-radius: calc(0.5 * var(--size));
background-color: var(--accent);
background: radial-gradient(circle at center, var(--gradient), var(--gradient));
width: 70vw;
height: 70vw;
top: 50%;
right: 0;
transform: translateY(-50%);
filter: blur(calc(0.5 * 70vw)) opacity(var(--opacity));
}
body.white .gradient{
display: none;
}
@@ -299,7 +271,7 @@ body.white .gradient{
}
.message.regenerate {
background-color: var(--colour-6);
background-color: rgba(0, 0, 0, 0.2);
}
.white .message.regenerate {
@@ -626,7 +598,8 @@ body.white .gradient{
.file-label:has(> input:valid),
.file-label.selected,
.micro-label.recognition {
.micro-label.recognition,
#search.active a i {
color: var(--accent);
}
@@ -742,7 +715,7 @@ select {
outline: none;
padding: 8px 16px;
appearance: none;
width: 160px;
width: 132px;
}
.buttons button {
@@ -842,7 +815,7 @@ select:hover,
width: 200px;
}
.field {
padding-right: 15px
padding-right: 8px
}
.message {
flex-direction: row;
@@ -1029,7 +1002,7 @@ ul {
.buttons {
align-items: flex-start;
flex-wrap: wrap;
gap: 12px;
gap: 8px;
}
.mobile-sidebar {
@@ -1172,13 +1145,13 @@ ul {
.white {
--blur-bg: transparent;
--accent: #007bff;
--gradient: #ccc;
--conversations: #0062cc;
--colour-1: #ffffff;
--colour-3: #212529;
--scrollbar: var(--colour-1);
--scrollbar-thumb: var(--gradient);
--scrollbar-thumb: #ccc;
--button-hover: var(--colour-4);
--background: transparent;
}
.white .message .assistant .fa-xmark {

View File

@@ -20,6 +20,8 @@ const settings = document.querySelector(".settings");
const chat = document.querySelector(".conversation");
const album = document.querySelector(".images");
const log_storage = document.querySelector(".log");
const switchInput = document.getElementById("switch");
const searchButton = document.getElementById("search");
const optionElementsSelector = ".settings input, .settings textarea, #model, #model2, #provider";
@@ -591,7 +593,7 @@ const ask_gpt = async (message_id, message_index = -1, regenerate = false, provi
id: message_id,
conversation_id: window.conversation_id,
model: model,
web_search: document.getElementById("switch").checked,
web_search: switchInput.checked,
provider: provider,
messages: messages,
auto_continue: auto_continue,
@@ -1434,6 +1436,9 @@ async function on_api() {
}
});
}
const method = switchInput.checked ? "add" : "remove";
searchButton.classList[method]("active");
}
async function load_version() {
@@ -1640,6 +1645,7 @@ async function load_provider_models(providerIndex=null) {
}
};
providerSelect.addEventListener("change", () => load_provider_models());
document.getElementById("pin").addEventListener("click", async () => {
const pin_container = document.getElementById("pin_container");
let selected_provider = providerSelect.options[providerSelect.selectedIndex];
@@ -1667,6 +1673,14 @@ document.getElementById("pin").addEventListener("click", async () => {
}
});
switchInput.addEventListener("change", () => {
const method = switchInput.checked ? "add" : "remove";
searchButton.classList[method]("active");
});
searchButton.addEventListener("click", async () => {
switchInput.click();
});
function save_storage() {
let filename = `chat ${new Date().toLocaleString()}.json`.replaceAll(":", "-");
let data = {"options": {"g4f": ""}};

View File

@@ -30,6 +30,7 @@ class Api:
"providers": [
getattr(provider, "parent", provider.__name__)
for provider in providers
if provider.working
]
}
for model, providers in models.__models__.values()]
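
This is the server-side half of "show only models with enabled providers": each model's provider list is filtered down to working providers, so the frontend can hide models whose list comes back empty. The same filter as a standalone sketch (the tuple layout mirrors `models.__models__`; names are illustrative):

```python
def visible_models(registry: dict) -> list[dict]:
    result = []
    for model, providers in registry.values():
        working = [getattr(p, "parent", p.__name__) for p in providers if p.working]
        if working:  # hide models with no enabled provider
            result.append({"name": model.name, "providers": working})
    return result
```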
@@ -178,9 +179,9 @@
}
def handle_provider(self, provider_handler, model):
if isinstance(provider_handler, IterListProvider):
if isinstance(provider_handler, IterListProvider) and provider_handler.last_provider is not None:
provider_handler = provider_handler.last_provider
if issubclass(provider_handler, ProviderModelMixin) and provider_handler.last_model is not None:
if hasattr(provider_handler, "last_model") and provider_handler.last_model is not None:
model = provider_handler.last_model
return self._format_json("provider", {**provider_handler.get_dict(), "model": model})

View File

@@ -16,13 +16,13 @@ from .Provider import (
DDG,
DeepInfraChat,
Flux,
Free2GPT,
GigaChat,
Gemini,
GeminiPro,
HuggingChat,
HuggingFace,
Liaobots,
Mhystical,
Airforce,
MetaAI,
MicrosoftDesigner,
@@ -70,13 +70,14 @@ default = Model(
Blackbox2,
Blackbox,
Copilot,
Free2GPT,
DeepInfraChat,
Airforce,
Cloudflare,
PollinationsAI,
ChatGptEs,
OpenaiChat,
Mhystical,
RubiksAI,
])
)
@@ -96,7 +97,7 @@ gpt_35_turbo = Model(
gpt_4 = Model(
name = 'gpt-4',
base_provider = 'OpenAI',
best_provider = IterListProvider([DDG, Blackbox, ChatGptEs, PollinationsAI, Copilot, OpenaiChat, Liaobots, Airforce])
best_provider = IterListProvider([DDG, Blackbox, ChatGptEs, PollinationsAI, Copilot, OpenaiChat, Liaobots, Airforce, Mhystical])
)
gpt_4_turbo = Model(

View File

@@ -4,6 +4,7 @@ import random
import string
from ..typing import Messages, Cookies
from .. import debug
def format_prompt(messages: Messages, add_special_tokens=False) -> str:
"""
@@ -24,6 +25,20 @@ def format_prompt(messages: Messages, add_special_tokens=False) -> str:
])
return f"{formatted}\nAssistant:"
def format_prompt_max_length(messages: Messages, max_lenght: int) -> str:
prompt = format_prompt(messages)
start = len(prompt)
if start > max_lenght:
if len(messages) > 6:
prompt = format_prompt(messages[:3] + messages[-3:])
if len(prompt) > max_lenght:
if len(messages) > 2:
prompt = format_prompt([m for m in messages if m["role"] == "system"] + messages[-1:])
if len(prompt) > max_lenght:
prompt = messages[-1]["content"]
debug.log(f"Messages trimmed from: {start} to: {len(prompt)}")
return prompt
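
`format_prompt_max_length` trims in stages: the full prompt first, then the first three plus last three messages, then the system messages plus the final message, and finally just the final message's raw content, which may still exceed the cap (the helper is best-effort). A quick illustration, runnable against the module above (`from g4f.providers.helper import format_prompt_max_length`):

```python
# Nine messages whose rendered prompt is far over a 5000-character cap.
messages = [{"role": "system", "content": "Be brief."}] + [
    {"role": "user", "content": "x" * 6000} for _ in range(8)
]
prompt = format_prompt_max_length(messages, 5000)
# Every intermediate stage is still too long, so only the last
# message's content survives (6000 chars, still over the cap).
assert prompt == messages[-1]["content"]
```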
def get_random_string(length: int = 10) -> str:
"""
Generate a random string of specified length, containing lowercase letters and digits.

View File

@@ -34,6 +34,7 @@ class IterListProvider(BaseRetryProvider):
model: str,
messages: Messages,
stream: bool = False,
ignore_stream: bool = False,
ignored: list[str] = [],
**kwargs,
) -> CreateResult:
@@ -51,7 +52,7 @@
exceptions = {}
started: bool = False
for provider in self.get_providers(stream, ignored):
for provider in self.get_providers(stream and not ignore_stream, ignored):
self.last_provider = provider
debug.log(f"Using {provider.__name__} provider")
try:
@@ -109,13 +110,14 @@
model: str,
messages: Messages,
stream: bool = True,
ignore_stream: bool = False,
ignored: list[str] = [],
**kwargs
) -> AsyncResult:
exceptions = {}
started: bool = False
for provider in self.get_providers(stream, ignored):
for provider in self.get_providers(stream and not ignore_stream, ignored):
self.last_provider = provider
debug.log(f"Using {provider.__name__} provider")
try:
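
`ignore_stream` widens the provider pool: `get_providers(stream and not ignore_stream, ignored)` restricts the list to stream-capable providers only when the caller both requested streaming and did not opt out. A sketch of that selection (assuming the usual `supports_stream` flag on g4f providers):

```python
def select_providers(providers, stream: bool, ignore_stream: bool, ignored=()):
    # ignore_stream=True keeps non-streaming providers eligible even
    # though the caller asked for stream=True.
    want_stream = stream and not ignore_stream
    return [
        p for p in providers
        if p.__name__ not in ignored
        and (not want_stream or getattr(p, "supports_stream", False))
    ]
```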

View File

@@ -23,25 +23,36 @@ def is_openai(text: str) -> bool:
async def raise_for_status_async(response: Union[StreamResponse, ClientResponse], message: str = None):
if response.status in (429, 402):
raise RateLimitError(f"Response {response.status}: Rate limit reached")
message = await response.text() if not response.ok and message is None else message
if response.status == 403 and is_cloudflare(message):
if response.ok:
return
text = await response.text()
if response.status == 403 and is_cloudflare(text):
raise CloudflareError(f"Response {response.status}: Cloudflare detected")
elif response.status == 403 and is_openai(message):
elif response.status == 403 and is_openai(text):
raise ResponseStatusError(f"Response {response.status}: OpenAI Bot detected")
elif not response.ok:
if message is None and response.headers.get("content-type") == "text/html":
message = "Bad gateway" if response.status == 502 else "HTML content"
elif response.status == 502:
raise ResponseStatusError(f"Response {response.status}: Bad gateway")
elif message is not None:
raise ResponseStatusError(f"Response {response.status}: {message}")
else:
message = "HTML content" if response.headers.get("content-type").startswith("text/html") else text
raise ResponseStatusError(f"Response {response.status}: {message}")
def raise_for_status(response: Union[Response, StreamResponse, ClientResponse, RequestsResponse], message: str = None):
if hasattr(response, "status"):
return raise_for_status_async(response, message)
if response.ok:
return
if response.status_code in (429, 402):
raise RateLimitError(f"Response {response.status_code}: Rate limit reached")
elif response.status_code == 403 and is_cloudflare(response.text):
raise CloudflareError(f"Response {response.status_code}: Cloudflare detected")
elif not response.ok:
if message is None and response.headers.get("content-type") == "text/html":
message = "Bad gateway" if response.status_code == 502 else "HTML content"
raise ResponseStatusError(f"Response {response.status_code}: {response.text if message is None else message}")
elif response.status_code == 403 and is_openai(response.text):
raise ResponseStatusError(f"Response {response.status_code}: OpenAI Bot detected")
elif message is not None:
raise ResponseStatusError(f"Response {response.status_code}: {message}")
elif response.status_code == 502:
raise ResponseStatusError(f"Response {response.status_code}: Bad gateway")
else:
message = "HTML content" if response.headers.get("content-type").startswith("text/html") else response.text
raise ResponseStatusError(f"Response {response.status_code}: {message}")