From bbb858249bc055819aad331cff326fa9a722b578 Mon Sep 17 00:00:00 2001
From: H Lohaus
Date: Tue, 17 Dec 2024 21:17:40 +0100
Subject: [PATCH] Improve gradient in gui, show only models with enabled
 providers (#2492)

- **Cloudflare Provider**: Added error handling for missing requirements when fetching request arguments; falls back to default headers when nodriver is unavailable.
- **Copilot Provider**: Replaced the inline prompt-trimming logic with the new `format_prompt_max_length` helper (sketched below), capping prompts at 10,000 characters.
- **PollinationsAI Provider**: Capped the prompt at a maximum of 5,000 characters using the same helper.
- **GitHub Copilot Provider**: Switched to an aiohttp `ClientSession` with browser-like headers for better session management.
- **CSS Updates**: Enhanced the gradient styling in the GUI for a more visually appealing interface.
- **JavaScript Updates**: Added functionality to toggle search options in the chat interface.
---
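The `format_prompt_max_length` helper itself lands in g4f/providers/helper.py
(15 added lines in the diffstat), but that hunk is not part of this excerpt.
Judging from the inline logic it replaces in Copilot.py below, it is
approximately the following sketch; `format_prompt` and `Messages` are the
existing names from the same module:

    def format_prompt_max_length(messages: Messages, max_length: int) -> str:
        # Render the full history first, then retry with smaller slices
        # while the flattened prompt still exceeds max_length.
        prompt = format_prompt(messages)
        if len(prompt) > max_length:
            if len(messages) > 6:
                prompt = format_prompt(messages[:3] + messages[-3:])
            if len(prompt) > max_length:
                if len(messages) > 2:
                    prompt = format_prompt(messages[:2] + messages[-1:])
                if len(prompt) > max_length:
                    # Last resort: send only the newest message's text.
                    prompt = messages[-1]["content"]
        return prompt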
 g4f/Provider/Blackbox.py                 |   1 +
 g4f/Provider/Cloudflare.py               |   9 +-
 g4f/Provider/Copilot.py                  |  13 +--
 g4f/Provider/PollinationsAI.py           |   4 +-
 g4f/Provider/needs_auth/Gemini.py        |   5 +-
 g4f/Provider/needs_auth/GithubCopilot.py |  25 ++++-
 g4f/Provider/needs_auth/OpenaiChat.py    |  14 +--
 g4f/__init__.py                          |  15 +++-
 g4f/api/__init__.py                      |  91 ++++---------------
 g4f/api/stubs.py                         |  63 ++++++++++++++
 g4f/client/__init__.py                   |  18 ++--
 g4f/cookies.py                           |   1 +
 g4f/gui/client/index.html                |   7 ++
 g4f/gui/client/static/css/style.css      | 105 +++++++++--------------
 g4f/gui/client/static/js/chat.v1.js      |  16 +++-
 g4f/gui/server/api.py                    |   5 +-
 g4f/models.py                            |   7 +-
 g4f/providers/helper.py                  |  15 ++++
 g4f/providers/retry_provider.py          |   6 +-
 g4f/requests/raise_for_status.py         |  33 ++++---
 20 files changed, 254 insertions(+), 199 deletions(-)
 create mode 100644 g4f/api/stubs.py

diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py
index 716ad5a4..bac3d766 100644
--- a/g4f/Provider/Blackbox.py
+++ b/g4f/Provider/Blackbox.py
@@ -287,6 +287,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
                         return
 
             response_text = re.sub(r'Generated by BLACKBOX.AI, try unlimited chat https://www.blackbox.ai', '', response_text, flags=re.DOTALL)
+            response_text = re.sub(r'and for API requests replace https://www.blackbox.ai with https://api.blackbox.ai', '', response_text, flags=re.DOTALL)
 
             json_match = re.search(r'\$~~~\$(.*?)\$~~~\$', response_text, re.DOTALL)
             if json_match:
diff --git a/g4f/Provider/Cloudflare.py b/g4f/Provider/Cloudflare.py
index 08a4bf0d..4cff8369 100644
--- a/g4f/Provider/Cloudflare.py
+++ b/g4f/Provider/Cloudflare.py
@@ -5,8 +5,8 @@ import json
 
 from ..typing import AsyncResult, Messages, Cookies
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin, get_running_loop
-from ..requests import Session, StreamSession, get_args_from_nodriver, raise_for_status, merge_cookies
-from ..errors import ResponseStatusError
+from ..requests import Session, StreamSession, get_args_from_nodriver, raise_for_status, merge_cookies, DEFAULT_HEADERS
+from ..errors import ResponseStatusError, MissingRequirementsError
 
 class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
     label = "Cloudflare AI"
@@ -62,7 +62,10 @@ class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
         **kwargs
     ) -> AsyncResult:
         if cls._args is None:
-            cls._args = await get_args_from_nodriver(cls.url, proxy, timeout, cookies)
+            try:
+                cls._args = await get_args_from_nodriver(cls.url, proxy, timeout, cookies)
+            except MissingRequirementsError:
+                cls._args = {"headers": DEFAULT_HEADERS, "cookies": {}}
         model = cls.get_model(model)
         data = {
             "messages": messages,
diff --git a/g4f/Provider/Copilot.py b/g4f/Provider/Copilot.py
index 4451bbec..e76322da 100644
--- a/g4f/Provider/Copilot.py
+++ b/g4f/Provider/Copilot.py
@@ -18,7 +18,7 @@ except ImportError:
     has_nodriver = False
 
 from .base_provider import AbstractProvider, ProviderModelMixin, BaseConversation
-from .helper import format_prompt
+from .helper import format_prompt_max_length
 from ..typing import CreateResult, Messages, ImagesType
 from ..errors import MissingRequirementsError, NoValidHarFileError
 from ..requests.raise_for_status import raise_for_status
@@ -120,16 +120,7 @@ class Copilot(AbstractProvider, ProviderModelMixin):
                 conversation_id = response.json().get("id")
                 if return_conversation:
                     yield Conversation(conversation_id)
-                prompt = format_prompt(messages)
-                if len(prompt) > 10000:
-                    if len(messages) > 6:
-                        prompt = format_prompt(messages[:3] + messages[-3:])
-                    if len(prompt) > 10000:
-                        if len(messages) > 2:
-                            prompt = format_prompt(messages[:2] + messages[-1:])
-                        if len(prompt) > 10000:
-                            prompt = messages[-1]["content"]
-                    debug.log(f"Copilot: Trim messages to: {len(prompt)}")
+                prompt = format_prompt_max_length(messages, 10000)
                 debug.log(f"Copilot: Created conversation: {conversation_id}")
             else:
                 conversation_id = conversation.conversation_id
diff --git a/g4f/Provider/PollinationsAI.py b/g4f/Provider/PollinationsAI.py
index 20f3e0c9..72efd088 100644
--- a/g4f/Provider/PollinationsAI.py
+++ b/g4f/Provider/PollinationsAI.py
@@ -10,7 +10,7 @@ from ..image import ImageResponse
 from ..requests.raise_for_status import raise_for_status
 from ..requests.aiohttp import get_connector
 from .needs_auth.OpenaiAPI import OpenaiAPI
-from .helper import format_prompt
+from .helper import format_prompt_max_length
 
 class PollinationsAI(OpenaiAPI):
     label = "Pollinations AI"
@@ -97,7 +97,7 @@ class PollinationsAI(OpenaiAPI):
     async def _generate_text(cls, model: str, messages: Messages, api_key: str = None, proxy: str = None, **kwargs):
         if api_key is None:
             async with ClientSession(connector=get_connector(proxy=proxy), headers=cls.headers) as session:
-                prompt = format_prompt(messages)
+                prompt = format_prompt_max_length(messages, 5000)
                 async with session.get(f"https://text.pollinations.ai/{quote(prompt)}?model={quote(model)}") as response:
                     await raise_for_status(response)
                     async for line in response.content.iter_any():
diff --git a/g4f/Provider/needs_auth/Gemini.py b/g4f/Provider/needs_auth/Gemini.py
index 3734bbb8..beb9b0c5 100644
--- a/g4f/Provider/needs_auth/Gemini.py
+++ b/g4f/Provider/needs_auth/Gemini.py
@@ -207,7 +207,7 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
         ) as session:
             if not cls._snlm0e:
                 await cls.fetch_snlm0e(session, cls._cookies) if cls._cookies else None
-            inner_data = json.dumps([None, params["text"], "de-DE", None, 2])
+            inner_data = json.dumps([None, params["text"], "en-US", None, 2])
             async with session.post(
                 "https://gemini.google.com/_/BardChatUi/data/batchexecute",
                 data={
@@ -334,8 +334,11 @@ async def iter_filter_base64(response_iter: AsyncIterator[bytes]) -> AsyncIterator[bytes]:
 
 async def iter_base64_decode(response_iter: AsyncIterator[bytes]) -> AsyncIterator[bytes]:
     buffer = b""
+    rest = 0
     async for chunk in response_iter:
         chunk = buffer + chunk
         rest = len(chunk) % 4
         buffer = chunk[-rest:]
         yield base64.b64decode(chunk[:-rest])
+    if rest > 0:
+        yield base64.b64decode(buffer + (4 - rest) * b"=")
\ No newline at end of file
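The flush added to iter_base64_decode relies on base64 decoding in
four-character groups: whatever remainder is buffered when the stream ends
must be padded back to a multiple of four with "=" before b64decode accepts
it. A standalone illustration of that rule, independent of the provider:

    import base64

    encoded = base64.b64encode(b"hello world")   # b'aGVsbG8gd29ybGQ='
    head, tail = encoded[:8], encoded[8:].rstrip(b"=")
    rest = len(tail) % 4                         # 3 leftover characters
    assert base64.b64decode(head) == b"hello "
    assert base64.b64decode(tail + (4 - rest) * b"=") == b"world"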
diff --git a/g4f/Provider/needs_auth/GithubCopilot.py b/g4f/Provider/needs_auth/GithubCopilot.py
index 4346ab03..1deca50c 100644
--- a/g4f/Provider/needs_auth/GithubCopilot.py
+++ b/g4f/Provider/needs_auth/GithubCopilot.py
@@ -1,11 +1,12 @@
 from __future__ import annotations
 
 import json
+from aiohttp import ClientSession
 
 from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin, BaseConversation
 from ...typing import AsyncResult, Messages, Cookies
 from ...requests.raise_for_status import raise_for_status
-from ...requests import StreamSession
+from ...requests.aiohttp import get_connector
 from ...providers.helper import format_prompt
 from ...cookies import get_cookies
 
@@ -16,6 +17,7 @@ class Conversation(BaseConversation):
         self.conversation_id = conversation_id
 
 class GithubCopilot(AsyncGeneratorProvider, ProviderModelMixin):
+    label = "GitHub Copilot"
     url = "https://github.com/copilot"
     working = True
 
@@ -42,13 +44,22 @@ class GithubCopilot(AsyncGeneratorProvider, ProviderModelMixin):
         if not model:
             model = cls.default_model
         if cookies is None:
-            cookies = get_cookies(".github.com")
-        async with StreamSession(
-            proxy=proxy,
-            impersonate="chrome",
+            cookies = get_cookies("github.com")
+        async with ClientSession(
+            connector=get_connector(proxy=proxy),
             cookies=cookies,
             headers={
-                "GitHub-Verified-Fetch": "true",
+                'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:133.0) Gecko/20100101 Firefox/133.0',
+                'Accept-Language': 'en-US,en;q=0.5',
+                'Referer': 'https://github.com/copilot',
+                'Content-Type': 'application/json',
+                'GitHub-Verified-Fetch': 'true',
+                'X-Requested-With': 'XMLHttpRequest',
+                'Origin': 'https://github.com',
+                'Connection': 'keep-alive',
+                'Sec-Fetch-Dest': 'empty',
+                'Sec-Fetch-Mode': 'cors',
+                'Sec-Fetch-Site': 'same-origin',
             }
         ) as session:
             headers = {}
@@ -87,7 +98,7 @@
             json=json_data,
             headers=headers
         ) as response:
-            async for line in response.iter_lines():
+            async for line in response.content:
                 if line.startswith(b"data: "):
                     data = json.loads(line[6:])
                     if data.get("type") == "content":
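With aiohttp, the provider consumes the SSE stream by iterating
`response.content` and filtering `data: ` lines itself instead of relying on
StreamSession.iter_lines(). A minimal sketch of the pattern — the URL, the
request payload, and the final "body" key are placeholder assumptions:

    import json
    from aiohttp import ClientSession

    async def stream_events(url: str, payload: dict, headers: dict):
        # Placeholder endpoint/payload; mirrors the loop in the hunk above.
        async with ClientSession(headers=headers) as session:
            async with session.post(url, json=payload) as response:
                async for line in response.content:
                    if line.startswith(b"data: "):
                        event = json.loads(line[6:])
                        if event.get("type") == "content":
                            yield event.get("body")  # assumed payload key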
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index b64f8cd9..42fe5d77 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -352,7 +352,10 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                 json={"p": get_requirements_token(RequestConfig.proof_token) if RequestConfig.proof_token else None},
                 headers=cls._headers
             ) as response:
-                cls._update_request_args(session)
+                if response.status == 401:
+                    cls._headers = cls._api_key = None
+                else:
+                    cls._update_request_args(session)
                 await raise_for_status(response)
                 chat_requirements = await response.json()
                 need_turnstile = chat_requirements.get("turnstile", {}).get("required", False)
@@ -525,7 +528,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                 cls._set_api_key(RequestConfig.access_token)
             except NoValidHarFileError:
                 if has_nodriver:
-                    if RequestConfig.access_token is None:
+                    if cls._api_key is None:
                         await cls.nodriver_auth(proxy)
                 else:
                     raise
@@ -545,7 +548,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                 if "OpenAI-Sentinel-Turnstile-Token" in event.request.headers:
                     RequestConfig.turnstile_token = event.request.headers["OpenAI-Sentinel-Turnstile-Token"]
                 if "Authorization" in event.request.headers:
-                    RequestConfig.access_token = event.request.headers["Authorization"].split()[-1]
+                    cls._set_api_key(event.request.headers["Authorization"].split()[-1])
             elif event.request.url == arkose_url:
                 RequestConfig.arkose_request = arkReq(
                     arkURL=event.request.url,
@@ -560,13 +563,13 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
         user_agent = await page.evaluate("window.navigator.userAgent")
         await page.select("#prompt-textarea", 240)
         while True:
-            if RequestConfig.access_token:
+            if cls._api_key is not None:
                 break
             body = await page.evaluate("JSON.stringify(window.__remixContext)")
             if body:
                 match = re.search(r'"accessToken":"(.*?)"', body)
                 if match:
-                    RequestConfig.access_token = match.group(1)
+                    cls._set_api_key(match.group(1))
                     break
             await asyncio.sleep(1)
         while True:
@@ -578,7 +581,6 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             RequestConfig.cookies[c.name] = c.value
         await page.close()
         cls._create_request_args(RequestConfig.cookies, RequestConfig.headers, user_agent=user_agent)
-        cls._set_api_key(RequestConfig.access_token)
 
     @staticmethod
     def get_default_headers() -> dict:
diff --git a/g4f/__init__.py b/g4f/__init__.py
index ef3cf23a..7d8da231 100644
--- a/g4f/__init__.py
+++ b/g4f/__init__.py
@@ -42,7 +42,9 @@ class ChatCompletion:
         if "proxy" not in kwargs:
             proxy = os.environ.get("G4F_PROXY")
             if proxy:
-                kwargs['proxy'] = proxy
+                kwargs["proxy"] = proxy
+        if ignore_stream:
+            kwargs["ignore_stream"] = True
 
         result = provider.create_completion(model, messages, stream=stream, **kwargs)
 
@@ -53,9 +55,20 @@ class ChatCompletion:
                      messages : Messages,
                      provider : Union[ProviderType, str, None] = None,
                      stream   : bool = False,
+                     image    : ImageType = None,
+                     image_name: Optional[str] = None,
+                     ignore_stream: bool = False,
                      ignore_working: bool = False,
                      **kwargs) -> Union[AsyncResult, Coroutine[str]]:
         model, provider = get_model_and_provider(model, provider, False, ignore_working)
+        if image is not None:
+            kwargs["images"] = [(image, image_name)]
+        if "proxy" not in kwargs:
+            proxy = os.environ.get("G4F_PROXY")
+            if proxy:
+                kwargs["proxy"] = proxy
+        if ignore_stream:
+            kwargs["ignore_stream"] = True
 
         if stream:
             if hasattr(provider, "create_async_generator"):
diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py
index 88b09879..3b4eb18d 100644
--- a/g4f/api/__init__.py
+++ b/g4f/api/__init__.py
@@ -26,7 +26,6 @@ from fastapi.encoders import jsonable_encoder
 from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials, HTTPBasic
 from fastapi.middleware.cors import CORSMiddleware
 from starlette.responses import FileResponse
-from pydantic import BaseModel, Field
 from typing import Union, Optional, List
 try:
     from typing import Annotated
@@ -40,11 +39,16 @@ from g4f.client import AsyncClient, ChatCompletion, ImagesResponse, convert_to_provider
 from g4f.providers.response import BaseConversation
 from g4f.client.helper import filter_none
 from g4f.image import is_accepted_format, is_data_uri_an_image, images_dir
-from g4f.typing import Messages
-from g4f.errors import ProviderNotFoundError, ModelNotFoundError, MissingAuthError
+from g4f.errors import ProviderNotFoundError, ModelNotFoundError, MissingAuthError, NoValidHarFileError
 from g4f.cookies import read_cookie_files, get_cookies_dir
 from g4f.Provider import ProviderType, ProviderUtils, __providers__
 from g4f.gui import get_gui_app
+from .stubs import (
+    ChatCompletionsConfig, ImageGenerationConfig,
+    ProviderResponseModel, ModelResponseModel,
+    ErrorResponseModel, ProviderResponseDetailModel,
+    FileResponseModel
+)
 
 logger = logging.getLogger(__name__)
@@ -64,18 +68,10 @@ def create_app():
 
     api = Api(app)
 
-    if AppConfig.gui:
-        @app.get("/")
-        async def home():
-            return HTMLResponse(f'g4f v-{g4f.version.utils.current_version}:<br><br>'
-                                'Start to chat: <a href="/chat/">/chat/</a><br>'
-                                'Open Swagger UI at: '
-                                '<a href="/docs">/docs</a>')
-
     api.register_routes()
     api.register_authorization()
     api.register_validation_exception_handler()
-
+
     if AppConfig.gui:
         gui_app = WSGIMiddleware(get_gui_app())
         app.mount("/", gui_app)
@@ -100,63 +96,6 @@ def create_app_with_gui_and_debug():
     AppConfig.gui = True
     return create_app()
 
-class ChatCompletionsConfig(BaseModel):
-    messages: Messages = Field(examples=[[{"role": "system", "content": ""}, {"role": "user", "content": ""}]])
-    model: str = Field(default="")
-    provider: Optional[str] = None
-    stream: bool = False
-    image: Optional[str] = None
-    image_name: Optional[str] = None
-    images: Optional[list[tuple[str, str]]] = None
-    temperature: Optional[float] = None
-    max_tokens: Optional[int] = None
-    stop: Union[list[str], str, None] = None
-    api_key: Optional[str] = None
-    web_search: Optional[bool] = None
-    proxy: Optional[str] = None
-    conversation_id: Optional[str] = None
-    history_disabled: Optional[bool] = None
-    auto_continue: Optional[bool] = None
-    timeout: Optional[int] = None
-
-class ImageGenerationConfig(BaseModel):
-    prompt: str
-    model: Optional[str] = None
-    provider: Optional[str] = None
-    response_format: Optional[str] = None
-    api_key: Optional[str] = None
-    proxy: Optional[str] = None
-
-class ProviderResponseModel(BaseModel):
-    id: str
-    object: str = "provider"
-    created: int
-    url: Optional[str]
-    label: Optional[str]
-
-class ProviderResponseDetailModel(ProviderResponseModel):
-    models: list[str]
-    image_models: list[str]
-    vision_models: list[str]
-    params: list[str]
-
-class ModelResponseModel(BaseModel):
-    id: str
-    object: str = "model"
-    created: int
-    owned_by: Optional[str]
-
-class ErrorResponseModel(BaseModel):
-    error: ErrorResponseMessageModel
-    model: Optional[str] = None
-    provider: Optional[str] = None
-
-class ErrorResponseMessageModel(BaseModel):
-    message: str
-
-class FileResponseModel(BaseModel):
-    filename: str
-
 class ErrorResponse(Response):
     media_type = "application/json"
@@ -198,7 +137,7 @@ class Api:
     security = HTTPBearer(auto_error=False)
     basic_security = HTTPBasic()
 
-    async def get_username(self, request: Request):
+    async def get_username(self, request: Request) -> str:
         credentials = await self.basic_security(request)
         current_password_bytes = credentials.password.encode()
         is_correct_password = secrets.compare_digest(
@@ -222,13 +161,13 @@ class Api:
                 user_g4f_api_key = await self.get_g4f_api_key(request)
             except HTTPException:
                 user_g4f_api_key = None
-            if request.url.path.startswith("/v1"):
+            path = request.url.path
+            if path.startswith("/v1"):
                 if user_g4f_api_key is None:
                     return ErrorResponse.from_message("G4F API key required", HTTP_401_UNAUTHORIZED)
                 if not secrets.compare_digest(AppConfig.g4f_api_key, user_g4f_api_key):
                     return ErrorResponse.from_message("Invalid G4F API key", HTTP_403_FORBIDDEN)
             else:
-                path = request.url.path
                 if user_g4f_api_key is not None and path.startswith("/images/"):
                     if not secrets.compare_digest(AppConfig.g4f_api_key, user_g4f_api_key):
                         return ErrorResponse.from_message("Invalid G4F API key", HTTP_403_FORBIDDEN)
@@ -261,6 +200,8 @@ class Api:
     def register_routes(self):
         @self.app.get("/")
         async def read_root():
+            if AppConfig.gui:
+                return RedirectResponse("/chat/", 302)
             return RedirectResponse("/v1", 302)
 
         @self.app.get("/v1")
@@ -336,6 +277,7 @@ class Api:
                     except ValueError as e:
                         example = json.dumps({"images": [["data:image/jpeg;base64,...", "filename"]]})
                         return ErrorResponse.from_message(f'The image you send must be a data URI. Example: {example}', status_code=HTTP_422_UNPROCESSABLE_ENTITY)
+
                 # Create the completion response
                 response = self.client.chat.completions.create(
                     **filter_none(
@@ -379,7 +321,7 @@ class Api:
             except (ModelNotFoundError, ProviderNotFoundError) as e:
                 logger.exception(e)
                 return ErrorResponse.from_exception(e, config, HTTP_404_NOT_FOUND)
-            except MissingAuthError as e:
+            except (MissingAuthError, NoValidHarFileError) as e:
                 logger.exception(e)
                 return ErrorResponse.from_exception(e, config, HTTP_401_UNAUTHORIZED)
             except Exception as e:
@@ -392,7 +334,6 @@ class Api:
             HTTP_404_NOT_FOUND: {"model": ErrorResponseModel},
             HTTP_500_INTERNAL_SERVER_ERROR: {"model": ErrorResponseModel},
         }
-
         @self.app.post("/v1/images/generate", responses=responses)
         @self.app.post("/v1/images/generations", responses=responses)
         async def generate_image(
@@ -564,4 +505,4 @@ def run_api(
         use_colors=use_colors,
         factory=True,
         reload=reload
-    )
+    )
\ No newline at end of file
diff --git a/g4f/api/stubs.py b/g4f/api/stubs.py
new file mode 100644
index 00000000..8610f6b0
--- /dev/null
+++ b/g4f/api/stubs.py
@@ -0,0 +1,63 @@
+from __future__ import annotations
+
+from pydantic import BaseModel, Field
+from typing import Union, Optional
+
+from g4f.typing import Messages
+
+class ChatCompletionsConfig(BaseModel):
+    messages: Messages = Field(examples=[[{"role": "system", "content": ""}, {"role": "user", "content": ""}]])
+    model: str = Field(default="")
+    provider: Optional[str] = None
+    stream: bool = False
+    image: Optional[str] = None
+    image_name: Optional[str] = None
+    images: Optional[list[tuple[str, str]]] = None
+    temperature: Optional[float] = None
+    max_tokens: Optional[int] = None
+    stop: Union[list[str], str, None] = None
+    api_key: Optional[str] = None
+    web_search: Optional[bool] = None
+    proxy: Optional[str] = None
+    conversation_id: Optional[str] = None
+    history_disabled: Optional[bool] = None
+    auto_continue: Optional[bool] = None
+    timeout: Optional[int] = None
+
+class ImageGenerationConfig(BaseModel):
+    prompt: str
+    model: Optional[str] = None
+    provider: Optional[str] = None
+    response_format: Optional[str] = None
+    api_key: Optional[str] = None
+    proxy: Optional[str] = None
+
+class ProviderResponseModel(BaseModel):
+    id: str
+    object: str = "provider"
+    created: int
+    url: Optional[str]
+    label: Optional[str]
+
+class ProviderResponseDetailModel(ProviderResponseModel):
+    models: list[str]
+    image_models: list[str]
+    vision_models: list[str]
+    params: list[str]
+
+class ModelResponseModel(BaseModel):
+    id: str
+    object: str = "model"
+    created: int
+    owned_by: Optional[str]
+
+class ErrorResponseModel(BaseModel):
+    error: ErrorResponseMessageModel
+    model: Optional[str] = None
+    provider: Optional[str] = None
+
+class ErrorResponseMessageModel(BaseModel):
+    message: str
+
+class FileResponseModel(BaseModel):
+    filename: str
\ No newline at end of file
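With the request and response models factored out into g4f/api/stubs.py, a
/v1/chat/completions request body maps directly onto ChatCompletionsConfig.
For illustration (host, port, key, and model here are assumptions; only
model and messages are required by the schema):

    import requests

    # Host/port are whatever `g4f api` was started with; the Bearer key is
    # only needed when AppConfig.g4f_api_key is set.
    response = requests.post(
        "http://localhost:1337/v1/chat/completions",
        headers={"Authorization": "Bearer <g4f-api-key>"},
        json={
            "model": "gpt-4o-mini",
            "messages": [{"role": "user", "content": "Hello"}],
            "stream": False,
        },
        timeout=120,
    )
    print(response.json())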
diff --git a/g4f/client/__init__.py b/g4f/client/__init__.py
index ec3fec00..dee3fa30 100644
--- a/g4f/client/__init__.py
+++ b/g4f/client/__init__.py
@@ -16,7 +16,6 @@ from ..errors import NoImageResponseError
 from ..providers.retry_provider import IterListProvider
 from ..providers.asyncio import to_sync_generator, async_generator_to_list
 from ..Provider.needs_auth import BingCreateImages, OpenaiAccount
-from ..image import to_bytes
 from .stubs import ChatCompletion, ChatCompletionChunk, Image, ImagesResponse
 from .image_models import ImageModels
 from .types import IterResponse, ImageProvider, Client as BaseClient
@@ -59,7 +58,7 @@ def iter_response(
         elif isinstance(chunk, BaseConversation):
             yield chunk
             continue
-        elif isinstance(chunk, SynthesizeData) or chunk is None:
+        elif isinstance(chunk, SynthesizeData) or not chunk:
             continue
 
         chunk = str(chunk)
@@ -122,7 +121,7 @@ async def async_iter_response(
         elif isinstance(chunk, BaseConversation):
             yield chunk
             continue
-        elif isinstance(chunk, SynthesizeData) or chunk is None:
+        elif isinstance(chunk, SynthesizeData) or not chunk:
             continue
 
         chunk = str(chunk)
@@ -214,6 +213,8 @@ class Completions:
         stop = [stop] if isinstance(stop, str) else stop
         if image is not None:
             kwargs["images"] = [(image, image_name)]
+        if ignore_stream:
+            kwargs["ignore_stream"] = True
         response = provider.create_completion(
             model,
             messages,
@@ -330,7 +331,6 @@ class Images:
         model: str,
         prompt: str,
         prompt_prefix: str = "Generate a image: ",
-        image: ImageType = None,
         **kwargs
     ) -> ImageResponse:
         messages = [{"role": "user", "content": f"{prompt_prefix}{prompt}"}]
@@ -341,7 +341,6 @@ class Images:
                 messages,
                 stream=True,
                 prompt=prompt,
-                image=image,
                 **kwargs
             ):
                 if isinstance(item, ImageResponse):
@@ -353,7 +352,6 @@ class Images:
                 messages,
                 True,
                 prompt=prompt,
-                image=image,
                 **kwargs
             ):
                 if isinstance(item, ImageResponse):
@@ -389,20 +387,22 @@ class Images:
         if proxy is None:
             proxy = self.client.proxy
         prompt = "create a variation of this image"
+        if image is not None:
+            kwargs["images"] = [(image, None)]
         e = None
         response = None
         if isinstance(provider_handler, IterListProvider):
             for provider in provider_handler.providers:
                 try:
-                    response = await self._generate_image_response(provider, provider.__name__, model, prompt, image=image, **kwargs)
+                    response = await self._generate_image_response(provider, provider.__name__, model, prompt, **kwargs)
                     if response is not None:
                         provider_name = provider.__name__
                         break
                 except Exception as e:
                     debug.log(f"Image provider {provider.__name__}: {e}")
         else:
-            response = await self._generate_image_response(provider_handler, provider_name, model, prompt, image=image, **kwargs)
+            response = await self._generate_image_response(provider_handler, provider_name, model, prompt, **kwargs)
 
         if isinstance(response, ImageResponse):
             return await self._process_image_response(response, response_format, proxy, model, provider_name)
@@ -494,6 +494,8 @@ class AsyncCompletions:
         stop = [stop] if isinstance(stop, str) else stop
         if image is not None:
             kwargs["images"] = [(image, image_name)]
+        if ignore_stream:
+            kwargs["ignore_stream"] = True
         if hasattr(provider, "create_async_generator"):
             create_handler = provider.create_async_generator
         else:
diff --git a/g4f/cookies.py b/g4f/cookies.py
index 5867e604..afc9245b 100644
--- a/g4f/cookies.py
+++ b/g4f/cookies.py
@@ -59,6 +59,7 @@ DOMAINS = [
     "chat.reka.ai",
     "chatgpt.com",
     ".cerebras.ai",
+    "github.com",
 ]
 
 if has_browser_cookie3 and os.environ.get('DBUS_SESSION_BUS_ADDRESS') == "/dev/null":
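Adding "github.com" to DOMAINS lets the cookie loader harvest GitHub cookies
from local browsers, which the reworked GithubCopilot provider relies on when
none are passed in explicitly. A quick check, assuming browser_cookie3 is
installed and a local browser is logged in to GitHub:

    from g4f.cookies import get_cookies

    cookies = get_cookies("github.com")
    print(sorted(cookies))  # expect session cookies such as 'user_session'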
diff --git a/g4f/gui/client/index.html b/g4f/gui/client/index.html
index 5ddc1104..2973fdc8 100644
--- a/g4f/gui/client/index.html
+++ b/g4f/gui/client/index.html
@@ -264,6 +264,13 @@
+ [seven added lines of search-toggle markup not recoverable from this copy]