Merge pull request #1975 from hlohaus/leech

Add get/set cookies dir, hide prompt option in gui
H Lohaus 2024-05-18 23:18:32 +02:00 committed by GitHub
commit 97ce36a217
12 changed files with 139 additions and 174 deletions

@@ -236,19 +236,39 @@ set_cookies(".google.com", {
})
```
Alternatively, you can place your .har and cookie files in the `/har_and_cookies` directory. To export a cookie file, use the EditThisCookie extension available on the Chrome Web Store: [EditThisCookie Extension](https://chromewebstore.google.com/detail/editthiscookie/fngmhnnpilhplaeedifhccceomclgfbg).
#### Using .har and Cookie Files
You can also create .har files to capture cookies. If you need further assistance, refer to the next section.
You can place `.har` and cookie files in the default `./har_and_cookies` directory. To export a cookie file, use the [EditThisCookie Extension](https://chromewebstore.google.com/detail/editthiscookie/fngmhnnpilhplaeedifhccceomclgfbg) available on the Chrome Web Store.
```bash
python -m g4f.cli api --debug
```
#### Creating .har Files to Capture Cookies
To capture cookies, you can also create `.har` files. For more details, refer to the next section.
#### Changing the Cookies Directory and Loading Cookie Files in Python
You can change the cookies directory and load cookie files in your Python environment. To set the cookies directory relative to your Python file, use the following code:
```python
import os.path
from g4f.cookies import set_cookies_dir, read_cookie_files
import g4f.debug
g4f.debug.logging = True
cookies_dir = os.path.join(os.path.dirname(__file__), "har_and_cookies")
set_cookies_dir(cookies_dir)
read_cookie_files(cookies_dir)
```
### Debug Mode
If you enable debug mode, you will see logs similar to the following:
```
Read .har file: ./har_and_cookies/you.com.har
Cookies added: 10 from .you.com
Read cookie file: ./har_and_cookies/google.json
Cookies added: 16 from .google.com
Starting server... [g4f v-0.0.0] (debug)
```
#### .HAR File for OpenaiChat Provider

@@ -1,18 +1,14 @@
from __future__ import annotations
import asyncio
import os
from typing import Iterator, Union
from ..cookies import get_cookies
from ..image import ImageResponse
from ..errors import MissingRequirementsError, MissingAuthError
from ..errors import MissingAuthError
from ..typing import AsyncResult, Messages, Cookies
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .bing.create_images import create_images, create_session, get_cookies_from_browser
from .bing.create_images import create_images, create_session
class BingCreateImages(AsyncGeneratorProvider, ProviderModelMixin):
label = "Microsoft Designer"
label = "Microsoft Designer in Bing"
parent = "Bing"
url = "https://www.bing.com/images/create"
working = True
@@ -38,30 +34,9 @@ class BingCreateImages(AsyncGeneratorProvider, ProviderModelMixin):
**kwargs
) -> AsyncResult:
session = BingCreateImages(cookies, proxy, api_key)
yield await session.create_async(messages[-1]["content"])
yield await session.generate(messages[-1]["content"])
def create(self, prompt: str) -> Iterator[Union[ImageResponse, str]]:
"""
Generator for creating imagecompletion based on a prompt.
Args:
prompt (str): Prompt to generate images.
Yields:
Generator[str, None, None]: The final output as markdown formatted string with images.
"""
cookies = self.cookies or get_cookies(".bing.com", False)
if cookies is None or "_U" not in cookies:
login_url = os.environ.get("G4F_LOGIN_URL")
if login_url:
yield f"Please login: [Bing]({login_url})\n\n"
try:
self.cookies = get_cookies_from_browser(self.proxy)
except MissingRequirementsError as e:
raise MissingAuthError(f'Missing "_U" cookie. {e}')
yield asyncio.run(self.create_async(prompt))
async def create_async(self, prompt: str) -> ImageResponse:
async def generate(self, prompt: str) -> ImageResponse:
"""
Asynchronously creates a markdown formatted string with images based on the prompt.
@@ -74,7 +49,6 @@ class BingCreateImages(AsyncGeneratorProvider, ProviderModelMixin):
cookies = self.cookies or get_cookies(".bing.com", False)
if cookies is None or "_U" not in cookies:
raise MissingAuthError('Missing "_U" cookie')
proxy = self.proxy or os.environ.get("G4F_PROXY")
async with create_session(cookies, proxy) as session:
images = await create_images(session, prompt, proxy)
async with create_session(cookies, self.proxy) as session:
images = await create_images(session, prompt)
return ImageResponse(images, prompt, {"preview": "{image}?w=200&h=200"} if len(images) > 1 else {})
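
For reference, a minimal usage sketch of the renamed `generate` coroutine. It assumes the constructor accepts keyword arguments in the same order as the `BingCreateImages(cookies, proxy, api_key)` call above, and that the returned `ImageResponse` renders as markdown when converted to a string.

```python
# Hedged sketch: drive the provider directly, using the ".bing.com" "_U" cookie as api_key.
import asyncio
from g4f.Provider import BingCreateImages  # assumed import path

async def main():
    session = BingCreateImages(api_key="<your _U cookie>")  # keyword name assumed
    image_response = await session.generate("a watercolor lighthouse at dusk")
    print(str(image_response))  # assumed to render as markdown image links

asyncio.run(main())
```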

@@ -4,7 +4,8 @@ import uuid
import secrets
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from ..typing import AsyncResult, Messages, ImageType
from ..image import to_data_uri
from .base_provider import AsyncGeneratorProvider
class Blackbox(AsyncGeneratorProvider):
@@ -17,8 +18,15 @@ class Blackbox(AsyncGeneratorProvider):
model: str,
messages: Messages,
proxy: str = None,
image: ImageType = None,
image_name: str = None,
**kwargs
) -> AsyncResult:
if image is not None:
messages[-1]["data"] = {
"fileText": image_name,
"imageBase64": to_data_uri(image)
}
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
"Accept": "*/*",

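The new `image` and `image_name` parameters attach the upload as a data URI to the last message. A hedged usage sketch follows; that `g4f.ChatCompletion.create` forwards these extra keyword arguments unchanged to the provider is an assumption, and the empty model string simply lets the provider use its own default.

```python
# Sketch only: pass an image to the Blackbox provider via the new keyword arguments.
import g4f
from g4f.Provider import Blackbox

with open("photo.jpg", "rb") as f:  # hypothetical local file
    image_data = f.read()

response = g4f.ChatCompletion.create(
    model="",                                   # provider default (assumption)
    provider=Blackbox,
    messages=[{"role": "user", "content": "What is in this picture?"}],
    image=image_data,                           # forwarded to create_async_generator (assumption)
    image_name="photo.jpg",
)
print(response)
```
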
@@ -1,7 +1,3 @@
"""
This module provides functionalities for creating and managing images using Bing's service.
It includes functions for user login, session creation, image creation, and processing.
"""
from __future__ import annotations
import asyncio
@@ -17,9 +13,7 @@ try:
except ImportError:
has_requirements = False
from ...providers.create_images import CreateImagesProvider
from ..helper import get_connector
from ...providers.types import ProviderType
from ...errors import MissingRequirementsError, RateLimitError
from ...webdriver import WebDriver, get_driver_cookies, get_browser
@@ -101,7 +95,7 @@ def create_session(cookies: Dict[str, str], proxy: str = None, connector: BaseCo
headers["Cookie"] = "; ".join(f"{k}={v}" for k, v in cookies.items())
return ClientSession(headers=headers, connector=get_connector(connector, proxy))
async def create_images(session: ClientSession, prompt: str, proxy: str = None, timeout: int = TIMEOUT_IMAGE_CREATION) -> List[str]:
async def create_images(session: ClientSession, prompt: str, timeout: int = TIMEOUT_IMAGE_CREATION) -> List[str]:
"""
Creates images based on a given prompt using Bing's service.
@@ -132,7 +126,7 @@ async def create_images(session: ClientSession, prompt: str, proxy: str = None,
raise RuntimeError(f"Create images failed: {error}")
if response.status != 302:
url = f"{BING_URL}/images/create?q={url_encoded_prompt}&rt=3&FORM=GENCRE"
async with session.post(url, allow_redirects=False, proxy=proxy, timeout=timeout) as response:
async with session.post(url, allow_redirects=False, timeout=timeout) as response:
if response.status != 302:
raise RuntimeError(f"Create images failed. Code: {response.status}")
@@ -186,21 +180,3 @@ def read_images(html_content: str) -> List[str]:
if not images:
raise RuntimeError("No images found")
return images
def patch_provider(provider: ProviderType) -> CreateImagesProvider:
"""
Patches a provider to include image creation capabilities.
Args:
provider (ProviderType): The provider to be patched.
Returns:
CreateImagesProvider: The patched provider with image creation capabilities.
"""
from ..BingCreateImages import BingCreateImages
service = BingCreateImages()
return CreateImagesProvider(
provider,
service.create,
service.create_async
)
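
With `patch_provider` removed and the `proxy` argument dropped from `create_images`, the proxy is now bound once to the session's connector in `create_session`, mirroring the updated `BingCreateImages.generate` above. A rough sketch of the new call flow, assuming the module path `g4f.Provider.bing.create_images`; cookie value, prompt, and proxy URL are placeholders:

```python
# Sketch: the proxy lives on the session, not on create_images().
import asyncio
from g4f.Provider.bing.create_images import create_session, create_images

async def main():
    cookies = {"_U": "<your _U cookie>"}
    async with create_session(cookies, proxy="http://127.0.0.1:8080") as session:
        image_urls = await create_images(session, "a pixel-art spaceship")
        print(image_urls)

asyncio.run(main())
```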

@@ -26,7 +26,7 @@ from ...webdriver import get_browser
from ...typing import AsyncResult, Messages, Cookies, ImageType, AsyncIterator
from ...requests import get_args_from_browser, raise_for_status
from ...requests.aiohttp import StreamSession
from ...image import to_image, to_bytes, ImageResponse, ImageRequest
from ...image import ImageResponse, ImageRequest, to_image, to_bytes, is_accepted_format
from ...errors import MissingAuthError, ResponseError
from ...providers.conversation import BaseConversation
from ..helper import format_cookies
@@ -138,23 +138,22 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
An ImageRequest object that contains the download URL, file name, and other data
"""
# Convert the image to a PIL Image object and get the extension
image = to_image(image)
extension = image.format.lower()
# Convert the image to a bytes object and get the size
data_bytes = to_bytes(image)
image = to_image(data_bytes)
extension = image.format.lower()
data = {
"file_name": image_name if image_name else f"{image.width}x{image.height}.{extension}",
"file_name": "" if image_name is None else image_name,
"file_size": len(data_bytes),
"use_case": "multimodal"
}
# Post the image data to the service and get the image data
async with session.post(f"{cls.url}/backend-api/files", json=data, headers=headers) as response:
cls._update_request_args()
cls._update_request_args(session)
await raise_for_status(response)
image_data = {
**data,
**await response.json(),
"mime_type": f"image/{extension}",
"mime_type": is_accepted_format(data_bytes),
"extension": extension,
"height": image.height,
"width": image.width
@@ -275,7 +274,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
first_part = line["message"]["content"]["parts"][0]
if "asset_pointer" not in first_part or "metadata" not in first_part:
return
if first_part["metadata"] is None:
if first_part["metadata"] is None or first_part["metadata"]["dalle"] is None:
return
prompt = first_part["metadata"]["dalle"]["prompt"]
file_id = first_part["asset_pointer"].split("file-service://", 1)[1]
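
In the image-upload hunk above, the reported `mime_type` now comes from `is_accepted_format(data_bytes)` rather than PIL's format string, i.e. it is sniffed from the actual bytes. A rough standalone illustration of that kind of magic-byte check; this is an assumption about the helper's behaviour, not its real implementation:

```python
# Illustrative only: detect an image MIME type from its leading magic bytes.
def sniff_image_mime(data: bytes) -> str:
    if data.startswith(b"\x89PNG\r\n\x1a\n"):
        return "image/png"
    if data.startswith(b"\xff\xd8\xff"):
        return "image/jpeg"
    if data.startswith((b"GIF87a", b"GIF89a")):
        return "image/gif"
    if data[:4] == b"RIFF" and data[8:12] == b"WEBP":
        return "image/webp"
    raise ValueError("Unrecognized or unsupported image format")
```
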
@@ -365,49 +364,17 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
) as session:
if cls._expires is not None and cls._expires < time.time():
cls._headers = cls._api_key = None
if cls._headers is None or cookies is not None:
cls._create_request_args(cookies)
api_key = kwargs["access_token"] if "access_token" in kwargs else api_key
if api_key is not None:
cls._set_api_key(api_key)
if cls.default_model is None and (not cls.needs_auth or cls._api_key is not None):
if cls._api_key is None:
cls._create_request_args(cookies)
async with session.get(
f"{cls.url}/",
headers=DEFAULT_HEADERS
) as response:
cls._update_request_args(session)
await raise_for_status(response)
try:
if not model:
cls.default_model = cls.get_model(await cls.get_default_model(session, cls._headers))
else:
cls.default_model = cls.get_model(model)
except MissingAuthError:
pass
except Exception as e:
api_key = cls._api_key = None
cls._create_request_args()
if debug.logging:
print("OpenaiChat: Load default model failed")
print(f"{e.__class__.__name__}: {e}")
arkose_token = None
proofTokens = None
if cls.default_model is None:
error = None
try:
arkose_token, api_key, cookies, headers, proofTokens = await getArkoseAndAccessToken(proxy)
cls._create_request_args(cookies, headers)
cls._set_api_key(api_key)
except NoValidHarFileError as e:
error = e
if cls._api_key is None:
await cls.nodriver_access_token(proxy)
try:
arkose_token, api_key, cookies, headers, proofTokens = await getArkoseAndAccessToken(proxy)
cls._create_request_args(cookies, headers)
cls._set_api_key(api_key)
except NoValidHarFileError as e:
if cls._api_key is None and cls.needs_auth:
raise error
raise e
if cls.default_model is None:
cls.default_model = cls.get_model(await cls.get_default_model(session, cls._headers))
try:
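
The refactored flow above still prefers an explicitly supplied token (`api_key`, or `access_token` via `**kwargs`) before falling back to `.har` files or a browser session. A hedged sketch of passing it yourself; that `ChatCompletion.create` forwards the extra keyword to the provider is an assumption, and the model name is only illustrative:

```python
# Sketch: supply a ChatGPT access token instead of relying on .har files.
import g4f
from g4f.Provider import OpenaiChat

response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",                       # illustrative model name
    provider=OpenaiChat,
    messages=[{"role": "user", "content": "Say hello"}],
    access_token="<your ChatGPT access token>",  # picked up via kwargs["access_token"] above
)
print(response)
```
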
@@ -461,7 +428,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
)
ws = None
if need_arkose:
async with session.post("https://chatgpt.com/backend-api/register-websocket", headers=cls._headers) as response:
async with session.post(f"{cls.url}/backend-api/register-websocket", headers=cls._headers) as response:
wss_url = (await response.json()).get("wss_url")
if wss_url:
ws = await session.ws_connect(wss_url)
@@ -490,7 +457,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
if proofofwork is not None:
headers["Openai-Sentinel-Proof-Token"] = proofofwork
async with session.post(
f"{cls.url}/backend-anon/conversation" if cls._api_key is None else
f"{cls.url}/backend-anon/conversation"
if cls._api_key is None else
f"{cls.url}/backend-api/conversation",
json=data,
headers=headers
@@ -580,12 +548,9 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
raise RuntimeError(line["error"])
if "message_type" not in line["message"]["metadata"]:
return
try:
image_response = await cls.get_generated_image(session, cls._headers, line)
if image_response is not None:
yield image_response
except Exception as e:
yield e
image_response = await cls.get_generated_image(session, cls._headers, line)
if image_response is not None:
yield image_response
if line["message"]["author"]["role"] != "assistant":
return
if line["message"]["content"]["content_type"] != "text":

@@ -12,6 +12,7 @@ from copy import deepcopy
from .crypt import decrypt, encrypt
from ...requests import StreamSession
from ...cookies import get_cookies_dir
from ... import debug
class NoValidHarFileError(Exception):
@@ -36,17 +37,14 @@ proofTokens: list = []
def readHAR():
global proofTokens
dirPath = "./"
harPath = []
chatArks = []
accessToken = None
cookies = {}
for root, dirs, files in os.walk(dirPath):
for root, dirs, files in os.walk(get_cookies_dir()):
for file in files:
if file.endswith(".har"):
harPath.append(os.path.join(root, file))
if harPath:
break
if not harPath:
raise NoValidHarFileError("No .har file found")
for path in harPath:
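
Since `readHAR` now walks `get_cookies_dir()` instead of the working directory, a quick way to check which `.har` files the provider will see (a small sketch that only reuses the helper introduced in this PR):

```python
# List the .har files g4f will pick up from the configured cookies directory.
import os
from g4f.cookies import get_cookies_dir

har_files = [
    os.path.join(root, name)
    for root, _dirs, names in os.walk(get_cookies_dir())
    for name in names
    if name.endswith(".har")
]
print(har_files or "No .har file found")
```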

@@ -7,6 +7,7 @@ import random
import logging
from ...requests import StreamSession, raise_for_status
from ...cookies import get_cookies_dir
from ...errors import MissingRequirementsError
from ... import debug
@@ -28,10 +29,9 @@ public_token = "public-token-live-507a52ad-7e69-496b-aee0-1c9863c7c819"
chatArks: list = None
def readHAR():
dirPath = "./"
harPath = []
chatArks = []
for root, dirs, files in os.walk(dirPath):
for root, dirs, files in os.walk(get_cookies_dir()):
for file in files:
if file.endswith(".har"):
harPath.append(os.path.join(root, file))
@@ -65,16 +65,10 @@ def parseHAREntry(entry) -> arkReq:
return tmpArk
async def sendRequest(tmpArk: arkReq, proxy: str = None):
try:
async with StreamSession(headers=tmpArk.arkHeaders, cookies=tmpArk.arkCookies, proxy=proxy) as session:
async with session.post(tmpArk.arkURL, data=tmpArk.arkBody) as response:
await raise_for_status(response)
return await response.text()
except RuntimeError as e:
if str(e) == "Event loop is closed":
print("Event loop is closed error occurred in sendRequest.")
else:
raise
async with StreamSession(headers=tmpArk.arkHeaders, cookies=tmpArk.arkCookies, proxy=proxy) as session:
async with session.post(tmpArk.arkURL, data=tmpArk.arkBody) as response:
await raise_for_status(response)
return await response.text()
async def create_telemetry_id(proxy: str = None):
global chatArks

@@ -23,8 +23,9 @@ from .typing import Dict, Cookies
from .errors import MissingRequirementsError
from . import debug
# Global variable to store cookies
_cookies: Dict[str, Cookies] = {}
class CookiesConfig():
cookies: Dict[str, Cookies] = {}
cookies_dir: str = "./har_and_cookies"
DOMAINS = [
".bing.com",
@@ -48,20 +49,18 @@ def get_cookies(domain_name: str = '', raise_requirements_error: bool = True, si
Returns:
Dict[str, str]: A dictionary of cookie names and values.
"""
global _cookies
if domain_name in _cookies:
return _cookies[domain_name]
if domain_name in CookiesConfig.cookies:
return CookiesConfig.cookies[domain_name]
cookies = load_cookies_from_browsers(domain_name, raise_requirements_error, single_browser)
_cookies[domain_name] = cookies
CookiesConfig.cookies[domain_name] = cookies
return cookies
def set_cookies(domain_name: str, cookies: Cookies = None) -> None:
global _cookies
if cookies:
_cookies[domain_name] = cookies
elif domain_name in _cookies:
_cookies.pop(domain_name)
CookiesConfig.cookies[domain_name] = cookies
elif domain_name in CookiesConfig.cookies:
CookiesConfig.cookies.pop(domain_name)
def load_cookies_from_browsers(domain_name: str, raise_requirements_error: bool = True, single_browser: bool = False) -> Cookies:
"""
@@ -96,7 +95,13 @@ def load_cookies_from_browsers(domain_name: str, raise_requirements_error: bool
print(f"Error reading cookies from {cookie_fn.__name__} for {domain_name}: {e}")
return cookies
def read_cookie_files(dirPath: str = "./har_and_cookies"):
def set_cookies_dir(dir: str) -> None:
CookiesConfig.cookies_dir = dir
def get_cookies_dir() -> str:
return CookiesConfig.cookies_dir
def read_cookie_files(dirPath: str = None):
def get_domain(v: dict) -> str:
host = [h["value"] for h in v['request']['headers'] if h["name"].lower() in ("host", ":authority")]
if not host:
@@ -106,16 +111,16 @@ def read_cookie_files(dirPath: str = "./har_and_cookies"):
if d in host:
return d
global _cookies
harFiles = []
cookieFiles = []
for root, dirs, files in os.walk(dirPath):
for root, dirs, files in os.walk(CookiesConfig.cookies_dir if dirPath is None else dirPath):
for file in files:
if file.endswith(".har"):
harFiles.append(os.path.join(root, file))
elif file.endswith(".json"):
cookieFiles.append(os.path.join(root, file))
_cookies = {}
CookiesConfig.cookies = {}
for path in harFiles:
with open(path, 'rb') as file:
try:
@@ -134,7 +139,7 @@ def read_cookie_files(dirPath: str = "./har_and_cookies"):
for c in v['request']['cookies']:
v_cookies[c['name']] = c['value']
if len(v_cookies) > 0:
_cookies[domain] = v_cookies
CookiesConfig.cookies[domain] = v_cookies
new_cookies[domain] = len(v_cookies)
if debug.logging:
for domain, new_values in new_cookies.items():
@@ -159,7 +164,7 @@ def read_cookie_files(dirPath: str = "./har_and_cookies"):
for domain, new_values in new_cookies.items():
if debug.logging:
print(f"Cookies added: {len(new_values)} from {domain}")
_cookies[domain] = new_values
CookiesConfig.cookies[domain] = new_values
def _g4f(domain_name: str) -> list:
"""

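With `dirPath` now defaulting to `None`, `read_cookie_files()` falls back to `CookiesConfig.cookies_dir`, so a directory set once via `set_cookies_dir` is used everywhere. A small sketch; the path is a placeholder:

```python
# Sketch: configure the cookies directory once, then load without repeating the path.
from g4f.cookies import set_cookies_dir, get_cookies_dir, read_cookie_files

set_cookies_dir("/data/g4f/har_and_cookies")   # placeholder path
assert get_cookies_dir() == "/data/g4f/har_and_cookies"
read_cookie_files()                            # walks CookiesConfig.cookies_dir
```
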
@@ -93,22 +93,22 @@
<div class="paper">
<h3>Settings</h3>
<div class="field">
<span class="label">Web Access</span>
<span class="label">Web Access with DuckDuckGo</span>
<input type="checkbox" id="switch" />
<label for="switch" class="toogle" title="Add the pages of the first 5 search results to the query."></label>
</div>
<div class="field">
<span class="label">Disable History</span>
<span class="label">Disable Conversation History</span>
<input type="checkbox" id="history" />
<label for="history" class="toogle" title="To improve the reaction time or if you have trouble with large conversations."></label>
</div>
<div class="field">
<span class="label">Hide System prompt</span>
<span class="label">Hide System-prompt</span>
<input type="checkbox" id="hide-systemPrompt" />
<label for="hide-systemPrompt" class="toogle" title="For more space on phones"></label>
</div>
<div class="field">
<span class="label">Auto continue</span>
<span class="label">Auto continue in ChatGPT</span>
<input id="auto_continue" type="checkbox" name="auto_continue" checked/>
<label for="auto_continue" class="toogle" title="Continue large responses in OpenaiChat"></label>
</div>
@@ -121,8 +121,8 @@
<input type="text" id="recognition-language" value="" placeholder="navigator.language"/>
</div>
<div class="field box">
<label for="Bing-api_key" class="label" title="">Bing:</label>
<textarea id="Bing-api_key" name="Bing[api_key]" class="BingCreateImages-api_key" placeholder="&quot;_U&quot; cookie"></textarea>
<label for="BingCreateImages-api_key" class="label" title="">Microsoft Designer in Bing:</label>
<textarea id="BingCreateImages-api_key" name="BingCreateImages[api_key]" placeholder="&quot;_U&quot; cookie"></textarea>
</div>
<div class="field box">
<label for="DeepInfra-api_key" class="label" title="">DeepInfra:</label>
@@ -144,14 +144,14 @@
<label for="Openai-api_key" class="label" title="">OpenAI API:</label>
<textarea id="Openai-api_key" name="Openai[api_key]" placeholder="api_key"></textarea>
</div>
<div class="field box">
<label for="OpenaiAccount-api_key" class="label" title="">OpenAI ChatGPT:</label>
<textarea id="OpenaiAccount-api_key" name="OpenaiAccount[api_key]" class="OpenaiChat-api_key" placeholder="access_key"></textarea>
</div>
<div class="field box">
<label for="OpenRouter-api_key" class="label" title="">OpenRouter:</label>
<textarea id="OpenRouter-api_key" name="OpenRouter[api_key]" placeholder="api_key"></textarea>
</div>
<div class="field box">
<label for="PerplexityApi-api_key" class="label" title="">Perplexity API:</label>
<textarea id="PerplexityApi-api_key" name="PerplexityApi[api_key]" placeholder="api_key"></textarea>
</div>
<div class="field box">
<label for="Replicate-api_key" class="label" title="">Replicate:</label>
<textarea id="Replicate-api_key" name="Replicate[api_key]" class="ReplicateImage-api_key" placeholder="api_key"></textarea>
@@ -173,7 +173,10 @@
<div id="messages" class="box"></div>
<div class="toolbar">
<div id="input-count" class="">
&nbsp;
<button class="hide-input">
<i class="fa-solid fa-angles-down"></i>
</button>
<span class="text"></span>
</div>
<div class="stop_generating stop_generating-hidden">
<button id="cancelButton">

@@ -457,7 +457,11 @@ body {
#input-count {
width: fit-content;
font-size: 12px;
padding: 6px var(--inner-gap);
padding: 6px 6px;
}
#input-count .text {
padding: 0 4px;
}
.stop_generating, .toolbar .regenerate {
@@ -491,6 +495,13 @@ body {
animation: show_popup 0.4s;
}
.toolbar .hide-input {
background: transparent;
border: none;
color: var(--colour-3);
cursor: pointer;
}
@keyframes show_popup {
from {
opacity: 0;

@@ -11,7 +11,7 @@ const imageInput = document.getElementById("image");
const cameraInput = document.getElementById("camera");
const fileInput = document.getElementById("file");
const microLabel = document.querySelector(".micro-label");
const inputCount = document.getElementById("input-count");
const inputCount = document.getElementById("input-count").querySelector(".text");
const providerSelect = document.getElementById("provider");
const modelSelect = document.getElementById("model");
const modelProvider = document.getElementById("model2");
@@ -41,9 +41,7 @@ appStorage = window.localStorage || {
length: 0
}
const markdown = window.markdownit({
html: true,
});
const markdown = window.markdownit();
const markdown_render = (content) => {
return markdown.render(content
.replaceAll(/<!-- generated images start -->|<!-- generated images end -->/gm, "")
@@ -813,6 +811,17 @@ document.getElementById("regenerateButton").addEventListener("click", async () =
await ask_gpt();
});
const hide_input = document.querySelector(".toolbar .hide-input");
hide_input.addEventListener("click", async (e) => {
const icon = hide_input.querySelector("i");
const func = icon.classList.contains("fa-angles-down") ? "add" : "remove";
const remv = icon.classList.contains("fa-angles-down") ? "remove" : "add";
icon.classList[func]("fa-angles-up");
icon.classList[remv]("fa-angles-down");
document.querySelector(".conversation .user-input").classList[func]("hidden");
document.querySelector(".conversation .buttons").classList[func]("hidden");
});
const uuid = () => {
return `xxxxxxxx-xxxx-4xxx-yxxx-${Date.now().toString(16)}`.replace(
/[xy]/g,
@@ -1016,7 +1025,7 @@ const count_input = async () => {
if (countFocus.value) {
inputCount.innerText = count_words_and_tokens(countFocus.value, get_selected_model());
} else {
inputCount.innerHTML = "&nbsp;"
inputCount.innerText = "";
}
}, 100);
};
@@ -1060,6 +1069,8 @@ async function on_api() {
messageInput.addEventListener("keydown", async (evt) => {
if (prompt_lock) return;
// If not mobile
if (!window.matchMedia("(pointer:coarse)").matches)
if (evt.keyCode === 13 && !evt.shiftKey) {
evt.preventDefault();
console.log("pressed enter");
@@ -1262,6 +1273,7 @@ async function load_provider_models(providerIndex=null) {
if (!providerIndex) {
providerIndex = providerSelect.selectedIndex;
}
modelProvider.innerHTML = '';
const provider = providerSelect.options[providerIndex].value;
if (!provider) {
modelProvider.classList.add("hidden");
@@ -1269,7 +1281,6 @@ async function load_provider_models(providerIndex=null) {
return;
}
const models = await api('models', provider);
modelProvider.innerHTML = '';
if (models.length > 0) {
modelSelect.classList.add("hidden");
modelProvider.classList.remove("hidden");

@@ -210,8 +210,8 @@ def format_images_markdown(images: Union[str, list], alt: str, preview: Union[st
if not isinstance(preview, list):
preview = [preview.replace('{image}', image) if preview else image for image in images]
result = "\n".join(
#f"[![#{idx+1} {alt}]({preview[idx]})]({image})"
f'[<img src="{preview[idx]}" width="200" alt="#{idx+1} {alt}">]({image})'
f"[![#{idx+1} {alt}]({preview[idx]})]({image})"
#f'[<img src="{preview[idx]}" width="200" alt="#{idx+1} {alt}">]({image})'
for idx, image in enumerate(images)
)
start_flag = "<!-- generated images start -->\n"
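
Switching back from the raw `<img>` tag to plain markdown image syntax changes what clients receive. A short sketch of the resulting string for two placeholder URLs, based only on the lines shown above; the exact surrounding flags come from the rest of the function:

```python
# Illustrative call of the helper changed above; URLs are placeholders.
from g4f.image import format_images_markdown

images = ["https://example.com/1.png", "https://example.com/2.png"]
print(format_images_markdown(images, "a cat", "{image}?w=200&h=200"))
# Expected shape (assumption), per image:
# [![#1 a cat](https://example.com/1.png?w=200&h=200)](https://example.com/1.png)
# wrapped between the generated-images start/end comment flags.
```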