Add upload svg image support
Fix upload image in Bing Provider
This commit is contained in:
parent 38dbe4b8e5
commit 07c944ad0a
@@ -82,13 +82,16 @@ def build_image_upload_payload(image_bin: str, tone: str) -> Tuple[str, str]:
         Tuple[str, str]: The data and boundary for the payload.
     """
     boundary = "----WebKitFormBoundary" + ''.join(random.choices(string.ascii_letters + string.digits, k=16))
-    data = f"--{boundary}\r\n" \
-        f"Content-Disposition: form-data; name=\"knowledgeRequest\"\r\n\r\n" \
-        f"{json.dumps(build_knowledge_request(tone), ensure_ascii=False)}\r\n" \
-        f"--{boundary}\r\n" \
-        f"Content-Disposition: form-data; name=\"imageBase64\"\r\n\r\n" \
-        f"{image_bin}\r\n" \
-        f"--{boundary}--\r\n"
+    data = f"""--{boundary}
+Content-Disposition: form-data; name="knowledgeRequest"
+
+{json.dumps(build_knowledge_request(tone), ensure_ascii=False)}
+--{boundary}
+Content-Disposition: form-data; name="imageBase64"
+
+{image_bin}
+--{boundary}--
+"""
     return data, boundary
 
 def build_knowledge_request(tone: str) -> dict:
@@ -102,14 +105,17 @@ def build_knowledge_request(tone: str) -> dict:
         dict: The knowledge request payload.
     """
     return {
-        'invokedSkills': ["ImageById"],
-        'subscriptionId': "Bing.Chat.Multimodal",
-        'invokedSkillsRequestData': {
-            'enableFaceBlur': True
-        },
-        'convoData': {
-            'convoid': "",
-            'convotone': tone
-        }
-    }
+        "imageInfo": {},
+        "knowledgeRequest": {
+            'invokedSkills': ["ImageById"],
+            'subscriptionId': "Bing.Chat.Multimodal",
+            'invokedSkillsRequestData': {
+                'enableFaceBlur': True
+            },
+            'convoData': {
+                'convoid': "",
+                'convotone': tone
+            }
+        }
+    }
 
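The two hunks above build the multipart form body and the nested knowledge-request JSON. A minimal sketch of how the returned data and boundary could be posted follows; the endpoint URL and the helper name are placeholders for illustration, not taken from this commit:

import requests

def upload_image_sketch(image_base64: str, tone: str = "Balanced") -> str:
    # Build the multipart body and its boundary with the function changed above.
    data, boundary = build_image_upload_payload(image_base64, tone)
    headers = {
        # The same boundary must be echoed in the Content-Type header.
        "Content-Type": f"multipart/form-data; boundary={boundary}",
    }
    # Placeholder endpoint: the real provider URL is not part of this diff.
    response = requests.post("https://example.invalid/images/upload", headers=headers, data=data)
    response.raise_for_status()
    return response.text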
@@ -115,11 +115,11 @@
     <textarea id="message-input" placeholder="Ask a question" cols="30" rows="10"
         style="white-space: pre-wrap;resize: none;"></textarea>
     <label for="image" title="Works only with Bing and OpenaiChat">
-        <input type="file" id="image" name="image" accept="image/png, image/gif, image/jpeg" required/>
+        <input type="file" id="image" name="image" accept="image/png, image/gif, image/jpeg, image/svg+xml" required/>
         <i class="fa-regular fa-image"></i>
     </label>
     <label for="file">
-        <input type="file" id="file" name="file" accept="text/plain, text/html, text/xml, application/json, text/javascript, .sh, .py, .php, .css, .yaml, .sql, .svg, .log, .csv, .twig, .md" required/>
+        <input type="file" id="file" name="file" accept="text/plain, text/html, text/xml, application/json, text/javascript, .sh, .py, .php, .css, .yaml, .sql, .log, .csv, .twig, .md" required/>
         <i class="fa-solid fa-paperclip"></i>
     </label>
     <div id="send-button">
@@ -660,7 +660,13 @@ observer.observe(message_input, { attributes: true });
     }
     document.getElementById("version_text").innerHTML = text
 })()
-
+imageInput.addEventListener('click', async (event) => {
+    imageInput.value = '';
+});
+fileInput.addEventListener('click', async (event) => {
+    fileInput.value = '';
+    delete fileInput.dataset.text;
+});
 fileInput.addEventListener('change', async (event) => {
     if (fileInput.files.length) {
         type = fileInput.files[0].type;
@@ -137,7 +137,7 @@ class Backend_Api:
         if 'image' in request.files:
             file = request.files['image']
             if file.filename != '' and is_allowed_extension(file.filename):
-                kwargs['image'] = to_image(file.stream)
+                kwargs['image'] = to_image(file.stream, file.filename.endswith('.svg'))
         if 'json' in request.form:
             json_data = json.loads(request.form['json'])
         else:
g4f/image.py (16 lines changed)
@@ -4,9 +4,9 @@ import base64
 from .typing import ImageType, Union
 from PIL import Image
 
-ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif', 'webp'}
+ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif', 'webp', 'svg'}
 
-def to_image(image: ImageType) -> Image.Image:
+def to_image(image: ImageType, is_svg: bool = False) -> Image.Image:
     """
     Converts the input image to a PIL Image object.
 
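The backend hunk above calls is_allowed_extension before accepting an upload; its body is not shown in this diff, but a check against the extended ALLOWED_EXTENSIONS set typically looks like the following sketch (the function name and body here are illustrative, not copied from the repository):

def is_allowed_extension_sketch(filename: str) -> bool:
    # Accept only filenames whose suffix appears in ALLOWED_EXTENSIONS,
    # which now includes 'svg' after the change above.
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS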
@@ -16,6 +16,16 @@ def to_image(image: ImageType) -> Image.Image:
     Returns:
         Image.Image: The converted PIL Image object.
     """
+    if is_svg:
+        try:
+            import cairosvg
+        except ImportError:
+            raise RuntimeError('Install "cairosvg" package for open svg images')
+        if not isinstance(image, bytes):
+            image = image.read()
+        buffer = BytesIO()
+        cairosvg.svg2png(image, write_to=buffer)
+        image = Image.open(buffer)
     if isinstance(image, str):
         is_data_uri_an_image(image)
         image = extract_data_uri(image)
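A standalone sketch of the SVG-to-PIL conversion added above, assuming the optional cairosvg dependency is installed; the sample SVG bytes are made up for the example:

from io import BytesIO

from PIL import Image
import cairosvg

svg_bytes = b'<svg xmlns="http://www.w3.org/2000/svg" width="4" height="4"><rect width="4" height="4" fill="red"/></svg>'

buffer = BytesIO()
cairosvg.svg2png(svg_bytes, write_to=buffer)  # rasterize the SVG into the buffer as PNG
buffer.seek(0)
image = Image.open(buffer)
print(image.size, image.mode)  # e.g. (4, 4) RGBA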
@@ -153,6 +163,8 @@ def to_base64(image: Image.Image, compression_rate: float) -> str:
         str: The base64-encoded image.
     """
     output_buffer = BytesIO()
+    if image.mode != "RGB":
+        image = image.convert('RGB')
     image.save(output_buffer, format="JPEG", quality=int(compression_rate * 100))
     return base64.b64encode(output_buffer.getvalue()).decode()
 
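A small sketch of why the RGB conversion above matters: Pillow refuses to write alpha-channel images as JPEG, and a rasterized SVG is typically RGBA, so converting first avoids the error. The image below is synthetic:

from io import BytesIO

from PIL import Image

rgba = Image.new("RGBA", (8, 8), (255, 0, 0, 128))

try:
    rgba.save(BytesIO(), format="JPEG")
except OSError as error:
    print("direct save fails:", error)  # cannot write mode RGBA as JPEG

rgba.convert("RGB").save(BytesIO(), format="JPEG")  # succeeds after conversion
print("converted save succeeded")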
@@ -5,6 +5,7 @@ from .Provider import (
     Chatgpt4Online,
     ChatgptDemoAi,
     GeminiProChat,
+    PerplexityAi,
     ChatgptNext,
     HuggingChat,
     ChatgptDemo,
@@ -78,7 +79,7 @@ gpt_35_long = Model(
 gpt_35_turbo = Model(
     name = 'gpt-3.5-turbo',
     base_provider = 'openai',
-    best_provider=RetryProvider([
+    best_provider = RetryProvider([
         GptGo, You,
         GptForLove, ChatBase,
         Chatgpt4Online,
@@ -114,20 +115,20 @@ llama2_13b = Model(
 llama2_70b = Model(
     name = "meta-llama/Llama-2-70b-chat-hf",
     base_provider = "huggingface",
-    best_provider = RetryProvider([Llama2, DeepInfra, HuggingChat])
+    best_provider = RetryProvider([Llama2, DeepInfra, HuggingChat, PerplexityAi])
 )
 
 # Mistal
 mixtral_8x7b = Model(
     name = "mistralai/Mixtral-8x7B-Instruct-v0.1",
     base_provider = "huggingface",
-    best_provider = RetryProvider([DeepInfra, HuggingChat])
+    best_provider = RetryProvider([DeepInfra, HuggingChat, PerplexityAi])
 )
 
 mistral_7b = Model(
     name = "mistralai/Mistral-7B-Instruct-v0.1",
     base_provider = "huggingface",
-    best_provider = RetryProvider([DeepInfra, HuggingChat])
+    best_provider = RetryProvider([DeepInfra, HuggingChat, PerplexityAi])
 )
 
 # Dolphin
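PerplexityAi is added as a fallback in the RetryProvider chains above. A hedged usage sketch of how such a model entry is typically consumed through the package's chat API follows; exact client behaviour and provider ordering can vary between g4f versions:

import g4f
from g4f.models import llama2_70b

# RetryProvider tries the listed providers (now including PerplexityAi)
# until one of them returns a response for this model.
response = g4f.ChatCompletion.create(
    model=llama2_70b,
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
)
print(response)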
@@ -311,7 +312,7 @@ llama70b_v2_chat = Model(
 pi = Model(
     name = 'pi',
     base_provider = 'inflection',
-    best_provider=Pi
+    best_provider = Pi
 )
 
 class ModelUtils: