from __future__ import annotations

import random
import json
import os
import uuid
import time
from urllib import parse
from aiohttp import ClientSession, ClientTimeout

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from ..webdriver import get_browser, get_driver_cookies
from .bing.upload_image import upload_image
from .bing.create_images import create_images, format_images_markdown, wait_for_login
from .bing.conversation import Conversation, create_conversation, delete_conversation
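
# Illustrative usage (a sketch, assuming the usual g4f client API): the provider is
# normally driven through g4f.ChatCompletion.create, and the optional `image` keyword
# is forwarded here as a data URI (jpg, jpeg, png or gif) when image analysis is wanted.
# Omit `image` for a plain chat completion.
#
#   import g4f
#   response = g4f.ChatCompletion.create(
#       model=g4f.models.gpt_4,
#       provider=g4f.Provider.Bing,
#       messages=[{"role": "user", "content": "Hi, describe this image."}],
#       stream=True,
#       image="data:image/jpeg;base64,..."  # full data URI of the image
#   )
#   for chunk in response:
#       print(chunk, flush=True, end="")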

class Tones():
    creative = "Creative"
    balanced = "Balanced"
    precise = "Precise"

class Bing(AsyncGeneratorProvider):
    url = "https://bing.com/chat"
    working = True
    supports_message_history = True
    supports_gpt_4 = True
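
    # Presumably called by g4f's provider machinery: it splits the history into the
    # latest prompt plus a flattened context block, merges the default cookies into
    # any user-supplied ones, and returns the websocket streaming generator.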
    @staticmethod
    def create_async_generator(
        model: str,
        messages: Messages,
        proxy: str = None,
        cookies: dict = None,
        tone: str = Tones.creative,
        image: str = None,
        web_search: bool = False,
        **kwargs
    ) -> AsyncResult:
        if len(messages) < 2:
            prompt = messages[0]["content"]
            context = None
        else:
            prompt = messages[-1]["content"]
            context = create_context(messages[:-1])

        if not cookies:
            cookies = Defaults.cookies
        else:
            for key, value in Defaults.cookies.items():
                if key not in cookies:
                    cookies[key] = value

        gpt4_turbo = model.startswith("gpt-4-turbo")

        return stream_generate(prompt, tone, image, context, proxy, cookies, web_search, gpt4_turbo)
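
# create_context flattens the earlier messages into a single block that is later sent
# as "previousMessages" context, e.g. "[user](#message)\nHello\n\n".
# System messages are tagged "(#additional_instructions)" instead of "(#message)".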
def create_context(messages: Messages):
    return "".join(
        f"[{message['role']}]" + ("(#message)" if message['role'] != "system" else "(#additional_instructions)") + f"\n{message['content']}\n\n"
        for message in messages
    )
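
# Defaults bundles the constants used to mimic a browser session: the "\x1e" record
# separator that frames each websocket message, a randomized x-forwarded-for address,
# the allowed message types, experiment slice ids, a location hint, request headers,
# option sets and baseline cookies.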
class Defaults:
    delimiter = "\x1e"
    ip_address = f"13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}"

    allowedMessageTypes = [
        "ActionRequest",
        "Chat",
        "Context",
        # "Disengaged", unwanted
        "Progress",
        # "AdsQuery", unwanted
        "SemanticSerp",
        "GenerateContentQuery",
        "SearchQuery",
        # The following message types should not be added, so that the stream is not flooded
        # with useless messages (such as "Analyzing images" or "Searching the web") while the AI response is retrieved
        # "InternalSearchQuery",
        # "InternalSearchResult",
        "RenderCardRequest",
        # "RenderContentRequest"
    ]

    sliceIds = [
        'abv2',
        'srdicton',
        'convcssclick',
        'stylewv2',
        'contctxp2tf',
        '802fluxv1pc_a',
        '806log2sphs0',
        '727savemem',
        '277teditgnds0',
        '207hlthgrds0',
    ]

    location = {
        "locale": "en-US",
        "market": "en-US",
        "region": "US",
        "locationHints": [
            {
                "country": "United States",
                "state": "California",
                "city": "Los Angeles",
                "timezoneoffset": 8,
                "countryConfidence": 8,
                "Center": {"Latitude": 34.0536909, "Longitude": -118.242766},
                "RegionType": 2,
                "SourceType": 1,
            }
        ],
    }

    headers = {
        'accept': '*/*',
        'accept-language': 'en-US,en;q=0.9',
        'cache-control': 'max-age=0',
        'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
        'sec-ch-ua-arch': '"x86"',
        'sec-ch-ua-bitness': '"64"',
        'sec-ch-ua-full-version': '"110.0.1587.69"',
        'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-model': '""',
        'sec-ch-ua-platform': '"Windows"',
        'sec-ch-ua-platform-version': '"15.0.0"',
        'sec-fetch-dest': 'document',
        'sec-fetch-mode': 'navigate',
        'sec-fetch-site': 'none',
        'sec-fetch-user': '?1',
        'upgrade-insecure-requests': '1',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69',
        'x-edge-shopping-flag': '1',
        'x-forwarded-for': ip_address,
    }

    optionsSets = [
        'nlu_direct_response_filter',
        'deepleo',
        'disable_emoji_spoken_text',
        'responsible_ai_policy_235',
        'enablemm',
        'iyxapbing',
        'iycapbing',
        'gencontentv3',
        'fluxsrtrunc',
        'fluxtrunc',
        'fluxv1',
        'rai278',
        'replaceurl',
        'eredirecturl',
        'nojbfedge'
    ]

    cookies = {
        'SRCHD'         : 'AF=NOFORM',
        'PPLState'      : '1',
        'KievRPSSecAuth': '',
        'SUID'          : '',
        'SRCHUSR'       : '',
        'SRCHHPGUSR'    : f'HV={int(time.time())}',
    }
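
# Every frame sent to ChatHub is a JSON object terminated by Defaults.delimiter.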
def format_message(msg: dict) -> str:
    return json.dumps(msg, ensure_ascii=False) + Defaults.delimiter
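
# create_message builds the invocation payload (type 4) that asks ChatHub for a chat
# turn: tone-specific option sets, the user message merged with the location hint,
# optional image references and optional flattened context.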
def create_message(
    conversation: Conversation,
    prompt: str,
    tone: str,
    context: str = None,
    image_info: dict = None,
    web_search: bool = False,
    gpt4_turbo: bool = False
) -> str:
    # Work on a copy so repeated calls do not keep appending tone flags to the shared Defaults.optionsSets list.
    options_sets = Defaults.optionsSets.copy()
    if tone == Tones.creative:
        options_sets.append("h3imaginative")
    elif tone == Tones.precise:
        options_sets.append("h3precise")
    elif tone == Tones.balanced:
        options_sets.append("galileo")
    else:
        options_sets.append("harmonyv3")

    if not web_search:
        options_sets.append("nosearchall")

    if gpt4_turbo:
        options_sets.append("dlgpt4t")

    request_id = str(uuid.uuid4())
    struct = {
        'arguments': [
            {
                'source': 'cib',
                'optionsSets': options_sets,
                'allowedMessageTypes': Defaults.allowedMessageTypes,
                'sliceIds': Defaults.sliceIds,
                'traceId': os.urandom(16).hex(),
                'isStartOfSession': True,
                'requestId': request_id,
                'message': {**Defaults.location, **{
                    'author': 'user',
                    'inputMethod': 'Keyboard',
                    'text': prompt,
                    'messageType': 'Chat',
                    'requestId': request_id,
                    'messageId': request_id,
                }},
                "scenario": "SERP",
                'tone': tone,
                'spokenTextMode': 'None',
                'conversationId': conversation.conversationId,
                'participant': {
                    'id': conversation.clientId
                },
            }
        ],
        'invocationId': '1',
        'target': 'chat',
        'type': 4
    }
    if image_info and "imageUrl" in image_info and "originalImageUrl" in image_info:
        struct['arguments'][0]['message']['originalImageUrl'] = image_info['originalImageUrl']
        struct['arguments'][0]['message']['imageUrl'] = image_info['imageUrl']
        struct['arguments'][0]['experienceType'] = None
        struct['arguments'][0]['attachedFileInfo'] = {"fileName": None, "fileType": None}
    if context:
        struct['arguments'][0]['previousMessages'] = [{
            "author": "user",
            "description": context,
            "contextType": "WebPage",
            "messageType": "Context",
            "messageId": "discover-web--page-ping-mriduna-----"
        }]
    return format_message(struct)
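
# stream_generate opens the ChatHub websocket, sends the handshake and the chat
# invocation, then yields the answer incrementally. Bing reports the full text
# accumulated so far on each update rather than separate deltas, so only the new
# suffix is yielded. On a CaptchaChallenge error a browser is opened to refresh
# cookies and the stream is restarted.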
async def stream_generate(
    prompt: str,
    tone: str,
    image: str = None,
    context: str = None,
    proxy: str = None,
    cookies: dict = None,
    web_search: bool = False,
    gpt4_turbo: bool = False
):
    # Copy the defaults so the shared header dict is not mutated with per-call cookies.
    headers = Defaults.headers.copy()
    if cookies:
        headers["Cookie"] = "; ".join(f"{k}={v}" for k, v in cookies.items())
    async with ClientSession(
        timeout=ClientTimeout(total=900),
        headers=headers
    ) as session:
        conversation = await create_conversation(session, proxy)
        image_info = None
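        # If a data-URI image was supplied, upload it first so the outgoing message can
        # reference the hosted copy (imageUrl / originalImageUrl).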
        if image:
            image_info = await upload_image(session, image, tone, proxy)
        try:
            async with session.ws_connect(
                'wss://sydney.bing.com/sydney/ChatHub',
                autoping=False,
                params={'sec_access_token': conversation.conversationSignature},
                proxy=proxy
            ) as wss:
                await wss.send_str(format_message({'protocol': 'json', 'version': 1}))
                await wss.receive(timeout=900)
                await wss.send_str(create_message(conversation, prompt, tone, context, image_info, web_search, gpt4_turbo))

                response_txt = ''
                returned_text = ''
                final = False
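                # Bing resends the whole answer accumulated so far on every update, so
                # track what has already been emitted and yield only the new tail.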
                while not final:
                    msg = await wss.receive(timeout=900)
                    if not msg.data:
                        continue
                    objects = msg.data.split(Defaults.delimiter)
                    for obj in objects:
                        if obj is None or not obj:
                            continue
                        response = json.loads(obj)
                        if response.get('type') == 1 and response['arguments'][0].get('messages'):
                            message = response['arguments'][0]['messages'][0]
                            if (message['contentOrigin'] != 'Apology'):
                                if 'adaptiveCards' in message:
                                    card = message['adaptiveCards'][0]['body'][0]
                                    if "text" in card:
                                        response_txt = card.get('text')
                                    if message.get('messageType'):
                                        inline_txt = card['inlines'][0].get('text')
                                        response_txt += inline_txt + '\n'
                                elif message.get('contentType') == "IMAGE":
                                    prompt = message.get('text')
                                    try:
                                        response_txt += format_images_markdown(await create_images(session, prompt, proxy), prompt)
                                    except Exception:
                                        # Fall back to a plain Image Creator link if image generation fails.
                                        response_txt += f"\nhttps://www.bing.com/images/create?q={parse.quote(prompt)}"
                                    final = True
                                if response_txt.startswith(returned_text):
                                    new = response_txt[len(returned_text):]
                                    if new != "\n":
                                        yield new
                                    returned_text = response_txt
                        elif response.get('type') == 2:
                            result = response['item']['result']
                            if result.get('error'):
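                                # On a captcha challenge, open a real browser so the user can log in,
                                # reuse the fresh cookies, and restart the stream.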
                                if result["value"] == "CaptchaChallenge":
                                    driver = get_browser(proxy=proxy)
                                    try:
                                        for chunk in wait_for_login(driver):
                                            yield chunk
                                        cookies = get_driver_cookies(driver)
                                    finally:
                                        driver.quit()
                                    async for chunk in stream_generate(prompt, tone, image, context, proxy, cookies, web_search, gpt4_turbo):
                                        yield chunk
                                else:
                                    raise Exception(f"{result['value']}: {result['message']}")
                            return
        finally:
            await delete_conversation(session, conversation, proxy)