Mirror of https://github.com/xtekky/gpt4free.git, synced 2024-12-27 05:04:19 +03:00
Add GeminiPro API provider
Set min version for undetected-chromedriver.
Add api_key to the new client.
This commit is contained in:
parent 51b4aaae05
commit 51264fe20c
README.md (19 lines changed)
@@ -103,7 +103,7 @@ or set the api base in your client to: [http://localhost:1337/v1](http://localhost:1337/v1)
 1. [Download and install Python](https://www.python.org/downloads/) (Version 3.10+ is recommended).
 2. [Install Google Chrome](https://www.google.com/chrome/) for providers with webdriver
 
-##### Install using pypi:
+##### Install using PyPI package:
 
 ```
 pip install -U g4f[all]
@@ -113,12 +113,12 @@ Or use partial requirements.
 
 See: [/docs/requirements](/docs/requirements.md)
 
-##### Install from source:
+##### Install from source using git:
 
 See: [/docs/git](/docs/git.md)
 
 
-##### Install using Docker
+##### Install using Docker for Developers:
 
 See: [/docs/docker](/docs/docker.md)
 
@@ -126,7 +126,6 @@ See: [/docs/git](/docs/git.md)
 ## 💡 Usage
 
 #### Text Generation
-**with Python**
 
 ```python
 from g4f.client import Client
@@ -134,14 +133,13 @@ from g4f.client import Client
 client = Client()
 response = client.chat.completions.create(
     model="gpt-3.5-turbo",
-    messages=[{"role": "user", "content": "Say this is a test"}],
+    messages=[{"role": "user", "content": "Hello"}],
     ...
 )
 print(response.choices[0].message.content)
 ```
 
 #### Image Generation
-**with Python**
 
 ```python
 from g4f.client import Client
@@ -154,14 +152,15 @@ response = client.images.generate(
 )
 image_url = response.data[0].url
 ```
-Result:
+
+**Result:**
 
 [![Image with cat](/docs/cat.jpeg)](/docs/client.md)
 
-**See also for Python:**
+**See also:**
 
-- [Documentation for new Client](/docs/client.md)
-- [Documentation for leagcy API](/docs/leagcy.md)
+- Documentation for the new Client: [/docs/client](/docs/client.md)
+- Documentation for the leagcy API: [docs/leagcy](/docs/leagcy.md)
 
 
 #### Web UI
docs/client.md

@@ -37,12 +37,16 @@ client = Client(
 )
 ```
 
-You also have the option to define a proxy in the client for all outgoing requests:
+## Configuration
+
+You can set an "api_key" for your provider in client.
+And you also have the option to define a proxy for all outgoing requests:
 
 ```python
 from g4f.client import Client
 
 client = Client(
+    api_key="...",
     proxies="http://user:pass@host",
     ...
 )
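A quick sketch of how these two settings combine. This assumes `get_proxy()` (shown further down in `g4f/client.py`) also accepts a dict keyed by scheme in addition to a plain string; only the string form is confirmed by this diff:

```python
from g4f.client import Client

# Assumption: a dict keyed by scheme ("https") is accepted in addition
# to a plain proxy string; the diff only shows the string form.
client = Client(
    api_key="...",
    proxies={"https": "http://user:pass@host"},
)
print(client.get_proxy())  # expected: "http://user:pass@host"
```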
@@ -74,7 +78,7 @@ stream = client.chat.completions.create(
 )
 for chunk in stream:
     if chunk.choices[0].delta.content:
-        print(chunk.choices[0].delta.content, end="")
+        print(chunk.choices[0].delta.content or "", end="")
 ```
 
 **Image Generation:**
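The `or ""` guard follows the official OpenAI streaming examples: the final chunk of a stream typically carries `delta.content = None`, which would otherwise print as the literal word `None`. A minimal illustration of the failure mode:

```python
# Hypothetical delta values from a stream; the last one is None.
deltas = ["Hel", "lo", None]
for content in deltas:
    # "content or ''" turns None into an empty string instead of "None"
    print(content or "", end="")
# prints: Hello
```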
@@ -109,7 +113,28 @@ image_url = response.data[0].url
 
 Original / Variant:
 
-[![Original Image](/docs/cat.jpeg)](/docs/client.md)
-[![Variant Image](/docs/cat.webp)](/docs/client.md)
+[![Original Image](/docs/cat.jpeg)](/docs/client.md) [![Variant Image](/docs/cat.webp)](/docs/client.md)
 
+#### Advanced example using GeminiProVision
+
+```python
+from g4f.client import Client
+from g4f.Provider.GeminiPro import GeminiPro
+
+client = Client(
+    api_key="...",
+    provider=GeminiPro
+)
+response = client.chat.completions.create(
+    model="gemini-pro-vision",
+    messages=[{"role": "user", "content": "What are on this image?"}],
+    image=open("docs/cat.jpeg", "rb")
+)
+print(response.choices[0].message.content)
+```
+**Question:** What are on this image?
+```
+A cat is sitting on a window sill looking at a bird outside the window.
+```
 
 [Return to Home](/)
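Not covered by the docs above: the provider added below also implements streaming. A hedged sketch of a streaming call through the same client interface, reusing the pattern from the Streaming section:

```python
from g4f.client import Client
from g4f.Provider.GeminiPro import GeminiPro

client = Client(api_key="...", provider=GeminiPro)

# stream=True routes to the streamGenerateContent endpoint in the
# provider below; chunks arrive as partial text deltas.
stream = client.chat.completions.create(
    model="gemini-pro",
    messages=[{"role": "user", "content": "Write a haiku about cats"}],
    stream=True,
)
for chunk in stream:
    print(chunk.choices[0].delta.content or "", end="")
```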
g4f/Provider/GeminiPro.py (new file, 86 lines)
@@ -0,0 +1,86 @@
+from __future__ import annotations
+
+import base64
+import json
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages, ImageType
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..image import to_bytes, is_accepted_format
+
+
+class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://ai.google.dev"
+    working = True
+    supports_message_history = True
+    default_model = "gemini-pro"
+    models = ["gemini-pro", "gemini-pro-vision"]
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        stream: bool = False,
+        proxy: str = None,
+        api_key: str = None,
+        image: ImageType = None,
+        **kwargs
+    ) -> AsyncResult:
+        model = "gemini-pro-vision" if not model and image else model
+        model = cls.get_model(model)
+        api_key = api_key if api_key else kwargs.get("access_token")
+        headers = {
+            "Content-Type": "application/json",
+        }
+        async with ClientSession(headers=headers) as session:
+            method = "streamGenerateContent" if stream else "generateContent"
+            url = f"https://generativelanguage.googleapis.com/v1beta/models/{model}:{method}"
+            contents = [
+                {
+                    "role": "model" if message["role"] == "assistant" else message["role"],
+                    "parts": [{"text": message["content"]}]
+                }
+                for message in messages
+            ]
+            if image:
+                image = to_bytes(image)
+                contents[-1]["parts"].append({
+                    "inline_data": {
+                        "mime_type": is_accepted_format(image),
+                        "data": base64.b64encode(image).decode()
+                    }
+                })
+            data = {
+                "contents": contents,
+                # "generationConfig": {
+                #     "stopSequences": kwargs.get("stop"),
+                #     "temperature": kwargs.get("temperature"),
+                #     "maxOutputTokens": kwargs.get("max_tokens"),
+                #     "topP": kwargs.get("top_p"),
+                #     "topK": kwargs.get("top_k"),
+                # }
+            }
+            async with session.post(url, params={"key": api_key}, json=data, proxy=proxy) as response:
+                if not response.ok:
+                    data = await response.json()
+                    raise RuntimeError(data[0]["error"]["message"])
+                if stream:
+                    lines = []
+                    async for chunk in response.content:
+                        if chunk == b"[{\n":
+                            lines = [b"{\n"]
+                        elif chunk == b",\r\n" or chunk == b"]":
+                            try:
+                                data = b"".join(lines)
+                                data = json.loads(data)
+                                yield data["candidates"][0]["content"]["parts"][0]["text"]
+                            except:
+                                data = data.decode() if isinstance(data, bytes) else data
+                                raise RuntimeError(f"Read text failed. data: {data}")
+                            lines = []
+                        else:
+                            lines.append(chunk)
+                else:
+                    data = await response.json()
+                    yield data["candidates"][0]["content"]["parts"][0]["text"]
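The streaming branch depends on how `streamGenerateContent` frames its body: a single JSON array, arriving as an opening `[{\n` chunk, objects separated by `,\r\n`, and a final `]`. A self-contained sketch of that accumulate-and-parse loop over simulated chunks (the payload shape is taken from the code above, not from official API docs):

```python
import json

# Simulated wire chunks as the provider's reader would see them.
chunks = [
    b"[{\n",
    b'  "candidates": [{"content": {"parts": [{"text": "Hello"}]}}]\n',
    b"}\n",
    b",\r\n",
    b"{\n",
    b'  "candidates": [{"content": {"parts": [{"text": " world"}]}}]\n',
    b"}\n",
    b"]",
]

lines = []
for chunk in chunks:
    if chunk == b"[{\n":
        lines = [b"{\n"]   # drop the array opener, keep the object brace
    elif chunk in (b",\r\n", b"]"):
        data = json.loads(b"".join(lines))  # one complete candidate object
        print(data["candidates"][0]["content"]["parts"][0]["text"], end="")
        lines = []
    else:
        lines.append(chunk)  # accumulate body lines of the current object
# prints: Hello world
```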
g4f/Provider/__init__.py

@@ -34,6 +34,7 @@ from .FakeGpt import FakeGpt
 from .FreeChatgpt import FreeChatgpt
 from .FreeGpt import FreeGpt
 from .GeekGpt import GeekGpt
+from .GeminiPro import GeminiPro
 from .GeminiProChat import GeminiProChat
 from .Gpt6 import Gpt6
 from .GPTalk import GPTalk
g4f/Provider/needs_auth/OpenaiChat.py

@@ -23,10 +23,11 @@ from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from ..helper import format_prompt, get_cookies
 from ...webdriver import get_browser, get_driver_cookies
 from ...typing import AsyncResult, Messages, Cookies, ImageType
-from ...requests import StreamSession
+from ...requests import get_args_from_browser
+from ...requests.aiohttp import StreamSession
 from ...image import to_image, to_bytes, ImageResponse, ImageRequest
 from ...errors import MissingRequirementsError, MissingAuthError
 from ... import debug
 
 class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
     """A class for creating and managing conversations with OpenAI chat service"""
@@ -39,7 +40,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
     default_model = None
     models = ["gpt-3.5-turbo", "gpt-4", "gpt-4-gizmo"]
     model_aliases = {"text-davinci-002-render-sha": "gpt-3.5-turbo"}
-    _cookies: dict = {}
+    _args: dict = None
 
     @classmethod
     async def create(
@@ -169,11 +170,12 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
         """
         if not cls.default_model:
             async with session.get(f"{cls.url}/backend-api/models", headers=headers) as response:
                 response.raise_for_status()
                 data = await response.json()
                 if "categories" in data:
                     cls.default_model = data["categories"][-1]["default_model"]
-                    return cls.default_model
-                raise RuntimeError(f"Response: {data}")
+                else:
+                    raise RuntimeError(f"Response: {data}")
+        return cls.default_model
 
     @classmethod
@@ -249,8 +251,10 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
         first_part = line["message"]["content"]["parts"][0]
         if "asset_pointer" not in first_part or "metadata" not in first_part:
             return
-        file_id = first_part["asset_pointer"].split("file-service://", 1)[1]
+        if first_part["metadata"] is None:
+            return
         prompt = first_part["metadata"]["dalle"]["prompt"]
+        file_id = first_part["asset_pointer"].split("file-service://", 1)[1]
         try:
             async with session.get(f"{cls.url}/backend-api/files/{file_id}/download", headers=headers) as response:
                 response.raise_for_status()
@@ -289,7 +293,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
         messages: Messages,
         proxy: str = None,
         timeout: int = 120,
-        access_token: str = None,
+        api_key: str = None,
         cookies: Cookies = None,
         auto_continue: bool = False,
         history_disabled: bool = True,
@@ -308,7 +312,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             messages (Messages): The list of previous messages.
             proxy (str): Proxy to use for requests.
             timeout (int): Timeout for requests.
-            access_token (str): Access token for authentication.
+            api_key (str): Access token for authentication.
             cookies (dict): Cookies to use for authentication.
             auto_continue (bool): Flag to automatically continue the conversation.
             history_disabled (bool): Flag to disable history and training.
@@ -329,35 +333,47 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             raise MissingRequirementsError('Install "py-arkose-generator" and "async_property" package')
         if not parent_id:
             parent_id = str(uuid.uuid4())
-        if not cookies:
-            cookies = cls._cookies or get_cookies("chat.openai.com", False)
-        if not access_token and "access_token" in cookies:
-            access_token = cookies["access_token"]
-        if not access_token:
-            login_url = os.environ.get("G4F_LOGIN_URL")
-            if login_url:
-                yield f"Please login: [ChatGPT]({login_url})\n\n"
-            try:
-                access_token, cookies = cls.browse_access_token(proxy)
-            except MissingRequirementsError:
-                raise MissingAuthError(f'Missing "access_token"')
-            cls._cookies = cookies
-
-        auth_headers = {"Authorization": f"Bearer {access_token}"}
+        if cls._args is None and cookies is None:
+            cookies = get_cookies("chat.openai.com", False)
+        api_key = kwargs["access_token"] if "access_token" in kwargs else api_key
+        if api_key is None:
+            api_key = cookies["access_token"] if "access_token" in cookies else api_key
+        if cls._args is None:
+            cls._args = {
+                "headers": {"Cookie": "; ".join(f"{k}={v}" for k, v in cookies.items() if k != "access_token")},
+                "cookies": {} if cookies is None else cookies
+            }
+        if api_key is not None:
+            cls._args["headers"]["Authorization"] = f"Bearer {api_key}"
         async with StreamSession(
             proxies={"https": proxy},
-            impersonate="chrome110",
+            impersonate="chrome",
             timeout=timeout,
-            headers={"Cookie": "; ".join(f"{k}={v}" for k, v in cookies.items())}
+            headers=cls._args["headers"]
         ) as session:
+            if api_key is not None:
+                try:
+                    cls.default_model = await cls.get_default_model(session, cls._args["headers"])
+                except Exception as e:
+                    if debug.logging:
+                        print(f"{e.__class__.__name__}: {e}")
+            if cls.default_model is None:
+                login_url = os.environ.get("G4F_LOGIN_URL")
+                if login_url:
+                    yield f"Please login: [ChatGPT]({login_url})\n\n"
+                try:
+                    cls._args = cls.browse_access_token(proxy)
+                except MissingRequirementsError:
+                    raise MissingAuthError(f'Missing or invalid "access_token". Add a new "api_key" please')
+                cls.default_model = await cls.get_default_model(session, cls._args["headers"])
             try:
                 image_response = None
                 if image:
-                    image_response = await cls.upload_image(session, auth_headers, image, kwargs.get("image_name"))
+                    image_response = await cls.upload_image(session, cls._args["headers"], image, kwargs.get("image_name"))
             except Exception as e:
                 yield e
             end_turn = EndTurn()
-            model = cls.get_model(model or await cls.get_default_model(session, auth_headers))
+            model = cls.get_model(model)
             model = "text-davinci-002-render-sha" if model == "gpt-3.5-turbo" else model
             while not end_turn.is_end:
                 arkose_token = await cls.get_arkose_token(session)
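The refactor replaces the old `_cookies` dict plus throwaway `auth_headers` with one class-level `_args` cache that keeps headers and cookies together, so state gathered from the browser survives across calls. A reduced sketch of the pattern (the names mirror the diff, but the class itself is hypothetical):

```python
class CachedArgsProvider:
    _args: dict = None  # shared across all calls, like OpenaiChat._args

    @classmethod
    def get_args(cls, cookies: dict, api_key: str = None) -> dict:
        if cls._args is None:
            # Build once: Cookie header excludes the token, cookies keep it
            cls._args = {
                "headers": {"Cookie": "; ".join(
                    f"{k}={v}" for k, v in cookies.items() if k != "access_token"
                )},
                "cookies": cookies,
            }
        if api_key is not None:
            cls._args["headers"]["Authorization"] = f"Bearer {api_key}"
        return cls._args

args = CachedArgsProvider.get_args({"sid": "abc", "access_token": "tok"}, api_key="tok")
print(args["headers"])  # Cookie carries sid only; Authorization carries the token
```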
@@ -375,13 +391,19 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             if action != "continue":
                 prompt = format_prompt(messages) if not conversation_id else messages[-1]["content"]
                 data["messages"] = cls.create_messages(prompt, image_response)
+
+            # Update cookies before next request
+            for c in session.cookie_jar if hasattr(session, "cookie_jar") else session.cookies.jar:
+                cls._args["cookies"][c.name if hasattr(c, "name") else c.key] = c.value
+            cls._args["headers"]["Cookie"] = "; ".join(f"{k}={v}" for k, v in cls._args["cookies"].items())
+
             async with session.post(
                 f"{cls.url}/backend-api/conversation",
                 json=data,
                 headers={
                     "Accept": "text/event-stream",
                     "OpenAI-Sentinel-Arkose-Token": arkose_token,
-                    **auth_headers
+                    **cls._args["headers"]
                 }
             ) as response:
                 if not response.ok:
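The duck-typed cookie loop exists because `session` may expose an aiohttp-style `cookie_jar` or a requests-style `cookies.jar`, whose entries name their key differently. The same normalization in isolation, exercised with a stand-in session object:

```python
from types import SimpleNamespace

def merge_session_cookies(session, cookies: dict) -> str:
    """Copy cookies out of an aiohttp-style (cookie_jar) or requests-style
    (cookies.jar) session, then rebuild the Cookie header (mirrors the diff)."""
    jar = session.cookie_jar if hasattr(session, "cookie_jar") else session.cookies.jar
    for c in jar:
        # cookie entries expose either .name or .key depending on the library
        cookies[c.name if hasattr(c, "name") else c.key] = c.value
    return "; ".join(f"{k}={v}" for k, v in cookies.items())

# Tiny stand-in for a requests-style session holding one cookie
fake = SimpleNamespace(cookies=SimpleNamespace(jar=[SimpleNamespace(name="sid", value="abc")]))
print(merge_session_cookies(fake, {}))  # sid=abc
```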
@@ -403,8 +425,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                     if "message_type" not in line["message"]["metadata"]:
                         continue
                     try:
-                        image_response = await cls.get_generated_image(session, auth_headers, line)
-                        if image_response:
+                        image_response = await cls.get_generated_image(session, cls._args["headers"], line)
+                        if image_response is not None:
                             yield image_response
                     except Exception as e:
                         yield e
@@ -432,7 +454,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                     action = "continue"
                     await asyncio.sleep(5)
                 if history_disabled and auto_continue:
-                    await cls.delete_conversation(session, auth_headers, conversation_id)
+                    await cls.delete_conversation(session, cls._args["headers"], conversation_id)
 
     @classmethod
     def browse_access_token(cls, proxy: str = None, timeout: int = 1200) -> tuple[str, dict]:
@@ -457,7 +479,10 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                 "document.cookie = 'access_token=' + accessToken + ';expires=' + expires.toUTCString() + ';path=/';"
                 "return accessToken;"
             )
-            return access_token, get_driver_cookies(driver)
+            args = get_args_from_browser(f"{cls.url}/", driver, do_bypass_cloudflare=False)
+            args["headers"]["Authorization"] = f"Bearer {access_token}"
+            args["headers"]["Cookie"] = "; ".join(f"{k}={v}" for k, v in args["cookies"].items() if k != "access_token")
+            return args
         finally:
             driver.close()
g4f/api/__init__.py

@@ -21,7 +21,7 @@ class ChatCompletionsConfig(BaseModel):
     temperature: Union[float, None]
     max_tokens: int = None
     stop: Union[list[str], str, None]
-    access_token: Union[str, None]
+    api_key: Union[str, None]
 
 class Api:
     def __init__(self, engine: g4f, debug: bool = True, sentry: bool = False,
@@ -82,10 +82,10 @@ class Api:
         async def chat_completions(config: ChatCompletionsConfig = None, request: Request = None, provider: str = None):
             try:
                 config.provider = provider if config.provider is None else config.provider
-                if config.access_token is None and request is not None:
+                if config.api_key is None and request is not None:
                     auth_header = request.headers.get("Authorization")
                     if auth_header is not None:
-                        config.access_token = auth_header.split(None, 1)[-1]
+                        config.api_key = auth_header.split(None, 1)[-1]
 
                 response = self.client.chat.completions.create(
                     **dict(config),
@@ -124,4 +124,9 @@ def format_exception(e: Exception, config: ChatCompletionsConfig) -> str:
         "error": {"message": f"ChatCompletionsError: {e.__class__.__name__}: {e}"},
         "model": last_provider.get("model") if last_provider else config.model,
         "provider": last_provider.get("name") if last_provider else config.provider
     })
+
+def run_api(host: str = '0.0.0.0', port: int = 1337, debug: bool = False, use_colors=True) -> None:
+    print(f'Starting server... [g4f v-{g4f.version.utils.current_version}]')
+    app = Api(engine=g4f, debug=debug)
+    uvicorn.run(app=app, host=host, port=port, use_colors=use_colors)
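Client side, nothing changes: the interference API still reads the key from the standard `Authorization` header when the request body omits it; it is just stored as `config.api_key` now. A hedged sketch against a locally running server (the endpoint path is assumed from the README's `http://localhost:1337/v1` base):

```python
import requests  # assumes the server started by run_api() is listening on :1337

response = requests.post(
    "http://localhost:1337/v1/chat/completions",
    headers={"Authorization": "Bearer YOUR_API_KEY"},  # parsed into config.api_key
    json={
        "model": "gemini-pro",
        "provider": "GeminiPro",  # assumption: provider selectable by name
        "messages": [{"role": "user", "content": "Hello"}],
    },
)
print(response.json())
```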
g4f/client.py

@@ -86,20 +86,19 @@ def iter_append_model_and_provider(response: IterResponse) -> IterResponse:
         yield chunk
 
 class Client():
-    proxies: Proxies = None
-    chat: Chat
-    images: Images
-
     def __init__(
         self,
+        api_key: str = None,
+        proxies: Proxies = None,
         provider: ProviderType = None,
         image_provider: ImageProvider = None,
-        proxies: Proxies = None,
         **kwargs
     ) -> None:
-        self.chat = Chat(self, provider)
-        self.images = Images(self, image_provider)
-        self.proxies: Proxies = proxies
+        self.api_key: str = api_key
+        self.proxies: Proxies = proxies
+        self.chat: Chat = Chat(self, provider)
+        self.images: Images = Images(self, image_provider)
 
     def get_proxy(self) -> Union[str, None]:
         if isinstance(self.proxies, str):
@@ -125,6 +124,7 @@ class Completions():
         response_format: dict = None,
         max_tokens: int = None,
         stop: Union[list[str], str] = None,
+        api_key: str = None,
         **kwargs
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk]]:
         if max_tokens is not None:
@@ -137,9 +137,16 @@ class Completions():
             stream,
             **kwargs
         )
-        response = provider.create_completion(model, messages, stream=stream, proxy=self.client.get_proxy(), **kwargs)
         stop = [stop] if isinstance(stop, str) else stop
-        response = iter_append_model_and_provider(iter_response(response, stream, response_format, max_tokens, stop))
+        response = provider.create_completion(
+            model, messages, stream,
+            proxy=self.client.get_proxy(),
+            stop=stop,
+            api_key=self.client.api_key if api_key is None else api_key,
+            **kwargs
+        )
+        response = iter_response(response, stream, response_format, max_tokens, stop)
+        response = iter_append_model_and_provider(response)
         return response if stream else next(response)
 
 class Chat():
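The precedence introduced here: an `api_key` passed to `create()` wins over the client-level key, which is otherwise forwarded to the provider. A usage sketch:

```python
from g4f.client import Client
from g4f.Provider.GeminiPro import GeminiPro

client = Client(api_key="client-level-key", provider=GeminiPro)

# The per-call key overrides the client-level one for this request only
# (api_key is the new parameter added to Completions.create above).
response = client.chat.completions.create(
    model="gemini-pro",
    messages=[{"role": "user", "content": "Hello"}],
    api_key="per-call-key",
)
print(response.choices[0].message.content)
```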
g4f/image.py (12 lines changed)
@@ -97,17 +97,17 @@ def is_accepted_format(binary_data: bytes) -> bool:
         ValueError: If the image format is not allowed.
     """
     if binary_data.startswith(b'\xFF\xD8\xFF'):
-        pass # It's a JPEG image
+        return "image/jpeg"
     elif binary_data.startswith(b'\x89PNG\r\n\x1a\n'):
-        pass # It's a PNG image
+        return "image/png"
     elif binary_data.startswith(b'GIF87a') or binary_data.startswith(b'GIF89a'):
-        pass # It's a GIF image
+        return "image/gif"
     elif binary_data.startswith(b'\x89JFIF') or binary_data.startswith(b'JFIF\x00'):
-        pass # It's a JPEG image
+        return "image/jpeg"
     elif binary_data.startswith(b'\xFF\xD8'):
-        pass # It's a JPEG image
+        return "image/jpeg"
     elif binary_data.startswith(b'RIFF') and binary_data[8:12] == b'WEBP':
-        pass # It's a WebP image
+        return "image/webp"
     else:
         raise ValueError("Invalid image format (from magic code).")
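With the `pass` statements replaced by `return` statements, `is_accepted_format` finally yields the MIME string instead of falling through and returning `None` for accepted formats. A quick check with magic bytes:

```python
from g4f.image import is_accepted_format

jpeg_header = b"\xFF\xD8\xFF\xE0" + b"\x00" * 16  # JPEG SOI + APP0 marker
png_header = b"\x89PNG\r\n\x1a\n" + b"\x00" * 16  # PNG signature

print(is_accepted_format(jpeg_header))  # image/jpeg
print(is_accepted_format(png_header))   # image/png
```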
g4f/requests/__init__.py

@@ -15,7 +15,13 @@ from ..webdriver import WebDriver, WebDriverSession, bypass_cloudflare, get_driver_cookies
 from ..errors import MissingRequirementsError
 from .defaults import DEFAULT_HEADERS
 
-def get_args_from_browser(url: str, webdriver: WebDriver = None, proxy: str = None, timeout: int = 120) -> dict:
+def get_args_from_browser(
+    url: str,
+    webdriver: WebDriver = None,
+    proxy: str = None,
+    timeout: int = 120,
+    do_bypass_cloudflare: bool = True
+) -> dict:
     """
     Create a Session object using a WebDriver to handle cookies and headers.
 
@@ -29,7 +35,8 @@ def get_args_from_browser(url: str, webdriver: WebDriver = None, proxy: str = None, timeout: int = 120) -> dict:
         Session: A Session object configured with cookies and headers from the WebDriver.
     """
     with WebDriverSession(webdriver, "", proxy=proxy, virtual_display=False) as driver:
-        bypass_cloudflare(driver, url, timeout)
+        if do_bypass_cloudflare:
+            bypass_cloudflare(driver, url, timeout)
         cookies = get_driver_cookies(driver)
         user_agent = driver.execute_script("return navigator.userAgent")
         parse = urlparse(url)
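The new `do_bypass_cloudflare` flag lets a caller such as `OpenaiChat.browse_access_token` harvest headers and cookies from a driver that is already past any challenge. A hedged sketch of a direct call (the printed keys are assumptions about the returned dict, based on how the diff uses `args`):

```python
from g4f.requests import get_args_from_browser
from g4f.webdriver import get_browser

driver = get_browser()  # assumes a local Chrome managed by undetected-chromedriver
try:
    # Skip the Cloudflare bypass when the driver is already past the check
    args = get_args_from_browser(
        "https://chat.openai.com/", driver, do_bypass_cloudflare=False
    )
    print(args["headers"], list(args["cookies"]))
finally:
    driver.quit()
```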
requirements.txt

@@ -16,7 +16,7 @@ uvicorn
 flask
 py-arkose-generator
 async-property
-undetected-chromedriver
+undetected-chromedriver>=3.5.5
 brotli
 beautifulsoup4
 setuptools
setup.py (2 lines changed)
@@ -25,7 +25,7 @@ EXTRA_REQUIRE = {
     "beautifulsoup4", # internet.search and bing.create_images
     "brotli", # openai
     "platformdirs", # webdriver
-    "undetected-chromedriver", # webdriver
+    "undetected-chromedriver>=3.5.5", # webdriver
     "setuptools", # webdriver
     "aiohttp_socks", # proxy
     "pillow", # image