mirror of https://github.com/xtekky/gpt4free.git
Improve reading .har file in OpenaiChat
This commit is contained in:
parent 6f2b6cccbd
commit 1e2c18580c
@@ -412,7 +412,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                 if "proofofwork" in chat_requirements:
                     proofofwork = generate_proof_token(
                         **chat_requirements["proofofwork"],
-                        user_agent=cls._headers["user-agent"],
+                        user_agent=cls._headers.get("user-agent"),
                         proof_token=RequestConfig.proof_token
                     )
                 [debug.log(text) for text in (
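A note on this first hunk: cls._headers is populated from a captured .har file, so a user-agent entry may simply be missing. Subscripting would raise KeyError and abort the request setup, while dict.get() returns None and lets generate_proof_token fall back to a default (an assumption about its internals; the diff only shows the call site). A minimal sketch of the difference, using a made-up headers dict:

    captured_headers = {"accept": "text/event-stream"}  # hypothetical headers restored from a .har file

    # Old behaviour: a missing key raises and breaks proof-of-work generation.
    #   captured_headers["user-agent"]      -> KeyError: 'user-agent'

    # New behaviour: dict.get() degrades to None instead of raising.
    user_agent = captured_headers.get("user-agent")      # -> None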
@@ -439,15 +439,15 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                 messages = messages if conversation_id is None else [messages[-1]]
                 data["messages"] = cls.create_messages(messages, image_request)
                 headers = {
-                    "Accept": "text/event-stream",
-                    "Content-Type": "application/json",
-                    "Openai-Sentinel-Chat-Requirements-Token": chat_token,
-                    **cls._headers
+                    **cls._headers,
+                    "accept": "text/event-stream",
+                    "content-type": "application/json",
+                    "openai-sentinel-chat-requirements-token": chat_token,
                 }
                 if RequestConfig.arkose_token:
-                    headers["Openai-Sentinel-Arkose-Token"] = RequestConfig.arkose_token
+                    headers["openai-sentinel-arkose-token"] = RequestConfig.arkose_token
                 if proofofwork is not None:
-                    headers["Openai-Sentinel-Proof-Token"] = proofofwork
+                    headers["openai-sentinel-proof-token"] = proofofwork
                 if need_turnstile and RequestConfig.turnstile_token is not None:
                     headers['openai-sentinel-turnstile-token'] = RequestConfig.turnstile_token
                 async with session.post(
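The second hunk changes the merge order inside the headers dict: **cls._headers is spread first and the explicit sentinel headers are written after it, so they override any identically named key coming from the capture; the names are also lowercased, presumably to match the form in which headers are read back from a .har file. A small, self-contained sketch of that dict-merge ordering, with placeholder values:

    base_headers = {"accept": "*/*", "user-agent": "Mozilla/5.0"}  # stand-in for cls._headers

    merged = {
        **base_headers,                     # captured headers go in first ...
        "accept": "text/event-stream",      # ... keys listed after the spread win on conflict
        "content-type": "application/json",
    }
    print(merged["accept"])       # -> "text/event-stream"
    print(merged["user-agent"])   # -> "Mozilla/5.0" (untouched keys pass through)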
@@ -63,26 +63,28 @@ def readHAR():
                 continue
             for v in harFile['log']['entries']:
                 v_headers = get_headers(v)
-                try:
-                    if "openai-sentinel-proof-token" in v_headers:
-                        RequestConfig.proof_token = json.loads(base64.b64decode(
-                            v_headers["openai-sentinel-proof-token"].split("gAAAAAB", 1)[-1].encode()
-                        ).decode())
-                    if "openai-sentinel-turnstile-token" in v_headers:
-                        RequestConfig.turnstile_token = v_headers["openai-sentinel-turnstile-token"]
-                except Exception as e:
-                    debug.log(f"Read proof token: {e}")
                 if arkose_url == v['request']['url']:
                     RequestConfig.arkose_request = parseHAREntry(v)
-                elif v['request']['url'] == start_url or v['request']['url'].startswith(conversation_url):
+                elif v['request']['url'].startswith(start_url):
                     try:
                         match = re.search(r'"accessToken":"(.*?)"', v["response"]["content"]["text"])
                         if match:
                             RequestConfig.access_token = match.group(1)
                     except KeyError:
-                        continue
-                    RequestConfig.cookies = {c['name']: c['value'] for c in v['request']['cookies']}
-                    RequestConfig.headers = v_headers
+                        pass
+                try:
+                    if "openai-sentinel-proof-token" in v_headers:
+                        RequestConfig.headers = v_headers
+                        RequestConfig.proof_token = json.loads(base64.b64decode(
+                            v_headers["openai-sentinel-proof-token"].split("gAAAAAB", 1)[-1].encode()
+                        ).decode())
+                    if "openai-sentinel-turnstile-token" in v_headers:
+                        RequestConfig.turnstile_token = v_headers["openai-sentinel-turnstile-token"]
+                    if "authorization" in v_headers:
+                        RequestConfig.access_token = v_headers["authorization"].split(" ")[1]
+                    RequestConfig.cookies = {c['name']: c['value'] for c in v['request']['cookies']}
+                except Exception as e:
+                    debug.log(f"Error on read headers: {e}")
 
     if RequestConfig.proof_token is None:
         raise NoValidHarFileError("No proof_token found in .har files")
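For context on the readHAR() hunk: the openai-sentinel-proof-token header value contains the marker "gAAAAAB", and the code treats everything after that marker as base64-encoded JSON, which is why it splits on the marker before decoding. The rewritten loop also restores the access token from an authorization header and captures cookies inside the same try block, logging any failure instead of skipping the entry. A standalone round-trip sketch of the token decoding step; the payload below is invented purely for illustration:

    import base64
    import json

    # Fabricated token: "gAAAAAB" marker followed by base64-encoded JSON.
    payload = ["0.42", "example-seed", None]
    token = "gAAAAAB" + base64.b64encode(json.dumps(payload).encode()).decode()

    # Same decoding as the patch: drop everything up to the marker,
    # base64-decode the rest, then parse it as JSON.
    proof_token = json.loads(base64.b64decode(
        token.split("gAAAAAB", 1)[-1].encode()
    ).decode())
    assert proof_token == payload

One nuance of split("gAAAAAB", 1)[-1]: if the marker is absent, [-1] still returns the whole string, so the decode attempt proceeds and any failure is caught by the surrounding except.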