Add Clarity Token to Copilot (#2482)

* Add Clarity Token to Copilot
* Ignore disabled providers in GUI
* Read cookie files after upload
* Add more chat templates to HuggingFace
H Lohaus 2024-12-14 19:56:36 +01:00 committed by GitHub
parent 335c971f6a
commit a591c5d409
11 changed files with 113 additions and 43 deletions


@@ -32,8 +32,7 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
api_endpoint_imagine2 = "https://api.airforce/imagine2"
working = True
needs_auth = True
-supports_stream = True
+supports_stream = False
supports_system_message = True
supports_message_history = True
@@ -41,9 +40,7 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
default_image_model = "flux"
hidden_models = {"Flux-1.1-Pro"}
additional_models_imagine = ["flux-1.1-pro", "midjourney", "dall-e-3"]
model_aliases = {
# Alias mappings for models
"gpt-4": "gpt-4o",


@@ -13,7 +13,7 @@ MODELS = {
'chat': {
'gpt-4o-2024-11-20': {'persona_id': "gpt"},
'gpt-4o': {'persona_id': "summarizer"},
-'gpt-4o-mini': {'persona_id': "gemini-1-5-flash"},
+'gpt-4o-mini': {'persona_id': "amigo"},
'o1-preview-': {'persona_id': "openai-o-one"}, # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan
'o1-preview-2024-09-12-': {'persona_id': "orion"}, # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan
@@ -24,7 +24,7 @@ MODELS = {
'codellama/CodeLlama-34b-Instruct-hf': {'persona_id': "codellama-CodeLlama-34b-Instruct-hf"},
'gemini-1.5-pro': {'persona_id': "gemini-1-5-pro"}, # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan
-'gemini-1.5-flash': {'persona_id': "amigo"},
+'gemini-1.5-flash': {'persona_id': "gemini-1.5-flash"},
'claude-3-5-sonnet-20240620': {'persona_id': "claude"},
'claude-3-5-sonnet-20241022': {'persona_id': "clude-claude-3-5-sonnet-20241022"},
@@ -200,7 +200,6 @@ class AmigoChat(AsyncGeneratorProvider, ProviderModelMixin):
"temperature": temperature,
"top_p": top_p
}
-print(data)
async with session.post(cls.chat_api_endpoint, json=data, timeout=timeout) as response:
await raise_for_status(response)
async for line in response.iter_lines():
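
The loop above consumes the reply line by line. Providers of this kind usually speak OpenAI-style server-sent events, where each `data:` line carries one JSON delta; the sketch below shows that per-line parsing under the assumption that AmigoChat uses the same framing (this diff does not show the wire format):

    import json
    from typing import Optional

    def parse_sse_line(line: bytes) -> Optional[dict]:
        """Decode one streamed line; return its JSON chunk, or None to skip."""
        text = line.decode("utf-8", errors="ignore").strip()
        if not text.startswith("data:"):
            return None          # keep-alive or unrelated line
        payload = text[len("data:"):].strip()
        if payload == "[DONE]":  # end-of-stream sentinel in OpenAI-style APIs
            return None
        return json.loads(payload)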


@@ -2,6 +2,7 @@ from __future__ import annotations
import json
import asyncio
import base64
from http.cookiejar import CookieJar
from urllib.parse import quote
@@ -93,6 +94,20 @@ class Copilot(AbstractProvider, ProviderModelMixin):
) as session:
if cls._access_token is not None:
    cls._cookies = session.cookies.jar
+if cls._access_token is None:
+    try:
+        url = "https://copilot.microsoft.com/cl/eus-sc/collect"
+        headers = {
+            "Accept": "application/x-clarity-gzip",
+            "referrer": "https://copilot.microsoft.com/onboarding"
+        }
+        response = session.post(url, headers=headers, data=get_clarity())
+        clarity_token = json.loads(response.text.split(" ", maxsplit=1)[-1])[0]["value"]
+        debug.log(f"Copilot: Clarity Token: ...{clarity_token[-12:]}")
+    except Exception as e:
+        debug.log(f"Copilot: {e}")
+else:
+    clarity_token = None
response = session.get("https://copilot.microsoft.com/c/api/user")
raise_for_status(response)
user = response.json().get('firstName')
@@ -125,6 +140,12 @@ class Copilot(AbstractProvider, ProviderModelMixin):
uploaded_images.append({"type": "image", "url": response.json().get("url")})
wss = session.ws_connect(cls.websocket_url)
+if clarity_token is not None:
+    wss.send(json.dumps({
+        "event": "challengeResponse",
+        "token": clarity_token,
+        "method": "clarity"
+    }).encode(), CurlWsFlag.TEXT)
wss.send(json.dumps({
"event": "send",
"conversationId": conversation_id,
@@ -209,4 +230,9 @@ def readHAR(url: str):
if api_key is None:
    raise NoValidHarFileError("No access token found in .har files")
-return api_key, cookies
+return api_key, cookies
+
+def get_clarity() -> bytes:
+    #{"e":["0.7.58",5,7284,4779,"n59ae4ieqq","aln5en","1upufhz",1,0,0],"a":[[7323,12,65,217,324],[7344,12,65,214,329],[7385,12,65,211,334],[7407,12,65,210,337],[7428,12,65,209,338],[7461,12,65,209,339],[7497,12,65,209,339],[7531,12,65,208,340],[7545,12,65,208,342],[11654,13,65,208,342],[11728,14,65,208,342],[11728,9,65,208,342,17535,19455,0,0,0,"Annehmen",null,"52w7wqv1r.8ovjfyrpu",1],[7284,4,1,393,968,393,968,0,0,231,310,939,0],[12063,0,2,147,3,4,4,18,5,1,10,79,25,15],[12063,36,6,[11938,0]]]}
+    body = base64.b64decode("H4sIAAAAAAAAA23RwU7DMAwG4HfJ2aqS2E5ibjxH1cMOnQYqYZvUTQPx7vyJRGGAemj01XWcP+9udg+j80MetDhSyrEISc5GrqrtZnmaTydHbrdUnSsWYT2u+8Obo0Ce/IQvaDBmjkwhUlKKIRNHmQgosqEArWPRDQMx90rxeUMPzB1j+UJvwNIxhTvsPcXyX1T+rizE4juK3mEEhpAUg/JvzW1/+U/tB1LATmhqotoiweMea50PLy2vui4LOY3XfD1dwnkor5fn/e18XBFgm6fHjSzZmCyV7d3aRByAEYextaTHEH3i5pgKGVP/s+DScE5PuLKIpW6FnCi1gY3Rbpqmj0/DI/+L7QEAAA==")
+    return body
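
The commented-out JSON above is what the hardcoded blob decodes to: `get_clarity()` returns a gzip-compressed Microsoft Clarity telemetry payload (hence the `application/x-clarity-gzip` accept header) that is replayed verbatim to the collect endpoint. This can be verified offline with the standard library alone:

    import base64
    import gzip
    import json

    # The same base64 string get_clarity() returns, decompressed for inspection.
    BODY = "H4sIAAAAAAAAA23RwU7DMAwG4HfJ2aqS2E5ibjxH1cMOnQYqYZvUTQPx7vyJRGGAemj01XWcP+9udg+j80MetDhSyrEISc5GrqrtZnmaTydHbrdUnSsWYT2u+8Obo0Ce/IQvaDBmjkwhUlKKIRNHmQgosqEArWPRDQMx90rxeUMPzB1j+UJvwNIxhTvsPcXyX1T+rizE4juK3mEEhpAUg/JvzW1/+U/tB1LATmhqotoiweMea50PLy2vui4LOY3XfD1dwnkor5fn/e18XBFgm6fHjSzZmCyV7d3aRByAEYextaTHEH3i5pgKGVP/s+DScE5PuLKIpW6FnCi1gY3Rbpqmj0/DI/+L7QEAAA=="

    payload = json.loads(gzip.decompress(base64.b64decode(BODY)))
    print(payload["e"])       # envelope: Clarity version plus session/page ids
    print(len(payload["a"]))  # list of recorded interaction events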


@@ -77,9 +77,7 @@ class DDG(AsyncGeneratorProvider, ProviderModelMixin):
if conversation is None:
conversation = Conversation(model)
is_new_conversation = True
-debug.last_model = model
if conversation.vqd is None:
conversation.vqd = await cls.get_vqd(proxy, connector)
if not conversation.vqd:
@@ -91,7 +89,7 @@ class DDG(AsyncGeneratorProvider, ProviderModelMixin):
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36',
'x-vqd-4': conversation.vqd,
}
async with ClientSession(headers=headers, connector=get_connector(connector, proxy)) as session:
if is_new_conversation:
conversation.message_history = [{"role": "user", "content": format_prompt(messages)}]
@@ -119,7 +117,7 @@ class DDG(AsyncGeneratorProvider, ProviderModelMixin):
async with session.post(cls.api_endpoint, json=data) as response:
conversation.vqd = response.headers.get("x-vqd-4")
await raise_for_status(response)
async for line in response.content:
if line:
decoded_line = line.decode('utf-8')
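
`get_vqd` (called above) fetches the `x-vqd-4` session token that every duckchat request must carry; note how the POST handler immediately stores the rotated token from the response headers. A hedged sketch of the initial handshake, based on the commonly documented public endpoint rather than on code visible in this diff:

    import aiohttp

    async def fetch_vqd(session: aiohttp.ClientSession) -> str:
        """Obtain an initial x-vqd-4 token from the duckchat status endpoint."""
        async with session.get(
            "https://duckduckgo.com/duckchat/v1/status",
            headers={"x-vqd-accept": "1"},  # asks the server to issue a token
        ) as response:
            response.raise_for_status()
            return response.headers["x-vqd-4"]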


@@ -4,6 +4,7 @@ import json
import logging
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
+from ..requests.raise_for_status import raise_for_status
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
@@ -67,17 +68,10 @@ class Mhystical(AsyncGeneratorProvider, ProviderModelMixin):
    "messages": [{"role": "user", "content": format_prompt(messages)}]
}
async with session.post(cls.api_endpoint, json=data, headers=headers, proxy=proxy) as response:
-    if response.status == 400:
-        yield "Error: API key is missing"
-    elif response.status == 429:
-        yield "Error: Rate limit exceeded"
-    elif response.status == 500:
-        yield "Error: Internal server error"
-    else:
-        response.raise_for_status()
-        response_text = await response.text()
-        filtered_response = cls.filter_response(response_text)
-        yield filtered_response
+    await raise_for_status(response)
+    response_text = await response.text()
+    filtered_response = cls.filter_response(response_text)
+    yield filtered_response
@staticmethod
def filter_response(response_text: str) -> str:
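
The hand-rolled status branches are folded into the shared `raise_for_status` helper, so HTTP failures become exceptions instead of error strings yielded into the chat stream. A minimal sketch of what such a helper does for aiohttp responses; g4f's actual implementation in `..requests.raise_for_status` may differ in detail:

    from aiohttp import ClientResponse

    class ResponseStatusError(Exception):
        """Signals a non-2xx answer from the upstream API."""

    async def raise_for_status(response: ClientResponse) -> None:
        if response.ok:  # True for any status below 400
            return
        body = (await response.text())[:200]  # keep the API's own error message
        raise ResponseStatusError(f"HTTP {response.status}: {body}")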


@@ -62,7 +62,6 @@ class GithubCopilot(AsyncGeneratorProvider, ProviderModelMixin):
if conversation is not None:
conversation_id = conversation.conversation_id
if conversation_id is None:
-print(headers)
async with session.post("https://api.individual.githubcopilot.com/github/chat/threads", headers=headers) as response:
await raise_for_status(response)
conversation_id = (await response.json()).get("thread_id")


@@ -3,25 +3,37 @@ from __future__ import annotations
import json
import base64
import random
+import requests
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ...errors import ModelNotFoundError
+from ...errors import ModelNotFoundError, ModelNotSupportedError
from ...requests import StreamSession, raise_for_status
from ...image import ImageResponse
from .HuggingChat import HuggingChat
class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://huggingface.co/chat"
url = "https://huggingface.co"
working = True
supports_message_history = True
default_model = HuggingChat.default_model
default_image_model = HuggingChat.default_image_model
-models = HuggingChat.models
image_models = [default_image_model]
model_aliases = HuggingChat.model_aliases
+@classmethod
+def get_models(cls) -> list[str]:
+    if not cls.models:
+        url = "https://huggingface.co/api/models?inference=warm&pipeline_tag=text-generation"
+        cls.models = [model["id"] for model in requests.get(url).json()]
+        cls.models.append("meta-llama/Llama-3.2-11B-Vision-Instruct")
+        cls.models.append("nvidia/Llama-3.1-Nemotron-70B-Instruct-HF")
+    if not cls.image_models:
+        url = "https://huggingface.co/api/models?pipeline_tag=text-to-image"
+        cls.image_models = [model["id"] for model in requests.get(url).json() if model["trendingScore"] >= 20]
+        cls.models.extend(cls.image_models)
+    return cls.models
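
For reference, the same catalogue query with the parameters spelled out; the `id` and `trendingScore` fields read above are part of the Hub's public `/api/models` response. Note that `get_models` performs this blocking `requests` call once and caches the result on the class:

    import requests

    response = requests.get(
        "https://huggingface.co/api/models",
        params={"inference": "warm", "pipeline_tag": "text-generation"},
        timeout=30,
    )
    response.raise_for_status()
    chat_models = [model["id"] for model in response.json()]
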
@classmethod
async def create_async_generator(
cls,
@@ -36,7 +48,10 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
prompt: str = None,
**kwargs
) -> AsyncResult:
-model = cls.get_model(model)
+try:
+    model = cls.get_model(model)
+except ModelNotSupportedError:
+    pass
headers = {
'accept': '*/*',
'accept-language': 'en',
@@ -55,6 +70,7 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
}
if api_key is not None:
headers["Authorization"] = f"Bearer {api_key}"
+payload = None
if model in cls.image_models:
stream = False
prompt = messages[-1]["content"] if prompt is None else prompt
@@ -66,12 +82,28 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
"temperature": temperature,
**kwargs
}
payload = {"inputs": format_prompt(messages), "parameters": params, "stream": stream}
async with StreamSession(
headers=headers,
proxy=proxy,
timeout=600
) as session:
+if payload is None:
+    async with session.get(f"https://huggingface.co/api/models/{model}") as response:
+        model_data = await response.json()
+        if "config" in model_data and "tokenizer_config" in model_data["config"] and "eos_token" in model_data["config"]["tokenizer_config"]:
+            eos_token = model_data["config"]["tokenizer_config"]["eos_token"]
+            if eos_token == "</s>":
+                inputs = format_prompt_mistral(messages)
+            elif eos_token == "<|im_end|>":
+                inputs = format_prompt_qwen(messages)
+            elif eos_token == "<|eot_id|>":
+                inputs = format_prompt_llama(messages)
+            else:
+                inputs = format_prompt(messages)
+        else:
+            inputs = format_prompt(messages)
+    payload = {"inputs": inputs, "parameters": params, "stream": stream}
async with session.post(f"{api_base.rstrip('/')}/models/{model}", json=payload) as response:
if response.status == 404:
raise ModelNotFoundError(f"Model is not supported: {model}")
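
The chain above selects a chat template by fingerprinting the tokenizer's `eos_token`. The same dispatch written as a lookup table, purely as a sketch (the names mirror the formatters defined below in this file):

    # eos_token -> prompt formatter; fall back to the generic format_prompt.
    TEMPLATES = {
        "</s>": format_prompt_mistral,      # Mistral-style [INST] prompts
        "<|im_end|>": format_prompt_qwen,   # ChatML (Qwen and many others)
        "<|eot_id|>": format_prompt_llama,  # Llama 3 header tokens
    }

    def pick_formatter(eos_token):
        return TEMPLATES.get(eos_token, format_prompt)
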
@@ -105,3 +137,18 @@ def format_prompt(messages: Messages) -> str:
if message["role"] == "assistant"
])
return f"{history}<s>[INST] {question} [/INST]"
+def format_prompt_qwen(messages: Messages) -> str:
+    return "".join([
+        f"<|im_start|>{message['role']}\n{message['content']}\n<|im_end|>\n" for message in messages
+    ]) + "<|im_start|>assistant\n"
+
+def format_prompt_llama(messages: Messages) -> str:
+    return "<|begin_of_text|>" + "".join([
+        f"<|start_header_id|>{message['role']}<|end_header_id|>\n\n{message['content']}\n<|eot_id|>\n" for message in messages
+    ]) + "<|start_header_id|>assistant<|end_header_id|>\n\n"
+
+def format_prompt_mistral(messages: Messages) -> str:
+    return "".join([
+        f"<|{message['role']}|>\n{message['content']}</s>\n" for message in messages
+    ]) + "<|assistant|>\n"


@@ -476,7 +476,6 @@ start_time = time.time()
def get_turnstile_token(dx: str, p: str) -> str:
decoded_bytes = base64.b64decode(dx)
-# print(decoded_bytes.decode())
return process_turnstile_token(decoded_bytes.decode(), p)


@@ -470,15 +470,17 @@ class Api:
})
def upload_cookies(files: List[UploadFile]):
response_data = []
-for file in files:
-    try:
-        if file and file.filename.endswith(".json") or file.filename.endswith(".har"):
-            filename = os.path.basename(file.filename)
-            with open(os.path.join(get_cookies_dir(), filename), 'wb') as f:
-                shutil.copyfileobj(file.file, f)
-            response_data.append({"filename": filename})
-    finally:
-        file.file.close()
+if not AppConfig.ignore_cookie_files:
+    for file in files:
+        try:
+            if file and (file.filename.endswith(".json") or file.filename.endswith(".har")):
+                filename = os.path.basename(file.filename)
+                with open(os.path.join(get_cookies_dir(), filename), 'wb') as f:
+                    shutil.copyfileobj(file.file, f)
+                response_data.append({"filename": filename})
+        finally:
+            file.file.close()
+    read_cookie_files()
return response_data
@self.app.get("/v1/synthesize/{provider}", responses={


@@ -610,6 +610,7 @@ const ask_gpt = async (message_id, message_index = -1, regenerate = false, provi
const auto_continue = document.getElementById("auto_continue")?.checked;
const download_images = document.getElementById("download_images")?.checked;
let api_key = get_api_key_by_provider(provider);
+const ignored = Array.from(settings.querySelectorAll("input.provider:not(:checked)")).map((el)=>el.value);
await api("conversation", {
id: message_id,
conversation_id: window.conversation_id,
@@ -620,6 +621,7 @@ const ask_gpt = async (message_id, message_index = -1, regenerate = false, provi
auto_continue: auto_continue,
download_images: download_images,
api_key: api_key,
+ignored: ignored,
}, files, message_id);
if (!error_storage[message_id]) {
html = markdown_render(message_storage[message_id]);
@@ -1217,6 +1219,7 @@ function count_tokens(model, text) {
if (window.GPTTokenizer_cl100k_base) {
return GPTTokenizer_cl100k_base.encode(text).length;
}
+return 0;
}
function count_words(text) {
@@ -1256,6 +1259,10 @@ systemPrompt.addEventListener("input", function() {
});
window.addEventListener('load', async function() {
await safe_load_conversation(window.conversation_id, false);
});
window.addEventListener('DOMContentLoaded', async function() {
await on_load();
if (window.conversation_id == "{{chat_id}}") {
window.conversation_id = uuid();
@@ -1309,7 +1316,6 @@ async function on_api() {
let prompt_lock = false;
messageInput.addEventListener("keydown", async (evt) => {
if (prompt_lock) return;
-// If not mobile and not shift enter
if (!window.matchMedia("(pointer:coarse)").matches && evt.keyCode === 13 && !evt.shiftKey) {
evt.preventDefault();
@@ -1361,7 +1367,7 @@ async function on_api() {
option.innerHTML = `
<div class="field">
<span class="label">Enable ${provider.label}</span>
<input id="Provider${provider.name}" type="checkbox" name="Provider${provider.name}" checked="">
<input id="Provider${provider.name}" type="checkbox" name="Provider${provider.name}" value="${provider.name}" class="provider" checked="">
<label for="Provider${provider.name}" class="toogle" title="Remove provider from dropdown"></label>
</div>`;
option.querySelector("input").addEventListener("change", (event) => load_provider_option(event.target, provider.name));


@@ -106,6 +106,9 @@ class Api:
if provider in conversations and conversation_id in conversations[provider]:
kwargs["conversation"] = conversations[provider][conversation_id]
if json_data.get("ignored"):
kwargs["ignored"] = json_data["ignored"]
return {
"model": model,
"provider": provider,