from __future__ import annotations

import json
import base64
import random

from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ...errors import ModelNotFoundError
from ...requests import StreamSession, raise_for_status
from ...image import ImageResponse

from .HuggingChat import HuggingChat

class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://huggingface.co/chat"
    working = True
    supports_message_history = True
    default_model = HuggingChat.default_model
    # Text models are shared with HuggingChat; FLUX.1-dev is added for image generation
    default_image_model = "black-forest-labs/FLUX.1-dev"
    models = [*HuggingChat.models, default_image_model]
    image_models = [default_image_model]
    model_aliases = HuggingChat.model_aliases

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        stream: bool = True,
        proxy: str = None,
        api_base: str = "https://api-inference.huggingface.co",
        api_key: str = None,
        max_new_tokens: int = 1024,
        temperature: float = 0.7,
        prompt: str = None,
        **kwargs
    ) -> AsyncResult:
        model = cls.get_model(model)
        # Browser-like headers matching what the HuggingChat web frontend sends
        headers = {
            'accept': '*/*',
            'accept-language': 'en',
            'cache-control': 'no-cache',
            'origin': 'https://huggingface.co',
            'pragma': 'no-cache',
            'priority': 'u=1, i',
            'referer': 'https://huggingface.co/chat/',
            'sec-ch-ua': '"Not)A;Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"macOS"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
        }
        if api_key is not None:
            headers["Authorization"] = f"Bearer {api_key}"
        if model in cls.image_models:
            # Image models return binary data, not a token stream
            stream = False
            prompt = messages[-1]["content"] if prompt is None else prompt
            payload = {"inputs": prompt, "parameters": {"seed": random.randint(0, 2**32)}}
        else:
            params = {
                "return_full_text": False,
                "max_new_tokens": max_new_tokens,
                "temperature": temperature,
                **kwargs
            }
            payload = {"inputs": format_prompt(messages), "parameters": params, "stream": stream}
        async with StreamSession(
            headers=headers,
            proxy=proxy,
            timeout=600
        ) as session:
            async with session.post(f"{api_base.rstrip('/')}/models/{model}", json=payload) as response:
                if response.status == 404:
                    raise ModelNotFoundError(f"Model is not supported: {model}")
                await raise_for_status(response)
                if stream:
                    first = True
                    # Server-sent events: each payload line looks like
                    #   data:{"token": {"text": " Hello", "special": false}, ...}
                    async for line in response.iter_lines():
                        if line.startswith(b"data:"):
                            data = json.loads(line[5:])
                            if not data["token"]["special"]:
                                chunk = data["token"]["text"]
                                if first:
                                    # Left-strip the first chunk so the reply does not start with whitespace
                                    first = False
                                    chunk = chunk.lstrip()
                                if chunk:
                                    yield chunk
                else:
                    if response.headers["content-type"].startswith("image/"):
                        # Binary image response: collect the body and embed it as a base64 data: URI
                        base64_data = base64.b64encode(b"".join([chunk async for chunk in response.iter_content()]))
                        url = f"data:{response.headers['content-type']};base64,{base64_data.decode()}"
                        yield ImageResponse(url, prompt)
                    else:
                        yield (await response.json())[0]["generated_text"].strip()
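
# Hedged usage sketch (illustrative, not part of the provider): assuming this
# module is imported through the g4f package (the exact import path may differ),
# streaming a text completion could look like the following. The api_key value
# is a placeholder.
#
#   import asyncio
#
#   async def demo():
#       messages = [{"role": "user", "content": "Say hello."}]
#       async for chunk in HuggingFace.create_async_generator(
#           HuggingFace.default_model, messages, api_key="hf_..."
#       ):
#           print(chunk, end="", flush=True)
#
#   asyncio.run(demo())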

def format_prompt(messages: Messages) -> str:
    # Build a Mistral/Llama-style [INST] prompt: each completed user/assistant
    # exchange becomes one <s>[INST]...[/INST]...</s> turn, and the final user
    # message (with any system messages appended) forms the open question.
    system_messages = [message["content"] for message in messages if message["role"] == "system"]
    question = " ".join([messages[-1]["content"], *system_messages])
    history = "".join([
        f"<s>[INST]{messages[idx-1]['content']} [/INST] {message['content']}</s>"
        for idx, message in enumerate(messages)
        if message["role"] == "assistant"
    ])
    return f"{history}<s>[INST] {question} [/INST]"
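
# Worked example (illustrative) of the prompt layout produced above: for
#   [{"role": "user", "content": "Hi"},
#    {"role": "assistant", "content": "Hello!"},
#    {"role": "user", "content": "How are you?"}]
# format_prompt returns:
#   "<s>[INST]Hi [/INST] Hello!</s><s>[INST] How are you? [/INST]"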