Add OpenRouter and DeepInfraImage Provider (#1814)

parent 84475b4159
commit 00951eb791
@@ -47,7 +47,7 @@ class Bing(AsyncGeneratorProvider, ProviderModelMixin):
         proxy: str = None,
         timeout: int = 900,
         api_key: str = None,
-        cookies: Cookies = None,
+        cookies: Cookies = {},
         connector: BaseConnector = None,
         tone: str = None,
         image: ImageType = None,
g4f/Provider/DeepInfraImage.py (new file, 74 lines)
@@ -0,0 +1,74 @@
+from __future__ import annotations
+
+import requests
+
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..typing import AsyncResult, Messages
+from ..requests import StreamSession, raise_for_status
+from ..image import ImageResponse
+
+class DeepInfraImage(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://deepinfra.com"
+    working = True
+    default_model = 'stability-ai/sdxl'
+
+    @classmethod
+    def get_models(cls):
+        if not cls.models:
+            url = 'https://api.deepinfra.com/models/featured'
+            models = requests.get(url).json()
+            cls.models = [model['model_name'] for model in models if model["reported_type"] == "text-to-image"]
+        return cls.models
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        **kwargs
+    ) -> AsyncResult:
+        yield await cls.create_async(messages[-1]["content"], model, **kwargs)
+
+    @classmethod
+    async def create_async(
+        cls,
+        prompt: str,
+        model: str,
+        api_key: str = None,
+        api_base: str = "https://api.deepinfra.com/v1/inference",
+        proxy: str = None,
+        timeout: int = 180,
+        extra_data: dict = {},
+        **kwargs
+    ) -> ImageResponse:
+        headers = {
+            'Accept-Encoding': 'gzip, deflate, br',
+            'Accept-Language': 'en-US',
+            'Connection': 'keep-alive',
+            'Origin': 'https://deepinfra.com',
+            'Referer': 'https://deepinfra.com/',
+            'Sec-Fetch-Dest': 'empty',
+            'Sec-Fetch-Mode': 'cors',
+            'Sec-Fetch-Site': 'same-site',
+            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
+            'X-Deepinfra-Source': 'web-embed',
+            'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"macOS"',
+        }
+        if api_key is not None:
+            headers["Authorization"] = f"Bearer {api_key}"
+        async with StreamSession(
+            proxies={"all": proxy},
+            headers=headers,
+            timeout=timeout
+        ) as session:
+            model = cls.get_model(model)
+            data = {"prompt": prompt, **extra_data}
+            data = {"input": data} if model == cls.default_model else data
+            async with session.post(f"{api_base.rstrip('/')}/{model}", json=data) as response:
+                await raise_for_status(response)
+                data = await response.json()
+                images = data["output"] if "output" in data else data["images"]
+                images = images[0] if len(images) == 1 else images
+                return ImageResponse(images, prompt)
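
For reviewers who want to exercise the new provider, here is a minimal usage sketch (mine, not part of the diff); it assumes the export added to g4f/Provider/__init__.py further down:

import asyncio
from g4f.Provider import DeepInfraImage

async def main():
    # create_async posts the prompt to DeepInfra's inference endpoint and
    # wraps the returned image URL(s) in an ImageResponse.
    response = await DeepInfraImage.create_async(
        "a watercolor lighthouse at dusk",  # example prompt, not from the diff
        "stability-ai/sdxl",                # the provider's default_model
    )
    print(response)

asyncio.run(main())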
@@ -8,8 +8,9 @@ import uuid
 from ..typing import AsyncResult, Messages, ImageType, Cookies
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from .helper import format_prompt
-from ..image import to_bytes, ImageResponse
+from ..image import ImageResponse, to_bytes, is_accepted_format
 from ..requests import StreamSession, FormData, raise_for_status
 from ..errors import MissingRequirementsError
 
 from .you.har_file import get_dfp_telemetry_id
+
@@ -46,6 +47,7 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
         image: ImageType = None,
         image_name: str = None,
         proxy: str = None,
+        timeout: int = 240,
         chat_mode: str = "default",
         **kwargs,
     ) -> AsyncResult:
@@ -55,12 +57,14 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
             ...
         elif model.startswith("dall-e"):
             chat_mode = "create"
+            messages = [messages[-1]]
         else:
             chat_mode = "custom"
             model = cls.get_model(model)
         async with StreamSession(
             proxies={"all": proxy},
-            impersonate="chrome"
+            impersonate="chrome",
+            timeout=(30, timeout)
         ) as session:
             cookies = await cls.get_cookies(session) if chat_mode != "default" else None
             upload = json.dumps([await cls.upload_file(session, cookies, to_bytes(image), image_name)]) if image else ""
@@ -73,7 +77,6 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
             "q": format_prompt(messages),
             "domain": "youchat",
             "selectedChatMode": chat_mode,
-            #"chat": json.dumps(chat),
         }
         params = {
             "userFiles": upload,
@@ -113,7 +116,7 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
             await raise_for_status(response)
             upload_nonce = await response.text()
             data = FormData()
-            data.add_field('file', file, filename=filename)
+            data.add_field('file', file, content_type=is_accepted_format(file), filename=filename)
             async with client.post(
                 f"{cls.url}/api/upload",
                 data=data,
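
A note on the upload_file change above: is_accepted_format (newly imported in the first You.py hunk) sniffs the magic bytes of the upload to pick a MIME type instead of leaving the field unset. A rough sketch of the intent, assuming a local PNG (the filename is hypothetical):

from g4f.image import to_bytes, is_accepted_format

with open("photo.png", "rb") as f:  # hypothetical input file
    file = to_bytes(f.read())
print(is_accepted_format(file))  # a MIME string such as "image/png"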
@@ -21,6 +21,7 @@ from .ChatgptFree import ChatgptFree
 from .ChatgptNext import ChatgptNext
 from .ChatgptX import ChatgptX
 from .DeepInfra import DeepInfra
+from .DeepInfraImage import DeepInfraImage
 from .DuckDuckGo import DuckDuckGo
 from .FlowGpt import FlowGpt
 from .FreeChatgpt import FreeChatgpt
g4f/Provider/needs_auth/OpenRouter.py (new file, 31 lines)
@@ -0,0 +1,31 @@
+from __future__ import annotations
+
+import requests
+
+from .Openai import Openai
+from ...typing import AsyncResult, Messages
+
+class OpenRouter(Openai):
+    url = "https://openrouter.ai"
+    working = True
+    default_model = "openrouter/auto"
+
+    @classmethod
+    def get_models(cls):
+        if not cls.models:
+            url = 'https://openrouter.ai/api/v1/models'
+            models = requests.get(url).json()["data"]
+            cls.models = [model['id'] for model in models]
+        return cls.models
+
+    @classmethod
+    def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        api_base: str = "https://openrouter.ai/api/v1",
+        **kwargs
+    ) -> AsyncResult:
+        return super().create_async_generator(
+            model, messages, api_base=api_base, **kwargs
+        )
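
Because OpenRouter subclasses Openai, it inherits the OpenAI-compatible streaming flow and only overrides the model list and api_base. A hedged usage sketch (the api_key is a placeholder; the import path relies on the needs_auth/__init__.py hunk below):

import asyncio
from g4f.Provider.needs_auth import OpenRouter

async def main():
    # Streams deltas from openrouter.ai's OpenAI-compatible
    # /chat/completions endpoint; non-text chunks are skipped.
    async for chunk in OpenRouter.create_async_generator(
        model="openrouter/auto",
        messages=[{"role": "user", "content": "Say hello"}],
        api_key="sk-or-...",  # placeholder; a real OpenRouter key is required
    ):
        if isinstance(chunk, str):
            print(chunk, end="")

asyncio.run(main())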
@@ -2,10 +2,10 @@ from __future__ import annotations
 
 import json
 
+from ..helper import filter_none
 from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin, FinishReason
 from ...typing import Union, Optional, AsyncResult, Messages
-from ...requests.raise_for_status import raise_for_status
-from ...requests import StreamSession
+from ...requests import StreamSession, raise_for_status
 from ...errors import MissingAuthError, ResponseError
 
 class Openai(AsyncGeneratorProvider, ProviderModelMixin):
@@ -99,10 +99,3 @@ class Openai(AsyncGeneratorProvider, ProviderModelMixin):
             ),
             **({} if headers is None else headers)
         }
-
-def filter_none(**kwargs) -> dict:
-    return {
-        key: value
-        for key, value in kwargs.items()
-        if value is not None
-    }
@@ -334,7 +334,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             RuntimeError: If an error occurs during processing.
         """
         async with StreamSession(
-            proxies={"https": proxy},
+            proxies={"all": proxy},
             impersonate="chrome",
             timeout=timeout
         ) as session:
@@ -6,3 +6,4 @@ from .OpenaiChat import OpenaiChat
 from .Poe import Poe
 from .Openai import Openai
 from .Groq import Groq
+from .OpenRouter import OpenRouter
@@ -76,7 +76,7 @@ class Api:
         @self.app.get("/v1/models")
         async def models():
             model_list = dict(
-                (model, g4f.ModelUtils.convert[model])
+                (model, g4f.models.ModelUtils.convert[model])
                 for model in g4f.Model.__all__()
             )
             model_list = [{
@@ -132,10 +132,18 @@
             <label for="GeminiPro-api_key" class="label" title="">GeminiPro: api_key</label>
             <textarea id="GeminiPro-api_key" name="GeminiPro[api_key]" placeholder="..."></textarea>
         </div>
+        <div class="field box">
+            <label for="OpenRouter-api_key" class="label" title="">OpenRouter: api_key</label>
+            <textarea id="OpenRouter-api_key" name="OpenRouter[api_key]" placeholder="..."></textarea>
+        </div>
+        <div class="field box">
+            <label for="HuggingFace-api_key" class="label" title="">HuggingFace: api_key</label>
+            <textarea id="HuggingFace-api_key" name="HuggingFace[api_key]" placeholder="..."></textarea>
+        </div>
         <div class="field box">
             <label for="DeepInfra-api_key" class="label" title="">DeepInfra: api_key</label>
             <textarea id="DeepInfra-api_key" name="DeepInfra[api_key]" placeholder="..."></textarea>
         </div>
     </div>
     <div class="bottom_buttons">
         <button onclick="delete_conversations()">
@@ -109,7 +109,7 @@ body {
 }
 
 .conversations {
-    max-width: 280px;
+    max-width: 300px;
     padding: var(--section-gap);
     overflow: auto;
     flex-shrink: 0;
@@ -207,9 +207,9 @@ body {
     gap: 4px;
 }
 
-.conversations .convo .fa-trash {
+.conversations .convo .fa-ellipsis-vertical {
     position: absolute;
-    right: 8px;
+    right: 14px;
 }
 
 .conversations .convo .choise {
@@ -1075,6 +1075,10 @@ a:-webkit-any-link {
     resize: vertical;
 }
 
+.settings textarea {
+    height: 51px;
+}
+
 .settings {
     width: 100%;
     display: flex;
@@ -42,7 +42,7 @@ appStorage = window.localStorage || {
 const markdown = window.markdownit();
 const markdown_render = (content) => {
     return markdown.render(content
-        .replaceAll(/<!-- generated images start -->[\s\S]+<!-- generated images end -->/gm, "")
+        .replaceAll(/<!-- generated images start -->|<!-- generated images end -->/gm, "")
         .replaceAll(/<img data-prompt="[^>]+">/gm, "")
     )
     .replaceAll("<a href=", '<a target="_blank" href=')
@@ -127,9 +127,6 @@ const register_message_buttons = async () => {
                 sound.controls = 'controls';
                 sound.src = url;
                 sound.type = 'audio/wav';
-                if (ended && !stopped) {
-                    sound.autoplay = true;
-                }
                 sound.onended = function() {
                     ended = true;
                 };
@@ -140,6 +137,9 @@ const register_message_buttons = async () => {
                 container.classList.add("audio");
                 container.appendChild(sound);
                 content_el.appendChild(container);
+                if (ended && !stopped) {
+                    sound.play();
+                }
             }
             if (lines.length < 1 || stopped) {
                 el.classList.remove("active");
@@ -608,12 +608,11 @@ async function get_messages(conversation_id) {
 }
 
 async function add_conversation(conversation_id, content) {
-    if (content.length > 17) {
-        title = content.substring(0, 17) + '...'
+    if (content.length > 18) {
+        title = content.substring(0, 18) + '...'
     } else {
-        title = content + ' '.repeat(19 - content.length)
+        title = content + ' '.repeat(20 - content.length)
     }
 
     if (appStorage.getItem(`conversation:${conversation_id}`) == null) {
         await save_conversation(conversation_id, {
             id: conversation_id,
@@ -623,7 +622,6 @@ async function add_conversation(conversation_id, content) {
             items: [],
         });
     }
 
-    history.pushState({}, null, `/chat/${conversation_id}`);
 }
 
||||
@ -695,27 +693,31 @@ const load_conversations = async () => {
|
||||
|
||||
await clear_conversations();
|
||||
|
||||
for (conversation of conversations) {
|
||||
conversations.sort((a, b) => (b.updated||0)-(a.updated||0));
|
||||
|
||||
let html = "";
|
||||
conversations.forEach((conversation) => {
|
||||
let updated = "";
|
||||
if (conversation.updated) {
|
||||
const date = new Date(conversation.updated);
|
||||
updated = date.toLocaleString('en-GB', {dateStyle: 'short', timeStyle: 'short', monthStyle: 'short'});
|
||||
updated = updated.replace("/" + date.getFullYear(), "")
|
||||
}
|
||||
box_conversations.innerHTML += `
|
||||
html += `
|
||||
<div class="convo" id="convo-${conversation.id}">
|
||||
<div class="left" onclick="set_conversation('${conversation.id}')">
|
||||
<i class="fa-regular fa-comments"></i>
|
||||
<span class="convo-title"><span class="datetime">${updated}</span> ${conversation.title}</span>
|
||||
</div>
|
||||
<i onclick="show_option('${conversation.id}')" class="fa-regular fa-trash" id="conv-${conversation.id}"></i>
|
||||
<i onclick="show_option('${conversation.id}')" class="fa-solid fa-ellipsis-vertical" id="conv-${conversation.id}"></i>
|
||||
<div id="cho-${conversation.id}" class="choise" style="display:none;">
|
||||
<i onclick="delete_conversation('${conversation.id}')" class="fa-regular fa-check"></i>
|
||||
<i onclick="delete_conversation('${conversation.id}')" class="fa-regular fa-trash"></i>
|
||||
<i onclick="hide_option('${conversation.id}')" class="fa-regular fa-x"></i>
|
||||
</div>
|
||||
</div>
|
||||
`;
|
||||
}
|
||||
});
|
||||
box_conversations.innerHTML = html;
|
||||
};
|
||||
|
||||
document.getElementById("cancelButton").addEventListener("click", async () => {
|
||||
@@ -804,6 +806,7 @@ const register_settings_storage = async () => {
                 appStorage.setItem(element.id, element.selectedIndex);
                 break;
             case "text":
+            case "number":
                 appStorage.setItem(element.id, element.value);
                 break;
             default:
@@ -828,6 +831,7 @@ const load_settings_storage = async () => {
                 element.selectedIndex = parseInt(value);
                 break;
             case "text":
+            case "number":
             case "textarea":
                 element.value = value;
                 break;
@@ -8,7 +8,7 @@ from g4f import version, models
 from g4f import get_last_provider, ChatCompletion
 from g4f.errors import VersionNotFoundError
 from g4f.Provider import ProviderType, __providers__, __map__
-from g4f.providers.base_provider import ProviderModelMixin
+from g4f.providers.base_provider import ProviderModelMixin, FinishReason
 from g4f.providers.conversation import BaseConversation
 
 conversations: dict[dict[str, BaseConversation]] = {}
@@ -134,7 +134,7 @@ class Api():
                 elif isinstance(chunk, Exception):
                     logging.exception(chunk)
                     yield self._format_json("message", get_error_message(chunk))
-                else:
+                elif not isinstance(chunk, FinishReason):
                     yield self._format_json("content", str(chunk))
             except Exception as e:
                 logging.exception(e)
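
The new elif guard exists because providers can now yield FinishReason sentinels interleaved with text chunks, and the old else branch would have leaked str(chunk) of the sentinel into the content stream. A toy illustration (the chunk values and the FinishReason constructor argument are assumptions):

from g4f.providers.base_provider import FinishReason

chunks = ["Hel", "lo", FinishReason("stop")]  # assumed constructor signature
content = "".join(str(c) for c in chunks if not isinstance(c, FinishReason))
assert content == "Hello"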
@@ -50,3 +50,10 @@ def get_random_hex(length: int = 32) -> str:
         random.choice("abcdef" + string.digits)
         for _ in range(length)
     )
+
+def filter_none(**kwargs) -> dict:
+    return {
+        key: value
+        for key, value in kwargs.items()
+        if value is not None
+    }
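
The relocated filter_none helper drops None-valued keyword arguments, which keeps optional fields out of JSON payloads entirely instead of sending null. For example (the argument names here are invented):

payload = filter_none(model="gpt-4", temperature=None, max_tokens=100)
assert payload == {"model": "gpt-4", "max_tokens": 100}  # temperature omitted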