Add Ecosia provider, add OpenaiAccount alias.
Use AsyncClient in the API, add a web_search parameter to the API, and improve error messages in Openai.

commit 7e543f4747 (parent a82021fecd)
examples/ecosia.py (new file)
@@ -0,0 +1,18 @@
+import asyncio
+import g4f
+from g4f.client import AsyncClient
+
+async def main():
+    client = AsyncClient(
+        provider=g4f.Provider.Ecosia,
+    )
+    async for chunk in client.chat.completions.create(
+        [{"role": "user", "content": "happy dogs on work. write some lines"}],
+        g4f.models.default,
+        stream=True,
+        green=True,
+    ):
+        print(chunk.choices[0].delta.content or "", end="")
+    print(f"\nwith {chunk.model}")
+
+asyncio.run(main())
g4f/Provider/Ecosia.py (new file)
@@ -0,0 +1,47 @@
+
+from __future__ import annotations
+
+import base64
+import json
+from aiohttp import ClientSession, BaseConnector
+
+from ..typing import AsyncResult, Messages
+from ..requests.raise_for_status import raise_for_status
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import get_connector
+
+class Ecosia(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://www.ecosia.org"
+    working = True
+    supports_gpt_35_turbo = True
+    default_model = "gpt-3.5-turbo-0125"
+    model_aliases = {"gpt-3.5-turbo": "gpt-3.5-turbo-0125"}
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        connector: BaseConnector = None,
+        green: bool = False,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        cls.get_model(model)
+        headers = {
+            "authority": "api.ecosia.org",
+            "accept": "*/*",
+            "origin": cls.url,
+            "referer": f"{cls.url}/",
+            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36",
+        }
+        async with ClientSession(headers=headers, connector=get_connector(connector, proxy)) as session:
+            data = {
+                "messages": base64.b64encode(json.dumps(messages).encode()).decode()
+            }
+            api_url = f"https://api.ecosia.org/v2/chat/?sp={'eco' if green else 'productivity'}"
+            async with session.post(api_url, json=data) as response:
+                await raise_for_status(response)
+                async for chunk in response.content.iter_any():
+                    if chunk:
+                        yield chunk.decode(errors="ignore")
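
Note: the provider base64-encodes the JSON-serialized message list into a single "messages" field and selects Ecosia's endpoint variant through the "sp" query parameter: "eco" when green=True, otherwise "productivity". A minimal standalone sketch of the payload construction, mirroring the code above (the endpoint semantics are Ecosia's, not ours):

import base64
import json

def build_ecosia_request(messages: list, green: bool = False) -> tuple[str, dict]:
    # Serialize the chat history to JSON, then base64-encode it,
    # exactly as create_async_generator does above.
    payload = {"messages": base64.b64encode(json.dumps(messages).encode()).decode()}
    url = f"https://api.ecosia.org/v2/chat/?sp={'eco' if green else 'productivity'}"
    return url, payload

url, payload = build_ecosia_request([{"role": "user", "content": "hi"}], green=True)
print(url)      # ...?sp=eco
print(payload)  # {'messages': '<base64 blob>'}
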
g4f/Provider/You.py
@@ -132,7 +132,6 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):

     @classmethod
     async def get_cookies(cls, client: StreamSession) -> Cookies:
-
         if not cls._cookies or cls._cookies_used >= 5:
             cls._cookies = await cls.create_cookies(client)
             cls._cookies_used = 0
g4f/Provider/__init__.py
@@ -23,9 +23,11 @@ from .ChatgptFree import ChatgptFree
 from .ChatgptNext import ChatgptNext
 from .ChatgptX import ChatgptX
 from .Cnote import Cnote
+from .Cohere import Cohere
 from .DeepInfra import DeepInfra
 from .DeepInfraImage import DeepInfraImage
 from .DuckDuckGo import DuckDuckGo
+from .Ecosia import Ecosia
 from .Feedough import Feedough
 from .FlowGpt import FlowGpt
 from .FreeChatgpt import FreeChatgpt
@@ -46,7 +48,6 @@ from .ReplicateImage import ReplicateImage
 from .Vercel import Vercel
 from .WhiteRabbitNeo import WhiteRabbitNeo
 from .You import You
-from .Cohere import Cohere

 import sys

g4f/Provider/needs_auth/OpenRouter.py
@@ -9,7 +9,7 @@ class OpenRouter(Openai):
     label = "OpenRouter"
     url = "https://openrouter.ai"
     working = True
-    default_model = "openrouter/auto"
+    default_model = "mistralai/mistral-7b-instruct:free"

     @classmethod
     def get_models(cls):
g4f/Provider/needs_auth/Openai.py
@@ -56,6 +56,7 @@ class Openai(AsyncGeneratorProvider, ProviderModelMixin):
             await raise_for_status(response)
             if not stream:
                 data = await response.json()
+                cls.raise_error(data)
                 choice = data["choices"][0]
                 if "content" in choice["message"]:
                     yield choice["message"]["content"].strip()
@@ -70,8 +71,7 @@ class Openai(AsyncGeneratorProvider, ProviderModelMixin):
                     if chunk == b"[DONE]":
                         break
                     data = json.loads(chunk)
-                    if "error_message" in data:
-                        raise ResponseError(data["error_message"])
+                    cls.raise_error(data)
                     choice = data["choices"][0]
                     if "content" in choice["delta"] and choice["delta"]["content"]:
                         delta = choice["delta"]["content"]
@@ -89,6 +89,13 @@ class Openai(AsyncGeneratorProvider, ProviderModelMixin):
                     if "finish_reason" in choice and choice["finish_reason"] is not None:
                         return FinishReason(choice["finish_reason"])

+    @staticmethod
+    def raise_error(data: dict):
+        if "error_message" in data:
+            raise ResponseError(data["error_message"])
+        elif "error" in data:
+            raise ResponseError(f'Error {data["error"]["code"]}: {data["error"]["message"]}')
+
     @classmethod
     def get_headers(cls, stream: bool, api_key: str = None, headers: dict = None) -> dict:
         return {
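
Note: the new raise_error helper normalizes the two error shapes an OpenAI-compatible endpoint may return: a flat "error_message" string and a nested "error" object carrying "code" and "message". A minimal standalone sketch of the same logic (the ResponseError stub stands in for g4f's exception class):

class ResponseError(Exception):
    """Stand-in for g4f's ResponseError, for illustration only."""

def raise_error(data: dict):
    # Flat shape first, then the nested OpenAI-style {"error": {...}} shape.
    if "error_message" in data:
        raise ResponseError(data["error_message"])
    elif "error" in data:
        raise ResponseError(f'Error {data["error"]["code"]}: {data["error"]["message"]}')

try:
    raise_error({"error": {"code": 401, "message": "Invalid API key"}})
except ResponseError as e:
    print(e)  # Error 401: Invalid API key
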
g4f/Provider/needs_auth/OpenaiAccount.py (new file)
@@ -0,0 +1,7 @@
+from __future__ import annotations
+
+from .OpenaiChat import OpenaiChat
+
+class OpenaiAccount(OpenaiChat):
+    label = "OpenAI ChatGPT with Account"
+    needs_auth = True
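
Note: OpenaiAccount is a thin alias; it inherits everything from OpenaiChat and only sets needs_auth, giving the provider list an explicitly authenticated ChatGPT entry. A hedged usage sketch (assuming the alias is re-exported on g4f.Provider, as the needs_auth/__init__.py change below suggests):

import g4f
from g4f.client import AsyncClient

# Selecting the alias forces the authenticated OpenaiChat code path.
client = AsyncClient(provider=g4f.Provider.OpenaiAccount)
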
g4f/Provider/needs_auth/OpenaiChat.py
@@ -23,7 +23,7 @@ except ImportError:

 from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from ...webdriver import get_browser
-from ...typing import AsyncResult, Messages, Cookies, ImageType, Union, AsyncIterator
+from ...typing import AsyncResult, Messages, Cookies, ImageType, AsyncIterator
 from ...requests import get_args_from_browser, raise_for_status
 from ...requests.aiohttp import StreamSession
 from ...image import to_image, to_bytes, ImageResponse, ImageRequest
@@ -35,7 +35,7 @@ from ... import debug
 class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
     """A class for creating and managing conversations with OpenAI chat service"""

-    lebel = "OpenAI ChatGPT"
+    label = "OpenAI ChatGPT"
     url = "https://chat.openai.com"
     working = True
     supports_gpt_35_turbo = True
@@ -295,7 +295,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
         model: str,
         messages: Messages,
         proxy: str = None,
-        timeout: int = 120,
+        timeout: int = 180,
         api_key: str = None,
         cookies: Cookies = None,
         auto_continue: bool = False,
@@ -348,7 +348,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             if api_key is not None:
                 cls._set_api_key(api_key)

-            if cls.default_model is None and cls._api_key is not None:
+            if cls.default_model is None and (not cls.needs_auth or cls._api_key is not None):
                 try:
                     if not model:
                         cls.default_model = cls.get_model(await cls.get_default_model(session, cls._headers))
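
Note: the widened guard lets providers that do not require auth fetch the default model even without an API key, while authenticated providers stay gated. A small standalone sketch of the condition (names are stand-ins for the class attributes above):

def should_fetch_default_model(default_model, needs_auth: bool, api_key) -> bool:
    # Mirrors the new condition: unauthenticated access is fine
    # unless the provider explicitly requires auth.
    return default_model is None and (not needs_auth or api_key is not None)

print(should_fetch_default_model(None, needs_auth=False, api_key=None))  # True: new behavior
print(should_fetch_default_model(None, needs_auth=True, api_key=None))   # False: still gated
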
@@ -368,12 +368,12 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                 arkose_token, api_key, cookies = await getArkoseAndAccessToken(proxy)
                 cls._create_request_args(cookies)
                 cls._set_api_key(api_key)
-            except NoValidHarFileError:
+            except NoValidHarFileError as e:
                 ...
                 if cls._api_key is None:
-                    if debug.logging:
-                        print("Getting access token with nodriver.")
                     await cls.nodriver_access_token()
+                if cls._api_key is None and cls.needs_auth:
+                    raise e
             cls.default_model = cls.get_model(await cls.get_default_model(session, cls._headers))

             async with session.post(
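
Note: the reworked handler keeps the exception ("as e"), tries a nodriver browser session as a fallback, and only re-raises when the provider actually requires auth. A runnable control-flow sketch, with stand-in stubs for the g4f internals:

import asyncio

class NoValidHarFileError(Exception):
    """Stand-in for g4f's exception, for illustration only."""

async def get_token_from_har():
    # Pretend no .har file with an access token was found.
    raise NoValidHarFileError("no valid .har file")

async def nodriver_access_token():
    return None  # pretend the browser fallback also came up empty

async def acquire(needs_auth: bool):
    api_key = None
    try:
        api_key = await get_token_from_har()
    except NoValidHarFileError as e:
        if api_key is None:
            api_key = await nodriver_access_token()  # browser-based fallback first
        if api_key is None and needs_auth:
            raise e  # fatal only when the provider requires auth

asyncio.run(acquire(needs_auth=False))  # completes: auth is optional here
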
@@ -589,10 +589,11 @@ this.fetch = async (url, options) => {
             user_data_dir = user_config_dir("g4f-nodriver")
         except:
             user_data_dir = None
+        if debug.logging:
+            print(f"Open nodriver with user_dir: {user_data_dir}")
         browser = await uc.start(user_data_dir=user_data_dir)
         page = await browser.get("https://chat.openai.com/")
-        while await page.query_selector("#prompt-textarea") is None:
+        while await page.find("[id^=headlessui-menu-button-]") is None:
             await asyncio.sleep(1)
         api_key = await page.evaluate(
             "(async () => {"
@@ -609,8 +610,9 @@ this.fetch = async (url, options) => {
         for c in await page.browser.cookies.get_all():
             if c.domain.endswith("chat.openai.com"):
                 cookies[c.name] = c.value
+        user_agent = await page.evaluate("window.navigator.userAgent")
         await page.close()
-        cls._create_request_args(cookies)
+        cls._create_request_args(cookies, user_agent)
         cls._set_api_key(api_key)

     @classmethod
@@ -662,7 +664,7 @@ this.fetch = async (url, options) => {
             "content-type": "application/json",
             "oai-device-id": str(uuid.uuid4()),
             "oai-language": "en-US",
-            "sec-ch-ua": "\"Chromium\";v=\"122\", \"Not(A:Brand\";v=\"24\", \"Google Chrome\";v=\"122\"",
+            "sec-ch-ua": "\"Google Chrome\";v=\"123\", \"Not:A-Brand\";v=\"8\", \"Chromium\";v=\"123\"",
             "sec-ch-ua-mobile": "?0",
             "sec-ch-ua-platform": "\"Linux\"",
             "sec-fetch-dest": "empty",
@@ -675,8 +677,10 @@ this.fetch = async (url, options) => {
         return "; ".join(f"{k}={v}" for k, v in cookies.items() if k != "access_token")

     @classmethod
-    def _create_request_args(cls, cookies: Cookies = None):
+    def _create_request_args(cls, cookies: Cookies = None, user_agent: str = None):
         cls._headers = cls.get_default_headers()
+        if user_agent is not None:
+            cls._headers["user-agent"] = user_agent
         cls._cookies = {} if cookies is None else cookies
         cls._update_cookie_header()
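
Note: cookies harvested from the nodriver browser are tied to that browser's fingerprint, so the request headers now reuse its exact user-agent instead of the hard-coded default. A minimal standalone sketch of the override (the headers dict is a stand-in for cls.get_default_headers()):

def create_request_args(cookies: dict = None, user_agent: str = None) -> dict:
    headers = {"accept": "*/*"}  # stand-in for cls.get_default_headers()
    if user_agent is not None:
        # Keep the session consistent with the browser that produced the cookies.
        headers["user-agent"] = user_agent
    return headers

print(create_request_args(user_agent="Mozilla/5.0 (X11; Linux x86_64)"))
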
g4f/Provider/needs_auth/__init__.py
@@ -6,4 +6,5 @@ from .OpenaiChat import OpenaiChat
 from .Poe import Poe
 from .Openai import Openai
 from .Groq import Groq
 from .OpenRouter import OpenRouter
+from .OpenaiAccount import OpenaiAccount
g4f/api/__init__.py
@@ -1,7 +1,6 @@
 import logging
 import json
 import uvicorn
-import nest_asyncio

 from fastapi import FastAPI, Response, Request
 from fastapi.responses import StreamingResponse, RedirectResponse, HTMLResponse, JSONResponse
@@ -9,22 +8,23 @@ from fastapi.exceptions import RequestValidationError
 from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY
 from fastapi.encoders import jsonable_encoder
 from pydantic import BaseModel
-from typing import List, Union
+from typing import List, Union, Optional

 import g4f
 import g4f.debug
-from g4f.client import Client
+from g4f.client import AsyncClient
 from g4f.typing import Messages

 class ChatCompletionsConfig(BaseModel):
     messages: Messages
     model: str
-    provider: Union[str, None] = None
+    provider: Optional[str] = None
     stream: bool = False
-    temperature: Union[float, None] = None
-    max_tokens: Union[int, None] = None
+    temperature: Optional[float] = None
+    max_tokens: Optional[int] = None
     stop: Union[list[str], str, None] = None
-    api_key: Union[str, None] = None
+    api_key: Optional[str] = None
+    web_search: Optional[bool] = None

 class Api:
     def __init__(self, engine: g4f, debug: bool = True, sentry: bool = False,
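
Note: with the new web_search field, a request to the API can opt into web search per call. A hedged example request (assumes the API is running locally on its usual default port, 1337, and that the chosen provider honors the flag):

import requests

response = requests.post(
    "http://localhost:1337/v1/chat/completions",
    json={
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": "What is in the news today?"}],
        "web_search": True,  # new optional field on ChatCompletionsConfig
    },
)
print(response.json())
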
@@ -36,9 +36,7 @@ class Api:

         if debug:
             g4f.debug.logging = True
-        self.client = Client()
-
-        nest_asyncio.apply()
+        self.client = AsyncClient()
         self.app = FastAPI()

         self.routes()
@@ -90,7 +88,7 @@ class Api:
         @self.app.get("/v1/models/{model_name}")
         async def model_info(model_name: str):
             try:
-                model_info = g4f.ModelUtils.convert[model_name]
+                model_info = g4f.models.ModelUtils.convert[model_name]
                 return JSONResponse({
                     'id': model_name,
                     'object': 'model',
@@ -119,17 +117,18 @@ class Api:
                 return Response(content=format_exception(e, config), status_code=500, media_type="application/json")

             if not config.stream:
-                return JSONResponse(response.to_json())
+                return JSONResponse((await response).to_json())

-            def streaming():
+            async def streaming():
                 try:
-                    for chunk in response:
+                    async for chunk in response:
                         yield f"data: {json.dumps(chunk.to_json())}\n\n"
                 except GeneratorExit:
                     pass
                 except Exception as e:
                     logging.exception(e)
-                    yield f'data: {format_exception(e, config)}'
+                    yield f'data: {format_exception(e, config)}\n\n'
+                yield "data: [DONE]\n\n"

             return StreamingResponse(streaming(), media_type="text/event-stream")
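
Note: streaming() is now an async generator and closes every stream with a "data: [DONE]" sentinel, matching the OpenAI SSE convention. A hedged client-side sketch that consumes the stream (again assuming the default local port 1337):

import json
import requests

with requests.post(
    "http://localhost:1337/v1/chat/completions",
    json={"model": "gpt-3.5-turbo",
          "messages": [{"role": "user", "content": "hi"}],
          "stream": True},
    stream=True,
) as resp:
    for line in resp.iter_lines():
        if not line.startswith(b"data: "):
            continue  # skip blank separator lines
        payload = line[len(b"data: "):]
        if payload == b"[DONE]":  # sentinel emitted after the last chunk
            break
        print(json.loads(payload))
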
g4f/gui/client/index.html
@@ -134,7 +134,7 @@
             <textarea id="Gemini-api_key" name="Gemini[api_key]" placeholder="&quot;__Secure-1PSID&quot; cookie"></textarea>
         </div>
         <div class="field box">
-            <label for="GeminiPro-api_key" class="label" title="">GeminiPro:</label>
+            <label for="GeminiPro-api_key" class="label" title="">GeminiPro API:</label>
             <textarea id="GeminiPro-api_key" name="GeminiPro[api_key]" placeholder="api_key"></textarea>
         </div>
         <div class="field box">
@@ -146,12 +146,12 @@
             <textarea id="HuggingFace-api_key" name="HuggingFace[api_key]" placeholder="api_key"></textarea>
         </div>
         <div class="field box">
-            <label for="Openai-api_key" class="label" title="">Openai:</label>
+            <label for="Openai-api_key" class="label" title="">OpenAI API:</label>
             <textarea id="Openai-api_key" name="Openai[api_key]" placeholder="api_key"></textarea>
         </div>
         <div class="field box">
-            <label for="OpenaiChat-api_key" class="label" title="">OpenaiChat:</label>
-            <textarea id="OpenaiChat-api_key" name="OpenaiChat[api_key]" placeholder="api_key"></textarea>
+            <label for="OpenaiAccount-api_key" class="label" title="">OpenAI ChatGPT:</label>
+            <textarea id="OpenaiAccount-api_key" name="OpenaiAccount[api_key]" placeholder="access_key"></textarea>
         </div>
         <div class="field box">
             <label for="OpenRouter-api_key" class="label" title="">OpenRouter:</label>
g4f/providers/base_provider.py
@@ -264,16 +264,18 @@ class AsyncGeneratorProvider(AsyncProvider):
             AsyncResult: An asynchronous generator yielding results.
         """
         raise NotImplementedError()

 class ProviderModelMixin:
     default_model: str
     models: list[str] = []
     model_aliases: dict[str, str] = {}

     @classmethod
     def get_models(cls) -> list[str]:
+        if not cls.models:
+            return [cls.default_model]
         return cls.models

     @classmethod
     def get_model(cls, model: str) -> str:
         if not model and cls.default_model is not None:
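
Note: with the new guard, a provider that never enumerates its models (like Ecosia above, which only sets default_model) still reports a usable one-element list. A minimal standalone sketch:

class DemoProvider:
    # Mirrors the mixin's class attributes; this provider never filled in models.
    default_model = "gpt-3.5-turbo-0125"
    models: list = []

    @classmethod
    def get_models(cls):
        if not cls.models:
            return [cls.default_model]  # fall back to the single default
        return cls.models

print(DemoProvider.get_models())  # ['gpt-3.5-turbo-0125']
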