Add Ecosia Provider, Add OpenaiAccount alias

Use AsyncClient in the API, add web_search parameter
Improve error messages in Openai
Heiner Lohaus 2024-04-18 20:18:51 +02:00
parent a82021fecd
commit 7e543f4747
12 changed files with 125 additions and 40 deletions

examples/ecosia.py (new file)

@@ -0,0 +1,18 @@
import asyncio
import g4f
from g4f.client import AsyncClient

async def main():
    client = AsyncClient(
        provider=g4f.Provider.Ecosia,
    )
    async for chunk in client.chat.completions.create(
        [{"role": "user", "content": "happy dogs on work. write some lines"}],
        g4f.models.default,
        stream=True,
        green=True,
    ):
        print(chunk.choices[0].delta.content or "", end="")
    print(f"\nwith {chunk.model}")

asyncio.run(main())
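The green=True keyword is forwarded unchanged to the Ecosia provider, which uses it to select the "eco" variant of the chat endpoint (sp=eco) instead of the default "productivity" one, as the provider code below shows.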

g4f/Provider/Ecosia.py (new file)

@@ -0,0 +1,47 @@
from __future__ import annotations

import base64
import json
from aiohttp import ClientSession, BaseConnector

from ..typing import AsyncResult, Messages
from ..requests.raise_for_status import raise_for_status
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import get_connector

class Ecosia(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://www.ecosia.org"
    working = True
    supports_gpt_35_turbo = True
    default_model = "gpt-3.5-turbo-0125"
    model_aliases = {"gpt-3.5-turbo": "gpt-3.5-turbo-0125"}

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        connector: BaseConnector = None,
        green: bool = False,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        cls.get_model(model)
        headers = {
            "authority": "api.ecosia.org",
            "accept": "*/*",
            "origin": cls.url,
            "referer": f"{cls.url}/",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36",
        }
        async with ClientSession(headers=headers, connector=get_connector(connector, proxy)) as session:
            data = {
                "messages": base64.b64encode(json.dumps(messages).encode()).decode()
            }
            api_url = f"https://api.ecosia.org/v2/chat/?sp={'eco' if green else 'productivity'}"
            async with session.post(api_url, json=data) as response:
                await raise_for_status(response)
                async for chunk in response.content.iter_any():
                    if chunk:
                        yield chunk.decode(errors="ignore")
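One detail worth calling out: the messages are not posted as plain JSON; the provider base64-encodes the serialized list into a single "messages" field. A standalone sketch of the payload construction, mirroring the code above:

import base64
import json

messages = [{"role": "user", "content": "Hello"}]

# Serialize the message list, then base64-encode it, as Ecosia's API expects.
data = {"messages": base64.b64encode(json.dumps(messages).encode()).decode()}

print(data)
# {'messages': 'W3sicm9sZSI6ICJ1c2VyIiwgImNvbnRlbnQiOiAiSGVsbG8ifV0='}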

g4f/Provider/You.py

@@ -132,7 +132,6 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
    @classmethod
    async def get_cookies(cls, client: StreamSession) -> Cookies:
        if not cls._cookies or cls._cookies_used >= 5:
            cls._cookies = await cls.create_cookies(client)
            cls._cookies_used = 0

g4f/Provider/__init__.py

@@ -23,9 +23,11 @@ from .ChatgptFree import ChatgptFree
 from .ChatgptNext import ChatgptNext
 from .ChatgptX import ChatgptX
 from .Cnote import Cnote
+from .Cohere import Cohere
 from .DeepInfra import DeepInfra
 from .DeepInfraImage import DeepInfraImage
 from .DuckDuckGo import DuckDuckGo
+from .Ecosia import Ecosia
 from .Feedough import Feedough
 from .FlowGpt import FlowGpt
 from .FreeChatgpt import FreeChatgpt
@@ -46,7 +48,6 @@ from .ReplicateImage import ReplicateImage
 from .Vercel import Vercel
 from .WhiteRabbitNeo import WhiteRabbitNeo
 from .You import You
-from .Cohere import Cohere
 import sys

g4f/Provider/needs_auth/OpenRouter.py

@@ -9,7 +9,7 @@ class OpenRouter(Openai):
label = "OpenRouter"
url = "https://openrouter.ai"
working = True
default_model = "openrouter/auto"
default_model = "mistralai/mistral-7b-instruct:free"
@classmethod
def get_models(cls):

g4f/Provider/needs_auth/Openai.py

@@ -56,6 +56,7 @@ class Openai(AsyncGeneratorProvider, ProviderModelMixin):
                 await raise_for_status(response)
                 if not stream:
                     data = await response.json()
+                    cls.raise_error(data)
                     choice = data["choices"][0]
                     if "content" in choice["message"]:
                         yield choice["message"]["content"].strip()
@@ -70,8 +71,7 @@ class Openai(AsyncGeneratorProvider, ProviderModelMixin):
                         if chunk == b"[DONE]":
                             break
                         data = json.loads(chunk)
-                        if "error_message" in data:
-                            raise ResponseError(data["error_message"])
+                        cls.raise_error(data)
                         choice = data["choices"][0]
                         if "content" in choice["delta"] and choice["delta"]["content"]:
                             delta = choice["delta"]["content"]
@@ -89,6 +89,13 @@ class Openai(AsyncGeneratorProvider, ProviderModelMixin):
         if "finish_reason" in choice and choice["finish_reason"] is not None:
             return FinishReason(choice["finish_reason"])

+    @staticmethod
+    def raise_error(data: dict):
+        if "error_message" in data:
+            raise ResponseError(data["error_message"])
+        elif "error" in data:
+            raise ResponseError(f'Error {data["error"]["code"]}: {data["error"]["message"]}')
+
     @classmethod
     def get_headers(cls, stream: bool, api_key: str = None, headers: dict = None) -> dict:
         return {
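For reference, the new raise_error helper covers the two error shapes OpenAI-compatible backends commonly return. A minimal standalone sketch of the behavior (ResponseError here is a stand-in for g4f's own exception class):

class ResponseError(Exception):
    """Stand-in for g4f.errors.ResponseError."""

def raise_error(data: dict):
    # Flat shape: {"error_message": "..."}
    if "error_message" in data:
        raise ResponseError(data["error_message"])
    # Nested OpenAI-style shape: {"error": {"code": ..., "message": ...}}
    elif "error" in data:
        raise ResponseError(f'Error {data["error"]["code"]}: {data["error"]["message"]}')

raise_error({"error": {"code": 401, "message": "Invalid API key"}})
# -> ResponseError: Error 401: Invalid API key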

g4f/Provider/needs_auth/OpenaiAccount.py (new file)

@@ -0,0 +1,7 @@
from __future__ import annotations

from .OpenaiChat import OpenaiChat

class OpenaiAccount(OpenaiChat):
    label = "OpenAI ChatGPT with Account"
    needs_auth = True
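In short, OpenaiAccount is OpenaiChat with authentication made mandatory. A hedged usage sketch, mirroring the ecosia example above (the access token value is hypothetical, and passing it as api_key assumes the client forwards extra keyword arguments to the provider):

import asyncio
import g4f
from g4f.client import AsyncClient

async def main():
    client = AsyncClient(provider=g4f.Provider.OpenaiAccount)
    response = await client.chat.completions.create(
        [{"role": "user", "content": "Hello"}],
        "",  # empty model name falls back to the provider default
        api_key="eyJhbGciOi...",  # hypothetical ChatGPT access token
    )
    print(response.choices[0].message.content)

asyncio.run(main())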

g4f/Provider/needs_auth/OpenaiChat.py

@@ -23,7 +23,7 @@ except ImportError:
 from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from ...webdriver import get_browser
-from ...typing import AsyncResult, Messages, Cookies, ImageType, Union, AsyncIterator
+from ...typing import AsyncResult, Messages, Cookies, ImageType, AsyncIterator
 from ...requests import get_args_from_browser, raise_for_status
 from ...requests.aiohttp import StreamSession
 from ...image import to_image, to_bytes, ImageResponse, ImageRequest
@@ -35,7 +35,7 @@ from ... import debug

 class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
     """A class for creating and managing conversations with OpenAI chat service"""
-    lebel = "OpenAI ChatGPT"
+    label = "OpenAI ChatGPT"
     url = "https://chat.openai.com"
     working = True
     supports_gpt_35_turbo = True
@@ -295,7 +295,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
         model: str,
         messages: Messages,
         proxy: str = None,
-        timeout: int = 120,
+        timeout: int = 180,
         api_key: str = None,
         cookies: Cookies = None,
         auto_continue: bool = False,
@@ -348,7 +348,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             if api_key is not None:
                 cls._set_api_key(api_key)
-            if cls.default_model is None and cls._api_key is not None:
+            if cls.default_model is None and (not cls.needs_auth or cls._api_key is not None):
                 try:
                     if not model:
                         cls.default_model = cls.get_model(await cls.get_default_model(session, cls._headers))
@@ -368,12 +368,12 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                     arkose_token, api_key, cookies = await getArkoseAndAccessToken(proxy)
                     cls._create_request_args(cookies)
                     cls._set_api_key(api_key)
-                except NoValidHarFileError:
+                except NoValidHarFileError as e:
                     ...
                     if cls._api_key is None:
                         if debug.logging:
                             print("Getting access token with nodriver.")
                         await cls.nodriver_access_token()
+                    if cls._api_key is None and cls.needs_auth:
+                        raise e
                 cls.default_model = cls.get_model(await cls.get_default_model(session, cls._headers))

             async with session.post(
@@ -589,10 +589,11 @@ this.fetch = async (url, options) => {
                 user_data_dir = user_config_dir("g4f-nodriver")
             except:
                 user_data_dir = None
+            if debug.logging:
+                print(f"Open nodriver with user_dir: {user_data_dir}")
             browser = await uc.start(user_data_dir=user_data_dir)
             page = await browser.get("https://chat.openai.com/")
-            while await page.query_selector("#prompt-textarea") is None:
+            while await page.find("[id^=headlessui-menu-button-]") is None:
                 await asyncio.sleep(1)
             api_key = await page.evaluate(
                 "(async () => {"
@@ -609,8 +610,9 @@ this.fetch = async (url, options) => {
             for c in await page.browser.cookies.get_all():
                 if c.domain.endswith("chat.openai.com"):
                     cookies[c.name] = c.value
+            user_agent = await page.evaluate("window.navigator.userAgent")
             await page.close()
-            cls._create_request_args(cookies)
+            cls._create_request_args(cookies, user_agent)
             cls._set_api_key(api_key)

     @classmethod
@@ -662,7 +664,7 @@ this.fetch = async (url, options) => {
"content-type": "application/json",
"oai-device-id": str(uuid.uuid4()),
"oai-language": "en-US",
"sec-ch-ua": "\"Chromium\";v=\"122\", \"Not(A:Brand\";v=\"24\", \"Google Chrome\";v=\"122\"",
"sec-ch-ua": "\"Google Chrome\";v=\"123\", \"Not:A-Brand\";v=\"8\", \"Chromium\";v=\"123\"",
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": "\"Linux\"",
"sec-fetch-dest": "empty",
@@ -675,8 +677,10 @@ this.fetch = async (url, options) => {
         return "; ".join(f"{k}={v}" for k, v in cookies.items() if k != "access_token")

     @classmethod
-    def _create_request_args(cls, cookies: Cookies = None):
+    def _create_request_args(cls, cookies: Cookies = None, user_agent: str = None):
         cls._headers = cls.get_default_headers()
+        if user_agent is not None:
+            cls._headers["user-agent"] = user_agent
         cls._cookies = {} if cookies is None else cookies
         cls._update_cookie_header()
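A side note on the last two hunks: capturing the live browser's user-agent with page.evaluate and injecting it via _create_request_args keeps the request headers consistent with the updated sec-ch-ua client hints above; presumably a user-agent that disagrees with those hints would be an easy fingerprinting tell.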

g4f/Provider/needs_auth/__init__.py

@@ -7,3 +7,4 @@ from .Poe import Poe
 from .Openai import Openai
 from .Groq import Groq
 from .OpenRouter import OpenRouter
+from .OpenaiAccount import OpenaiAccount

g4f/api/__init__.py

@@ -1,7 +1,6 @@
 import logging
 import json
 import uvicorn
-import nest_asyncio

 from fastapi import FastAPI, Response, Request
 from fastapi.responses import StreamingResponse, RedirectResponse, HTMLResponse, JSONResponse
@@ -9,22 +8,23 @@ from fastapi.exceptions import RequestValidationError
 from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY
 from fastapi.encoders import jsonable_encoder
 from pydantic import BaseModel
-from typing import List, Union
+from typing import List, Union, Optional

 import g4f
 import g4f.debug
-from g4f.client import Client
+from g4f.client import AsyncClient
 from g4f.typing import Messages

 class ChatCompletionsConfig(BaseModel):
     messages: Messages
     model: str
-    provider: Union[str, None] = None
+    provider: Optional[str] = None
     stream: bool = False
-    temperature: Union[float, None] = None
-    max_tokens: Union[int, None] = None
+    temperature: Optional[float] = None
+    max_tokens: Optional[int] = None
     stop: Union[list[str], str, None] = None
-    api_key: Union[str, None] = None
+    api_key: Optional[str] = None
+    web_search: Optional[bool] = None

 class Api:
     def __init__(self, engine: g4f, debug: bool = True, sentry: bool = False,
@@ -36,9 +36,7 @@ class Api:
         if debug:
             g4f.debug.logging = True
-        self.client = Client()
-        nest_asyncio.apply()
+        self.client = AsyncClient()
         self.app = FastAPI()

         self.routes()
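Worth spelling out: nest_asyncio was only needed because the synchronous Client had to run its own event loop inside FastAPI's; with AsyncClient the route handlers can await the client directly, so the workaround goes away.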
@@ -90,7 +88,7 @@ class Api:
         @self.app.get("/v1/models/{model_name}")
         async def model_info(model_name: str):
             try:
-                model_info = g4f.ModelUtils.convert[model_name]
+                model_info = g4f.models.ModelUtils.convert[model_name]
                 return JSONResponse({
                     'id': model_name,
                     'object': 'model',
@@ -119,17 +117,18 @@ class Api:
                 return Response(content=format_exception(e, config), status_code=500, media_type="application/json")

             if not config.stream:
-                return JSONResponse(response.to_json())
+                return JSONResponse((await response).to_json())

-            def streaming():
+            async def streaming():
                 try:
-                    for chunk in response:
+                    async for chunk in response:
                         yield f"data: {json.dumps(chunk.to_json())}\n\n"
                 except GeneratorExit:
                     pass
                 except Exception as e:
                     logging.exception(e)
-                    yield f'data: {format_exception(e, config)}'
+                    yield f'data: {format_exception(e, config)}\n\n'
+                yield "data: [DONE]\n\n"

             return StreamingResponse(streaming(), media_type="text/event-stream")
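With web_search in the config, an OpenAI-style client can now set it per request. A hedged sketch against a locally running API (assumes the default host and port 1337, that the flag is forwarded to the provider, and standard OpenAI chunk shapes beyond what the diff shows):

import json
import requests

resp = requests.post(
    "http://localhost:1337/v1/chat/completions",  # assumed default g4f API address
    json={
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": "Hello"}],
        "stream": True,
        "web_search": True,  # the new optional flag from this commit
    },
    stream=True,
)
for line in resp.iter_lines():
    # The server emits server-sent events terminated by "data: [DONE]".
    if not line.startswith(b"data: ") or line == b"data: [DONE]":
        continue
    chunk = json.loads(line[len(b"data: "):])
    delta = chunk["choices"][0].get("delta", {})
    print(delta.get("content") or "", end="")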

g4f/gui/client/index.html

@@ -134,7 +134,7 @@
<textarea id="Gemini-api_key" name="Gemini[api_key]" placeholder="&quot;__Secure-1PSID&quot; cookie"></textarea>
</div>
<div class="field box">
<label for="GeminiPro-api_key" class="label" title="">GeminiPro:</label>
<label for="GeminiPro-api_key" class="label" title="">GeminiPro API:</label>
<textarea id="GeminiPro-api_key" name="GeminiPro[api_key]" placeholder="api_key"></textarea>
</div>
<div class="field box">
@ -146,12 +146,12 @@
<textarea id="HuggingFace-api_key" name="HuggingFace[api_key]" placeholder="api_key"></textarea>
</div>
<div class="field box">
<label for="Openai-api_key" class="label" title="">Openai:</label>
<label for="Openai-api_key" class="label" title="">OpenAI API:</label>
<textarea id="Openai-api_key" name="Openai[api_key]" placeholder="api_key"></textarea>
</div>
<div class="field box">
<label for="OpenaiChat-api_key" class="label" title="">OpenaiChat:</label>
<textarea id="OpenaiChat-api_key" name="OpenaiChat[api_key]" placeholder="api_key"></textarea>
<label for="OpenaiAccount-api_key" class="label" title="">OpenAI ChatGPT:</label>
<textarea id="OpenaiAccount-api_key" name="OpenaiAccount[api_key]" placeholder="access_key"></textarea>
</div>
<div class="field box">
<label for="OpenRouter-api_key" class="label" title="">OpenRouter:</label>

g4f/providers/base_provider.py

@@ -272,6 +272,8 @@ class ProviderModelMixin:
     @classmethod
     def get_models(cls) -> list[str]:
+        if not cls.models:
+            return [cls.default_model]
         return cls.models

     @classmethod
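This fallback matters for providers like the new Ecosia class above, which set a default_model but no models list: get_models() now reports the default instead of an empty list, so callers enumerating provider models get at least one entry.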