2023-11-13 09:17:13 +03:00
|
|
|
import logging
|
2023-10-12 04:35:11 +03:00
|
|
|
import json
|
2023-11-02 04:27:35 +03:00
|
|
|
import uvicorn
|
|
|
|
import nest_asyncio
|
2024-01-01 19:48:57 +03:00
|
|
|
|
2024-04-07 11:36:13 +03:00
|
|
|
from fastapi import FastAPI, Response, Request
|
2024-02-23 04:35:13 +03:00
|
|
|
from fastapi.responses import StreamingResponse, RedirectResponse, HTMLResponse, JSONResponse
|
2024-04-07 11:36:13 +03:00
|
|
|
from fastapi.exceptions import RequestValidationError
|
|
|
|
from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY
|
|
|
|
from fastapi.encoders import jsonable_encoder
|
|
|
|
from pydantic import BaseModel
|
|
|
|
from typing import List, Union
|
2024-01-01 19:48:57 +03:00
|
|
|
|
2023-11-05 00:16:09 +03:00
|
|
|
import g4f
|
2024-02-23 04:35:13 +03:00
|
|
|
import g4f.debug
|
|
|
|
from g4f.client import Client
|
|
|
|
from g4f.typing import Messages
|
|
|
|
|
|
|
|
class ChatCompletionsConfig(BaseModel):
    """Request body for POST /v1/chat/completions (OpenAI-compatible subset)."""
    # Conversation history (g4f `Messages` type — presumably role/content
    # message dicts; confirm against g4f.typing).
    messages: Messages
    # Model identifier, e.g. "gpt-3.5-turbo".
    model: str
    # Optional g4f provider name; may also be supplied as a query parameter.
    provider: Union[str, None] = None
    # When True the response is streamed as server-sent events.
    stream: bool = False
    # Sampling temperature; omitted from the g4f call when None.
    temperature: Union[float, None] = None
    # Upper bound on generated tokens; omitted when None.
    max_tokens: Union[int, None] = None
    # One stop sequence or a list of them.
    stop: Union[list[str], str, None] = None
    # API key for the provider; falls back to the Authorization header.
    api_key: Union[str, None] = None
|
2023-11-02 04:27:35 +03:00
|
|
|
|
2023-11-05 00:16:09 +03:00
|
|
|
class Api:
    """FastAPI wrapper exposing an OpenAI-compatible HTTP API around g4f.

    Routes registered:
        GET  /                    -> redirect to /v1
        GET  /v1                  -> small HTML index page
        GET  /v1/models           -> list all known models
        GET  /v1/models/{name}    -> info for a single model
        POST /v1/chat/completions -> chat completion (optionally SSE-streamed)
        POST /v1/completions      -> placeholder, not implemented
    """

    def __init__(self, engine: g4f, debug: bool = True, sentry: bool = False,
                 list_ignored_providers: Union[List[str], None] = None) -> None:
        """Build the FastAPI app and register routes and error handlers.

        Args:
            engine: the g4f module itself (stored for compatibility).
            debug: when True, turns on g4f's internal debug logging.
            sentry: unused flag, kept for backward compatibility.
            list_ignored_providers: provider names excluded from completions.
        """
        self.engine = engine
        self.debug = debug
        self.sentry = sentry
        self.list_ignored_providers = list_ignored_providers

        if debug:
            g4f.debug.logging = True
        self.client = Client()

        # Allow uvicorn's loop to be (re-)entered even if an event loop is
        # already running in the host environment (e.g. a notebook).
        nest_asyncio.apply()
        self.app = FastAPI()

        self.routes()
        self.register_validation_exception_handler()

    def register_validation_exception_handler(self):
        """Report 422 validation errors in a simplified, stable JSON shape."""
        @self.app.exception_handler(RequestValidationError)
        async def validation_exception_handler(request: Request, exc: RequestValidationError):
            details = exc.errors()
            # Keep only the fields clients need from pydantic's error dicts.
            modified_details = [
                {
                    "loc": error["loc"],
                    "message": error["msg"],
                    "type": error["type"],
                }
                for error in details
            ]
            return JSONResponse(
                status_code=HTTP_422_UNPROCESSABLE_ENTITY,
                content=jsonable_encoder({"detail": modified_details}),
            )

    def routes(self):
        """Register all HTTP routes on the FastAPI app."""
        @self.app.get("/")
        async def read_root():
            return RedirectResponse("/v1", 302)

        @self.app.get("/v1")
        async def read_root_v1():
            return HTMLResponse('g4f API: Go to '
                                '<a href="/v1/chat/completions">chat/completions</a> '
                                'or <a href="/v1/models">models</a>.')

        @self.app.get("/v1/models")
        async def models():
            # Map model id -> model object (dict comprehension instead of
            # the dict(...) generator form).
            model_map = {
                model: g4f.models.ModelUtils.convert[model]
                for model in g4f.Model.__all__()
            }
            model_list = [{
                'id': model_id,
                'object': 'model',
                'created': 0,
                'owned_by': model.base_provider
            } for model_id, model in model_map.items()]
            return JSONResponse(model_list)

        @self.app.get("/v1/models/{model_name}")
        async def model_info(model_name: str):
            try:
                # Use the same lookup table as /v1/models for consistency.
                model_info = g4f.models.ModelUtils.convert[model_name]
                return JSONResponse({
                    'id': model_name,
                    'object': 'model',
                    'created': 0,
                    'owned_by': model_info.base_provider
                })
            # Was a bare `except:`; only a missing key means "unknown model".
            except KeyError:
                return JSONResponse({"error": "The model does not exist."})

        @self.app.post("/v1/chat/completions")
        async def chat_completions(config: ChatCompletionsConfig = None, request: Request = None, provider: str = None):
            try:
                # A query-string provider only applies when the body left it unset.
                config.provider = provider if config.provider is None else config.provider
                # Fall back to the HTTP Authorization header for the API key.
                if config.api_key is None and request is not None:
                    auth_header = request.headers.get("Authorization")
                    if auth_header is not None:
                        # Strip an optional "Bearer " scheme prefix.
                        auth_header = auth_header.split(None, 1)[-1]
                        if auth_header and auth_header != "Bearer":
                            config.api_key = auth_header
                response = self.client.chat.completions.create(
                    **config.dict(exclude_none=True),
                    ignored=self.list_ignored_providers
                )
            except Exception as e:
                logging.exception(e)
                return Response(content=format_exception(e, config), status_code=500, media_type="application/json")

            if not config.stream:
                return JSONResponse(response.to_json())

            def streaming():
                """Yield completion chunks as server-sent events."""
                try:
                    for chunk in response:
                        yield f"data: {json.dumps(chunk.to_json())}\n\n"
                except GeneratorExit:
                    # Client disconnected mid-stream; stop quietly.
                    pass
                except Exception as e:
                    logging.exception(e)
                    yield f'data: {format_exception(e, config)}'

            return StreamingResponse(streaming(), media_type="text/event-stream")

        @self.app.post("/v1/completions")
        async def completions():
            return Response(content=json.dumps({'info': 'Not working yet.'}, indent=4), media_type="application/json")

    def run(self, ip, use_colors: bool = False):
        """Serve the app with uvicorn; `ip` is a "host:port" string."""
        split_ip = ip.split(":")
        uvicorn.run(app=self.app, host=split_ip[0], port=int(split_ip[1]), use_colors=use_colors)
|
2024-02-23 04:35:13 +03:00
|
|
|
|
|
|
|
def format_exception(e: Exception, config: ChatCompletionsConfig) -> str:
    """Serialize an exception into an OpenAI-style JSON error string.

    Args:
        e: the exception being reported.
        config: the request config; may be None because chat_completions
            declares `config: ChatCompletionsConfig = None`, so attribute
            access is guarded to avoid raising while formatting an error.

    Returns:
        A JSON string with "error", "model" and "provider" keys.
    """
    last_provider = g4f.get_last_provider(True)
    return json.dumps({
        "error": {"message": f"{e.__class__.__name__}: {e}"},
        # Prefer what was actually used; fall back to the request's values.
        "model": last_provider.get("model") if last_provider else getattr(config, "model", None),
        "provider": last_provider.get("name") if last_provider else getattr(config, "provider", None)
    })
|
|
|
|
|
|
|
|
def run_api(host: str = '0.0.0.0', port: int = 1337, debug: bool = False, use_colors=True) -> None:
    """Construct an Api instance and serve it on host:port via uvicorn."""
    print(f'Starting server... [g4f v-{g4f.version.utils.current_version}]')
    server = Api(engine=g4f, debug=debug)
    server.run(f"{host}:{port}", use_colors=use_colors)
|