Add workers and use_colors options to api

This commit is contained in:
Heiner Lohaus 2024-04-20 15:41:49 +02:00
parent 932d3a3ca0
commit 83484c0a56
8 changed files with 103 additions and 76 deletions

19
etc/examples/api.py Normal file
View File

@ -0,0 +1,19 @@
# Example: stream a chat completion from a locally running g4f API server.
import json

import requests

API_URL = "http://localhost:1337/v1/chat/completions"

payload = {
    "model": "",
    "provider": "MetaAI",
    "stream": True,
    "messages": [
        {"role": "assistant", "content": "What can you do? Who are you?"}
    ],
}

response = requests.post(API_URL, json=payload, stream=True)
for raw_line in response.iter_lines():
    # Server-sent events: payload lines are prefixed with "data: ".
    if not raw_line.startswith(b"data: "):
        continue
    try:
        chunk = json.loads(raw_line[6:])
    except json.JSONDecodeError:
        continue
    delta = chunk.get("choices", [{"delta": {}}])[0]["delta"]
    print(delta.get("content", ""), end="")
print()

View File

@ -9,9 +9,15 @@ from aiohttp import ClientSession, BaseConnector
from ..typing import AsyncResult, Messages, Cookies
from ..requests import raise_for_status, DEFAULT_HEADERS
from ..image import ImageResponse
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt, get_connector, get_cookies
from .helper import format_prompt, get_connector
class Sources():
    """Container for the source links cited alongside a MetaAI response.

    Rendered via ``str()`` as a Markdown list of ``[title](link)`` entries,
    separated from the preceding text by a blank line.
    """
    def __init__(self, list: List[Dict[str, str]]) -> None:
        # NOTE: the parameter name shadows the builtin ``list`` but is kept
        # unchanged for backward compatibility with existing callers.
        self.list = list

    def __str__(self) -> str:
        markdown_links = (f"[{link['title']}]({link['link']})" for link in self.list)
        return "\n\n" + "\n".join(markdown_links)
class MetaAI(AsyncGeneratorProvider):
url = "https://www.meta.ai"
@ -19,6 +25,8 @@ class MetaAI(AsyncGeneratorProvider):
def __init__(self, proxy: str = None, connector: BaseConnector = None):
    """Create a provider instance with its own aiohttp session.

    Args:
        proxy: Optional proxy URL forwarded to the connector helper.
        connector: Optional pre-built aiohttp connector; when omitted,
            one is created by ``get_connector``.
    """
    self.session = ClientSession(connector=get_connector(connector, proxy), headers=DEFAULT_HEADERS)
    # Both are populated lazily on the first request
    # (see get_cookies / get_access_token elsewhere in this class).
    self.cookies: Cookies = None
    self.access_token: str = None
@classmethod
async def create_async_generator(
@ -32,11 +40,11 @@ class MetaAI(AsyncGeneratorProvider):
async for chunk in cls(proxy).prompt(format_prompt(messages)):
yield chunk
async def get_access_token(self, cookies: Cookies, birthday: str = "1999-01-01") -> str:
async def get_access_token(self, birthday: str = "1999-01-01") -> str:
url = "https://www.meta.ai/api/graphql/"
payload = {
"lsd": cookies["lsd"],
"lsd": self.lsd,
"fb_api_caller_class": "RelayModern",
"fb_api_req_friendly_name": "useAbraAcceptTOSForTempUserMutation",
"variables": json.dumps({
@ -48,29 +56,30 @@ class MetaAI(AsyncGeneratorProvider):
}
headers = {
"x-fb-friendly-name": "useAbraAcceptTOSForTempUserMutation",
"x-fb-lsd": cookies["lsd"],
"x-fb-lsd": self.lsd,
"x-asbd-id": "129477",
"alt-used": "www.meta.ai",
"sec-fetch-site": "same-origin"
}
async with self.session.post(url, headers=headers, cookies=cookies, data=payload) as response:
async with self.session.post(url, headers=headers, cookies=self.cookies, data=payload) as response:
await raise_for_status(response, "Fetch access_token failed")
auth_json = await response.json(content_type=None)
access_token = auth_json["data"]["xab_abra_accept_terms_of_service"]["new_temp_user_auth"]["access_token"]
return access_token
async def prompt(self, message: str, cookies: Cookies = None) -> AsyncResult:
access_token = None
if cookies is None:
cookies = await self.get_cookies()
access_token = await self.get_access_token(cookies)
else:
cookies = await self.get_cookies(cookies)
if cookies is not None:
self.cookies = cookies
self.access_token = None
if self.cookies is None:
self.cookies = await self.get_cookies()
if self.access_token is None:
self.access_token = await self.get_access_token()
url = "https://graph.meta.ai/graphql?locale=user"
#url = "https://www.meta.ai/api/graphql/"
payload = {
"access_token": access_token,
"access_token": self.access_token,
#"lsd": cookies["lsd"],
"fb_api_caller_class": "RelayModern",
"fb_api_req_friendly_name": "useAbraSendMessageMutation",
@ -95,7 +104,7 @@ class MetaAI(AsyncGeneratorProvider):
"x-fb-friendly-name": "useAbraSendMessageMutation",
#"x-fb-lsd": cookies["lsd"],
}
async with self.session.post(url, headers=headers, cookies=cookies, data=payload) as response:
async with self.session.post(url, headers=headers, cookies=self.cookies, data=payload) as response:
await raise_for_status(response, "Fetch response failed")
last_snippet_len = 0
fetch_id = None
@ -106,25 +115,25 @@ class MetaAI(AsyncGeneratorProvider):
continue
bot_response_message = json_line.get("data", {}).get("node", {}).get("bot_response_message", {})
streaming_state = bot_response_message.get("streaming_state")
fetch_id = bot_response_message.get("fetch_id")
fetch_id = bot_response_message.get("fetch_id") or fetch_id
if streaming_state in ("STREAMING", "OVERALL_DONE"):
#imagine_card = bot_response_message["imagine_card"]
snippet = bot_response_message["snippet"]
yield snippet[last_snippet_len:]
last_snippet_len = len(snippet)
elif streaming_state == "OVERALL_DONE":
break
new_snippet_len = len(snippet)
if new_snippet_len > last_snippet_len:
yield snippet[last_snippet_len:]
last_snippet_len = new_snippet_len
#if last_streamed_response is None:
# if attempts > 3:
# raise Exception("MetaAI is having issues and was not able to respond (Server Error)")
# access_token = await self.get_access_token()
# return await self.prompt(message=message, attempts=attempts + 1)
if fetch_id is not None:
sources = await self.fetch_sources(fetch_id, cookies, access_token)
sources = await self.fetch_sources(fetch_id)
if sources is not None:
yield sources
async def get_cookies(self, cookies: Cookies = None) -> dict:
async def get_cookies(self, cookies: Cookies = None) -> Cookies:
async with self.session.get("https://www.meta.ai/", cookies=cookies) as response:
await raise_for_status(response, "Fetch home failed")
text = await response.text()
@ -134,13 +143,13 @@ class MetaAI(AsyncGeneratorProvider):
"abra_csrf": self.extract_value(text, "abra_csrf"),
"datr": self.extract_value(text, "datr"),
}
cookies["lsd"] = self.extract_value(text, start_str='"LSD",[],{"token":"', end_str='"}')
self.lsd = self.extract_value(text, start_str='"LSD",[],{"token":"', end_str='"}')
return cookies
async def fetch_sources(self, fetch_id: str, cookies: Cookies, access_token: str) -> List[Dict]:
async def fetch_sources(self, fetch_id: str) -> Sources:
url = "https://graph.meta.ai/graphql?locale=user"
payload = {
"access_token": access_token,
"access_token": self.access_token,
"fb_api_caller_class": "RelayModern",
"fb_api_req_friendly_name": "AbraSearchPluginDialogQuery",
"variables": json.dumps({"abraMessageFetchID": fetch_id}),
@ -151,7 +160,7 @@ class MetaAI(AsyncGeneratorProvider):
"authority": "graph.meta.ai",
"x-fb-friendly-name": "AbraSearchPluginDialogQuery",
}
async with self.session.post(url, headers=headers, cookies=cookies, data=payload) as response:
async with self.session.post(url, headers=headers, cookies=self.cookies, data=payload) as response:
await raise_for_status(response)
response_json = await response.json()
try:
@ -171,7 +180,8 @@ class MetaAI(AsyncGeneratorProvider):
if start >= 0:
start+= len(start_str)
end = text.find(end_str, start)
return text[start:end]
if end >= 0:
return text[start:end]
def generate_offline_threading_id() -> str:
"""
@ -190,10 +200,3 @@ def generate_offline_threading_id() -> str:
threading_id = (timestamp << 22) | (random_value & ((1 << 22) - 1))
return str(threading_id)
class Sources():
    """Wrap a list of ``{"title": ..., "link": ...}`` dicts describing cited sources."""
    def __init__(self, list: List[Dict[str, str]]) -> None:
        # ``list`` shadows the builtin; name kept as-is (doc-only change).
        self.list = list
    def __str__(self) -> str:
        # Render as a Markdown link list, preceded by a blank line.
        return "\n\n" + ("\n".join([f"[{link['title']}]({link['link']})" for link in self.list]))

View File

@ -15,6 +15,8 @@ import g4f.debug
from g4f.client import AsyncClient
from g4f.typing import Messages
app = FastAPI()
class ChatCompletionsConfig(BaseModel):
messages: Messages
model: str
@ -25,53 +27,44 @@ class ChatCompletionsConfig(BaseModel):
stop: Union[list[str], str, None] = None
api_key: Optional[str] = None
web_search: Optional[bool] = None
proxy: Optional[str] = None
class Api:
def __init__(self, engine: g4f, debug: bool = True, sentry: bool = False,
list_ignored_providers: List[str] = None) -> None:
self.engine = engine
self.debug = debug
self.sentry = sentry
def __init__(self, list_ignored_providers: List[str] = None) -> None:
self.list_ignored_providers = list_ignored_providers
if debug:
g4f.debug.logging = True
self.client = AsyncClient()
self.app = FastAPI()
self.routes()
self.register_validation_exception_handler()
def set_list_ignored_providers(self, list: list):
self.list_ignored_providers = list
def register_validation_exception_handler(self):
@self.app.exception_handler(RequestValidationError)
@app.exception_handler(RequestValidationError)
async def validation_exception_handler(request: Request, exc: RequestValidationError):
details = exc.errors()
modified_details = []
for error in details:
modified_details.append(
{
"loc": error["loc"],
"message": error["msg"],
"type": error["type"],
}
)
modified_details.append({
"loc": error["loc"],
"message": error["msg"],
"type": error["type"],
})
return JSONResponse(
status_code=HTTP_422_UNPROCESSABLE_ENTITY,
content=jsonable_encoder({"detail": modified_details}),
)
def routes(self):
@self.app.get("/")
def register_routes(self):
@app.get("/")
async def read_root():
return RedirectResponse("/v1", 302)
@self.app.get("/v1")
@app.get("/v1")
async def read_root_v1():
return HTMLResponse('g4f API: Go to '
'<a href="/v1/chat/completions">chat/completions</a> '
'or <a href="/v1/models">models</a>.')
@self.app.get("/v1/models")
@app.get("/v1/models")
async def models():
model_list = dict(
(model, g4f.models.ModelUtils.convert[model])
@ -85,7 +78,7 @@ class Api:
} for model_id, model in model_list.items()]
return JSONResponse(model_list)
@self.app.get("/v1/models/{model_name}")
@app.get("/v1/models/{model_name}")
async def model_info(model_name: str):
try:
model_info = g4f.models.ModelUtils.convert[model_name]
@ -98,7 +91,7 @@ class Api:
except:
return JSONResponse({"error": "The model does not exist."})
@self.app.post("/v1/chat/completions")
@app.post("/v1/chat/completions")
async def chat_completions(config: ChatCompletionsConfig = None, request: Request = None, provider: str = None):
try:
config.provider = provider if config.provider is None else config.provider
@ -132,13 +125,13 @@ class Api:
return StreamingResponse(streaming(), media_type="text/event-stream")
@self.app.post("/v1/completions")
@app.post("/v1/completions")
async def completions():
return Response(content=json.dumps({'info': 'Not working yet.'}, indent=4), media_type="application/json")
def run(self, ip, use_colors : bool = False):
split_ip = ip.split(":")
uvicorn.run(app=self.app, host=split_ip[0], port=int(split_ip[1]), use_colors=use_colors)
api = Api()
api.register_routes()
api.register_validation_exception_handler()
def format_exception(e: Exception, config: ChatCompletionsConfig) -> str:
last_provider = g4f.get_last_provider(True)
@ -148,7 +141,19 @@ def format_exception(e: Exception, config: ChatCompletionsConfig) -> str:
"provider": last_provider.get("name") if last_provider else config.provider
})
def run_api(host: str = '0.0.0.0', port: int = 1337, debug: bool = False, use_colors=True) -> None:
print(f'Starting server... [g4f v-{g4f.version.utils.current_version}]')
app = Api(engine=g4f, debug=debug)
app.run(f"{host}:{port}", use_colors=use_colors)
def run_api(
    host: str = '0.0.0.0',
    port: int = 1337,
    bind: str = None,
    debug: bool = False,
    workers: int = None,
    use_colors: bool = None
) -> None:
    """Start the g4f API server with uvicorn.

    Args:
        host: Interface to listen on; overridden when ``bind`` is given.
        port: TCP port to listen on; overridden when ``bind`` is given.
        bind: Optional ``"host:port"`` string that overrides ``host``/``port``.
        debug: Enables g4f verbose logging; also the default for ``use_colors``.
        workers: Number of uvicorn worker processes (``None`` = uvicorn default).
        use_colors: Colorized uvicorn log output; defaults to ``debug``.
    """
    print(f'Starting server... [g4f v-{g4f.version.utils.current_version}]' + (" (debug)" if debug else ""))
    if use_colors is None:
        use_colors = debug
    if bind is not None:
        # NOTE(review): assumes exactly one ":" in bind — IPv6 literals would
        # break this split; confirm whether that is acceptable.
        host, port = bind.split(":")
    if debug:
        g4f.debug.logging = True
    # The app is passed as an import string ("g4f.api:app") so uvicorn can
    # re-import it in each spawned worker process when workers > 1.
    uvicorn.run("g4f.api:app", host=host, port=int(port), workers=workers, use_colors=use_colors)

View File

@ -1,6 +1,4 @@
import g4f
import g4f.api
if __name__ == "__main__":
print(f'Starting server... [g4f v-{g4f.version.utils.current_version}]')
g4f.api.Api(engine = g4f, debug = True).run(ip = "0.0.0.0:10000")
g4f.api.run_api(debug=True)

View File

@ -15,16 +15,18 @@ def main():
subparsers = parser.add_subparsers(dest="mode", help="Mode to run the g4f in.")
api_parser=subparsers.add_parser("api")
api_parser.add_argument("--bind", default="0.0.0.0:1337", help="The bind string.")
api_parser.add_argument("--debug", type=bool, default=False, help="Enable verbose logging")
api_parser.add_argument("--debug", type=bool, default=False, help="Enable verbose logging.")
api_parser.add_argument("--workers", type=int, default=None, help="Number of workers.")
api_parser.add_argument("--use_colors", type=bool, default=None, help="Use colors.")
api_parser.add_argument("--ignored-providers", nargs="+", choices=[provider.name for provider in IgnoredProviders],
default=[], help="List of providers to ignore when processing request.")
subparsers.add_parser("gui", parents=[gui_parser()], add_help=False)
args = parser.parse_args()
if args.mode == "api":
from g4f.api import Api
controller=Api(engine=g4f, debug=args.debug, list_ignored_providers=args.ignored_providers)
controller.run(args.bind)
import g4f.api
g4f.api.api.set_list_ignored_providers(args.ignored_providers)
g4f.api.run_api(bind=args.bind, debug=args.debug, workers=args.workers, use_colors=args.use_colors)
elif args.mode == "gui":
run_gui_args(args)
else:

View File

@ -65,7 +65,7 @@ def get_browser(
WebDriver: An instance of WebDriver configured with the specified options.
"""
if not has_requirements:
raise MissingRequirementsError('Webdriver packages are not installed | pip install -U g4f[webdriver]')
raise MissingRequirementsError('Install Webdriver packages | pip install -U g4f[webdriver]')
browser = find_chrome_executable()
if browser is None:
raise MissingRequirementsError('Install "Google Chrome" browser')