feat: add APIBrainQA (#1606)
Issue: https://github.com/StanGirard/quivr/issues/1566
This commit introduces API brains: a brain's ApiBrainDefinition is exposed to the LLM as an OpenAI function-call schema, the model's function-call arguments are forwarded to the external API (with the user's stored secrets sent as request headers), and the answer is streamed back into the chat history.

Parent: f49c8c20d0
Commit: addcd27fce
backend/llm/api_brain_qa.py (new file)
import json
from typing import Optional
from uuid import UUID

from langchain.schema import FunctionMessage
from litellm import completion
from models.chats import ChatQuestion
from models.databases.supabase.chats import CreateChatHistory
from repository.brain.get_brain_by_id import get_brain_by_id
from repository.chat.format_chat_history import (
    format_chat_history,
    format_history_to_openai_mesages,
)
from repository.chat.get_chat_history import GetChatHistoryOutput, get_chat_history
from repository.chat.update_chat_history import update_chat_history
from repository.chat.update_message_by_id import update_message_by_id

from llm.qa_base import QABaseBrainPicking
from llm.utils.call_brain_api import call_brain_api
from llm.utils.get_api_brain_definition_as_json_schema import (
    get_api_brain_definition_as_json_schema,
)


class APIBrainQA(QABaseBrainPicking):
    user_id: UUID

    def __init__(
        self,
        model: str,
        brain_id: str,
        chat_id: str,
        user_id: UUID,
        streaming: bool = False,
        prompt_id: Optional[UUID] = None,
        **kwargs,
    ):
        super().__init__(
            model=model,
            brain_id=brain_id,
            chat_id=chat_id,
            streaming=streaming,
            prompt_id=prompt_id,
            **kwargs,
        )
        self.user_id = user_id

    async def generate_stream(self, chat_id: UUID, question: ChatQuestion):
        if not question.brain_id:
            raise Exception("No brain id provided")

        history = get_chat_history(self.chat_id)
        prompt_content = self.prompt_to_use.content if self.prompt_to_use else ""
        brain = get_brain_by_id(question.brain_id)
        if not brain:
            raise Exception("No brain found")

        messages = format_history_to_openai_mesages(
            format_chat_history(history),
            prompt_content,
            question.question,
        )

        # The finish_reason check below needs a complete response object,
        # so this first call must not stream.
        response = completion(
            model=self.model,
            temperature=self.temperature,
            max_tokens=self.max_tokens,
            messages=messages,
            functions=[get_api_brain_definition_as_json_schema(brain)],
            stream=False,
        )

        if response.choices[0].finish_reason == "function_call":
            # The model returns its arguments as a JSON string, so this must
            # be json.loads (json.load expects a file-like object).
            arguments = json.loads(
                response.choices[0].message["function_call"]["arguments"]
            )

            content = call_brain_api(
                brain_id=question.brain_id, user_id=self.user_id, arguments=arguments
            )
            messages.append(FunctionMessage(name=brain.name, content=content))

        # Stream the final answer; when a function was called, messages now
        # include its result.
        response = completion(
            model=self.model,
            temperature=self.temperature,
            max_tokens=self.max_tokens,
            messages=messages,
            stream=True,
        )

        streamed_chat_history = update_chat_history(
            CreateChatHistory(
                **{
                    "chat_id": chat_id,
                    "user_message": question.question,
                    "assistant": "",
                    "brain_id": question.brain_id,
                    "prompt_id": self.prompt_to_use_id,
                }
            )
        )
        streamed_chat_history = GetChatHistoryOutput(
            **{
                "chat_id": str(chat_id),
                "message_id": streamed_chat_history.message_id,
                "message_time": streamed_chat_history.message_time,
                "user_message": question.question,
                "assistant": "",
                "prompt_title": self.prompt_to_use.title
                if self.prompt_to_use
                else None,
                "brain_name": brain.name if brain else None,
            }
        )

        response_tokens = []

        for chunk in response:
            # A streamed delta is a dict that may or may not carry text.
            new_token = chunk["choices"][0]["delta"].get("content") or ""
            streamed_chat_history.assistant = new_token
            response_tokens.append(new_token)
            yield f"data: {json.dumps(streamed_chat_history.dict())}"

        update_message_by_id(
            message_id=str(streamed_chat_history.message_id),
            user_message=question.question,
            assistant="".join(response_tokens),
        )
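For orientation, a minimal sketch of how this generator is consumed. The ids, model name, and question are hypothetical; the real wiring lives in the chat routes via StreamingResponse:

    import asyncio
    from uuid import uuid4

    from llm.api_brain_qa import APIBrainQA
    from models.chats import ChatQuestion

    # Hypothetical ids; in production these come from the request.
    brain_id, chat_id = str(uuid4()), str(uuid4())
    qa = APIBrainQA(
        model="gpt-3.5-turbo-0613",
        brain_id=brain_id,
        chat_id=chat_id,
        user_id=uuid4(),
        streaming=True,
    )

    async def main():
        question = ChatQuestion(question="What is on my calendar today?", brain_id=brain_id)
        # Each yielded item is an SSE "data: {...}" payload carrying the
        # next assistant token plus chat metadata.
        async for event in qa.generate_stream(chat_id, question):
            print(event)

    asyncio.run(main())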
backend/llm/utils/call_brain_api.py (new file)
from uuid import UUID

from llm.utils.extract_brain_api_params_values_from_llm_output import (
    extract_brain_api_params_values_from_llm_output,
)
from llm.utils.make_api_request import make_api_request
from repository.api_brain_definition.get_api_brain_definition import (
    get_api_brain_definition,
)
from repository.external_api_secret.read_secret import read_secret


def call_brain_api(brain_id: UUID, user_id: UUID, arguments: dict):
    brain_definition = get_api_brain_definition(brain_id)
    if brain_definition is None:
        raise Exception("Brain definition not found")

    # Split the LLM's function-call arguments into request params...
    brain_params = brain_definition.params.properties
    brain_params_values = extract_brain_api_params_values_from_llm_output(
        brain_params, arguments
    )

    # ...and search (query-string) params.
    brain_search_params = brain_definition.search_params.properties
    brain_search_params_values = extract_brain_api_params_values_from_llm_output(
        brain_search_params, arguments
    )

    # Resolve the user's stored secrets; they are sent as request headers.
    secrets = brain_definition.secrets
    secrets_values = {}

    for secret in secrets:
        secret_value = read_secret(
            user_id=user_id, brain_id=brain_id, secret_name=secret.name
        )
        secrets_values[secret.name] = secret_value

    return make_api_request(
        api_url=brain_definition.url,
        params=brain_params_values,
        search_params=brain_search_params_values,
        secrets=secrets_values,
    )
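To make the data flow concrete, suppose (hypothetically) a brain whose definition declares a search param q, a regular param limit, and a secret named Authorization:

    # Hypothetical walkthrough, not runnable against a real database.
    arguments = {"q": "python", "limit": 5}   # as produced by the LLM
    # extract_brain_api_params_values_from_llm_output picks out
    #   {"limit": 5} for params and {"q": "python"} for search_params,
    # read_secret resolves {"Authorization": "<stored value>"}, and the
    # final call is equivalent to:
    #   make_api_request(
    #       api_url=brain_definition.url,
    #       params={"limit": 5},
    #       search_params={"q": "python"},
    #       secrets={"Authorization": "<stored value>"},
    #   )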
backend/llm/utils/extract_brain_api_params_values_from_llm_output.py (new file)
from models.ApiBrainDefinition import ApiBrainDefinitionSchemaProperty


def extract_brain_api_params_values_from_llm_output(
    params: list[ApiBrainDefinitionSchemaProperty], arguments: dict
):
    params_values = {}

    for param in params:
        if param.name in arguments:
            params_values[param.name] = arguments[param.name]
            continue

        # The LLM omitted this parameter; that is only an error if the
        # API definition marks it as required.
        if param.required:
            raise Exception(f"Missing param {param.name}")

    return params_values
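A worked example with hypothetical properties:

    # Hypothetical schema properties.
    params = [
        ApiBrainDefinitionSchemaProperty(
            type="string", description="search query", enum=[], name="q", required=True
        ),
        ApiBrainDefinitionSchemaProperty(
            type="integer", description="max results", enum=[], name="limit", required=False
        ),
    ]

    extract_brain_api_params_values_from_llm_output(params, {"q": "python"})
    # -> {"q": "python"}: the optional "limit" is skipped silently,
    # whereas omitting the required "q" would raise "Missing param q".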
backend/llm/utils/get_api_brain_definition_as_json_schema.py (new file)
from models.brain_entity import BrainEntity
from repository.api_brain_definition.get_api_brain_definition import (
    get_api_brain_definition,
)


def get_api_brain_definition_as_json_schema(brain: BrainEntity):
    if not brain:
        raise Exception("No brain found")

    api_brain_definition = get_api_brain_definition(brain.id)
    if not api_brain_definition:
        raise Exception("No api brain definition found")

    required = []
    required.extend(api_brain_definition.params.required)
    required.extend(api_brain_definition.search_params.required)

    # Merge regular and search params into one flat property map
    # ("prop" avoids shadowing the built-in property()).
    properties = {}
    for prop in api_brain_definition.params.properties:
        properties[prop.name] = prop
    for prop in api_brain_definition.search_params.properties:
        properties[prop.name] = prop

    parameters = {
        "type": "object",
        "properties": properties,
        "required": required,
    }
    schema = {
        "name": brain.name,
        "description": brain.description,
        "parameters": parameters,
    }

    return schema
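For a hypothetical brain named "github" with a single required search param "repo", the result has the shape OpenAI function calling expects (note the property values are ApiBrainDefinitionSchemaProperty objects, which litellm must serialize on the way out):

    {
        "name": "github",
        "description": "Query the GitHub API",
        "parameters": {
            "type": "object",
            "properties": {"repo": ApiBrainDefinitionSchemaProperty(...)},
            "required": ["repo"],
        },
    }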
backend/llm/utils/make_api_request.py (new file)
import requests


def make_api_request(api_url, params, search_params, secrets) -> str:
    headers = {}

    # Search params are appended to the URL by hand; the values are not
    # URL-encoded here, so they must already be safe.
    api_url_with_search_params = api_url + "?"
    for search_param in search_params:
        api_url_with_search_params += f"{search_param}={search_params[search_param]}&"

    # Secrets travel as request headers.
    for secret in secrets:
        headers[secret] = secrets[secret]

    response = requests.get(
        url=api_url_with_search_params, params=params, headers=headers
    )

    return str(response.json())
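With hypothetical inputs api_url="https://api.example.com/search", search_params={"q": "python"}, and params={"page": 2}, the GET request resolves to:

    https://api.example.com/search?q=python&&page=2

The doubled "&" comes from the trailing separator left by the loop; most servers ignore it. requests URL-encodes params, but the hand-built search_params values are interpolated verbatim.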
backend/models/ApiBrainDefinition.py (modified)
@@ -3,10 +3,28 @@ from uuid import UUID
 from pydantic import BaseModel


+class ApiBrainDefinitionSchemaProperty(BaseModel):
+    type: str
+    description: str
+    enum: list
+    name: str
+    required: bool
+
+
+class ApiBrainDefinitionSchema(BaseModel):
+    properties: list[ApiBrainDefinitionSchemaProperty]
+    required: list[str]
+
+
+class ApiBrainDefinitionSecret(BaseModel):
+    name: str
+    type: str
+
+
 class ApiBrainDefinition(BaseModel):
     brain_id: UUID
     method: str
     url: str
-    params: dict
-    search_params: dict
-    secrets: dict
+    params: ApiBrainDefinitionSchema
+    search_params: ApiBrainDefinitionSchema
+    secrets: list[ApiBrainDefinitionSecret]
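Under the new typed schema, a hypothetical definition looks like:

    from uuid import uuid4

    ApiBrainDefinition(
        brain_id=uuid4(),
        method="GET",
        url="https://api.example.com/search",
        params=ApiBrainDefinitionSchema(properties=[], required=[]),
        search_params=ApiBrainDefinitionSchema(
            properties=[
                ApiBrainDefinitionSchemaProperty(
                    type="string", description="search query", enum=[], name="q", required=True
                )
            ],
            required=["q"],
        ),
        secrets=[ApiBrainDefinitionSecret(name="Authorization", type="string")],
    )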
backend/repository/external_api_secret/create_secret.py (modified)
@@ -1,7 +1,8 @@
 from uuid import UUID

 from models import get_supabase_client
-from utils import build_secret_unique_name
+
+from repository.external_api_secret.utils import build_secret_unique_name


 def create_secret(
backend/repository/external_api_secret/delete_secret.py (modified)
@@ -1,7 +1,8 @@
 from uuid import UUID

 from models import get_supabase_client
-from utils import build_secret_unique_name
+
+from repository.external_api_secret.utils import build_secret_unique_name


 def delete_secret(user_id: UUID, brain_id: UUID, secret_name: str) -> bool:
backend/repository/external_api_secret/read_secret.py (modified)
@@ -1,11 +1,14 @@
 from uuid import UUID

 from models import get_supabase_client
-from utils import build_secret_unique_name
+
+from repository.external_api_secret.utils import build_secret_unique_name


 def read_secret(
-    user_id: UUID, brain_id: UUID, secret_name: str, secret_value
+    user_id: UUID,
+    brain_id: UUID,
+    secret_name: str,
 ) -> UUID | None:
     supabase_client = get_supabase_client()
     response = supabase_client.rpc(
backend/routes/chat/brainful_chat.py (modified)
@@ -1,9 +1,20 @@
+from llm.api_brain_qa import APIBrainQA
 from llm.qa_base import QABaseBrainPicking
+from models.brain_entity import BrainType
+from repository.brain import get_brain_details
+from repository.brain.get_brain_by_id import get_brain_by_id
 from routes.authorizations.brain_authorization import validate_brain_authorization
 from routes.authorizations.types import RoleEnum
 from routes.chat.interface import ChatInterface

-from repository.brain import get_brain_details
+models_supporting_function_calls = [
+    "gpt-4",
+    "gpt-4-1106-preview",
+    "gpt-4-0613",
+    "gpt-3.5-turbo",
+    "gpt-3.5-turbo-1106",
+    "gpt-3.5-turbo-0613",
+]


 class BrainfulChat(ChatInterface):
@@ -30,8 +41,28 @@ class BrainfulChat(ChatInterface):
         user_openai_api_key,
         streaming,
         prompt_id,
+        user_id,
     ):
-        return QABaseBrainPicking(
+        brain = get_brain_by_id(brain_id)
+
+        if not brain:
+            raise Exception("No brain found")
+
+        if (
+            brain.brain_type == BrainType.DOC
+            or model not in models_supporting_function_calls
+        ):
+            return QABaseBrainPicking(
+                chat_id=chat_id,
+                model=model,
+                max_tokens=max_tokens,
+                temperature=temperature,
+                brain_id=brain_id,
+                user_openai_api_key=user_openai_api_key,
+                streaming=streaming,
+                prompt_id=prompt_id,
+            )
+        return APIBrainQA(
             chat_id=chat_id,
             model=model,
             max_tokens=max_tokens,
@@ -40,4 +71,5 @@ class BrainfulChat(ChatInterface):
             user_openai_api_key=user_openai_api_key,
             streaming=streaming,
             prompt_id=prompt_id,
+            user_id=user_id,
         )
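In short, the factory keeps the existing QA pipeline for document brains and for models that cannot do function calling, and routes everything else to the new APIBrainQA. Condensed, the dispatch rule is:

    # Same rule as the diff above, condensed for reference.
    use_api_brain_qa = (
        brain.brain_type != BrainType.DOC
        and model in models_supporting_function_calls
    )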
backend/routes/chat/brainless_chat.py (modified)
@@ -1,7 +1,6 @@
 from llm.qa_headless import HeadlessQA
-from routes.chat.interface import ChatInterface
-
 from repository.user_identity import get_user_identity
+from routes.chat.interface import ChatInterface


 class BrainlessChat(ChatInterface):
@@ -24,6 +23,7 @@ class BrainlessChat(ChatInterface):
         user_openai_api_key,
         streaming,
         prompt_id,
+        user_id,
     ):
         return HeadlessQA(
             chat_id=chat_id,
|
@ -32,6 +32,7 @@ from repository.chat.get_chat_history_with_notifications import (
|
||||
get_chat_history_with_notifications,
|
||||
)
|
||||
from repository.notification.remove_chat_notifications import remove_chat_notifications
|
||||
|
||||
from routes.chat.factory import get_chat_strategy
|
||||
from routes.chat.utils import (
|
||||
NullableUUID,
|
||||
@ -182,6 +183,7 @@ async def create_question_handler(
|
||||
user_openai_api_key=current_user.openai_api_key, # pyright: ignore reportPrivateUsage=none
|
||||
streaming=False,
|
||||
prompt_id=chat_question.prompt_id,
|
||||
user_id=current_user.id,
|
||||
)
|
||||
|
||||
chat_answer = gpt_answer_generator.generate_answer(chat_id, chat_question)
|
||||
@ -258,6 +260,7 @@ async def create_stream_question_handler(
|
||||
streaming=True,
|
||||
prompt_id=chat_question.prompt_id,
|
||||
brain_id=str(brain_id),
|
||||
user_id=current_user.id,
|
||||
)
|
||||
|
||||
return StreamingResponse(
|
||||
|