diff --git a/backend/modules/brain/knowledge_brain_qa.py b/backend/modules/brain/knowledge_brain_qa.py index 7e64beb1f..1ff26fa04 100644 --- a/backend/modules/brain/knowledge_brain_qa.py +++ b/backend/modules/brain/knowledge_brain_qa.py @@ -305,8 +305,16 @@ class KnowledgeBrainQA(BaseModel, QAInterface): if self.model_compatible_with_function_calling(model=self.model): if model_response["answer"].tool_calls: citations = model_response["answer"].tool_calls[-1]["args"]["citations"] + followup_questions = model_response["answer"].tool_calls[-1]["args"][ + "followup_questions" + ] + thoughts = model_response["answer"].tool_calls[-1]["args"]["thoughts"] if citations: citations = citations + if followup_questions: + metadata["followup_questions"] = followup_questions + if thoughts: + metadata["thoughts"] = thoughts answer = model_response["answer"].tool_calls[-1]["args"]["answer"] else: answer = model_response["answer"].content @@ -370,6 +378,24 @@ class KnowledgeBrainQA(BaseModel, QAInterface): and "citations" in gathered.tool_calls[-1]["args"] ): citations = gathered.tool_calls[-1]["args"]["citations"] + if ( + gathered.tool_calls + and gathered.tool_calls[-1].get("args") + and "followup_questions" in gathered.tool_calls[-1]["args"] + ): + followup_questions = gathered.tool_calls[-1]["args"][ + "followup_questions" + ] + streamed_chat_history.metadata["followup_questions"] = ( + followup_questions + ) + if ( + gathered.tool_calls + and gathered.tool_calls[-1].get("args") + and "thoughts" in gathered.tool_calls[-1]["args"] + ): + thoughts = gathered.tool_calls[-1]["args"]["thoughts"] + streamed_chat_history.metadata["thoughts"] = thoughts else: if chunk.get("answer"): response_tokens += chunk["answer"].content diff --git a/backend/modules/brain/rags/quivr_rag.py b/backend/modules/brain/rags/quivr_rag.py index 7af9c33d3..29005412e 100644 --- a/backend/modules/brain/rags/quivr_rag.py +++ b/backend/modules/brain/rags/quivr_rag.py @@ -1,3 +1,4 @@ +import datetime import os 
from operator import itemgetter from typing import List, Optional @@ -44,9 +45,18 @@ class cited_answer(BaseModelV1): description="The integer IDs of the SPECIFIC sources which justify the answer.", ) + thoughts: str = FieldV1( + ..., + description="Explain shortly what you did to generate the answer. Explain any assumptions you made, and why you made them.", + ) + followup_questions: List[str] = FieldV1( + ..., + description="Generate up to 3 follow-up questions that could be asked based on the answer given or context provided.", + ) + # First step is to create the Rephrasing Prompt -_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language. Keep as much details as possible from previous messages. Keep entity names and all. +_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language. Keep as much details as possible from previous messages. Keep entity names and all. Chat History: {chat_history} @@ -64,11 +74,20 @@ User Question: {question} Answer: """ -system_message_template = """ -When answering use markdown to make it concise and neat. -Use the following pieces of context from files provided by the user that are store in a brain to answer the users question in the same language as the user question. Your name is Quivr. You're a helpful assistant. +today_date = datetime.datetime.now().strftime("%B %d, %Y") + +system_message_template = ( + f"Your name is Quivr. You're a helpful assistant. Today's date is {today_date}." +) + +system_message_template += """ +When answering, use markdown to make it neat. +Answer in a concise and clear manner. +Use the following pieces of context from files provided by the user to answer the user's question. +Answer in the same language as the user question. 
If you don't know the answer with the context provided from the files, just say that you don't know, don't try to make up an answer. -User instruction to follow if provided to answer: {custom_instructions} +If not None, follow this user instruction when answering: {custom_instructions} +Don't cite the source id in the answer, but you can use the sources to answer the question. """