Revert "feat(chat): Add follow up questions functionality" (#2246)

Reverts QuivrHQ/quivr#2241
Stan Girard 2024-02-22 15:35:39 -08:00 committed by GitHub
parent 44312dc617
commit 81072b3841
3 changed files with 16 additions and 18 deletions


@@ -181,8 +181,6 @@ class KnowledgeBrainQA(BaseModel, QAInterface):
             brain_id=brain_id,
             chat_id=chat_id,
             streaming=streaming,
-            max_input=self.max_input,
-            max_tokens=self.max_tokens,
             **kwargs,
         )
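The two removed keyword arguments stopped forwarding the brain's token limits into the object constructed here. A minimal, hypothetical sketch of that pattern, with class and field names that are illustrative only and not Quivr's actual definitions:

from pydantic import BaseModel


class RAGHelper(BaseModel):
    """Stand-in for the object receiving the kwargs (illustrative only)."""
    brain_id: str
    chat_id: str
    streaming: bool = False
    max_input: int = 2000   # hypothetical defaults used when the caller omits them
    max_tokens: int = 1000


class BrainQA(BaseModel):
    brain_id: str
    chat_id: str
    streaming: bool = False
    max_input: int = 4000
    max_tokens: int = 2000

    def build_helper(self, **kwargs) -> RAGHelper:
        return RAGHelper(
            brain_id=self.brain_id,
            chat_id=self.chat_id,
            streaming=self.streaming,
            # max_input=self.max_input,    # forwarded before the revert
            # max_tokens=self.max_tokens,  # forwarded before the revert
            **kwargs,
        )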


@@ -5,17 +5,14 @@ from uuid import UUID
 from langchain.chains import ConversationalRetrievalChain
 from langchain.embeddings.ollama import OllamaEmbeddings
 from langchain.llms.base import BaseLLM
+from langchain.memory import ConversationBufferMemory
 from langchain.prompts import HumanMessagePromptTemplate
 from langchain.schema import format_document
 from langchain_community.chat_models import ChatLiteLLM
-from langchain_core.messages import SystemMessage
+from langchain_core.messages import SystemMessage, get_buffer_string
 from langchain_core.output_parsers import StrOutputParser
-from langchain_core.prompts import (
-    ChatPromptTemplate,
-    MessagesPlaceholder,
-    PromptTemplate,
-)
-from langchain_core.runnables import RunnablePassthrough
+from langchain_core.prompts import ChatPromptTemplate, PromptTemplate
+from langchain_core.runnables import RunnableLambda, RunnablePassthrough
 from langchain_openai import OpenAIEmbeddings
 from llm.utils.get_prompt_to_use import get_prompt_to_use
 from logger import get_logger
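The restored get_buffer_string import is what the pre-#2241 chain uses to flatten stored chat history into a single transcript string for the condense-question step. A small self-contained sketch of its output (the example messages are made up):

from langchain_core.messages import AIMessage, HumanMessage, get_buffer_string

history = [
    HumanMessage(content="What is Quivr?"),
    AIMessage(content="Quivr is an open-source RAG assistant."),
]

# Flattens the message list into one prefixed transcript string.
print(get_buffer_string(history))
# Human: What is Quivr?
# AI: Quivr is an open-source RAG assistant.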
@@ -56,7 +53,6 @@ ANSWER_PROMPT = ChatPromptTemplate.from_messages(
                 "When answering use markdown or any other techniques to display the content in a nice and aerated way. Use the following pieces of context from files provided by the user to answer the users question in the same language as the user question. Your name is Quivr. You're a helpful assistant. If you don't know the answer with the context provided from the files, just say that you don't know, don't try to make up an answer."
             )
         ),
-        MessagesPlaceholder(variable_name="chat_history", optional=False),
         HumanMessagePromptTemplate.from_template(template_answer),
     ]
 )
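The deleted MessagesPlaceholder line is what spliced the prior turns directly into ANSWER_PROMPT; after the revert the answer prompt no longer receives chat history. A minimal sketch of how such a placeholder behaves (the prompt text and example turns are illustrative):

from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from langchain_core.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    MessagesPlaceholder,
)

prompt = ChatPromptTemplate.from_messages(
    [
        SystemMessage(content="You are a helpful assistant."),
        # Splices the list bound to "chat_history" into the message sequence.
        MessagesPlaceholder(variable_name="chat_history", optional=False),
        HumanMessagePromptTemplate.from_template("{question}"),
    ]
)

messages = prompt.format_messages(
    chat_history=[HumanMessage(content="Hi"), AIMessage(content="Hello!")],
    question="What did I just say?",
)
# -> system message, the two history turns, then the new human question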
@@ -205,17 +201,23 @@ class QuivrRAG(BaseModel):
     def get_chain(self):
         retriever_doc = self.get_retriever()
 
+        memory = ConversationBufferMemory(
+            return_messages=True, output_key="answer", input_key="question"
+        )
+
+        loaded_memory = RunnablePassthrough.assign(
+            chat_history=RunnableLambda(memory.load_memory_variables)
+            | itemgetter("history"),
+        )
-        _inputs = RunnablePassthrough()
 
         standalone_question = {
             "standalone_question": {
-                "question": itemgetter("question"),
-                "chat_history": itemgetter("chat_history"),
+                "question": lambda x: x["question"],
+                "chat_history": lambda x: get_buffer_string(x["chat_history"]),
             }
             | CONDENSE_QUESTION_PROMPT
             | ChatLiteLLM(temperature=0, model=self.model)
             | StrOutputParser(),
-            "chat_history": itemgetter("chat_history"),
         }
 
         prompt_custom_user = self.prompt_to_use()
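The restored block rebuilds the pre-#2241 way of obtaining chat history: a ConversationBufferMemory is read through RunnableLambda and merged into the chain input as chat_history. A runnable sketch of just that step (the save_context call and the strings are example data):

from operator import itemgetter

from langchain.memory import ConversationBufferMemory
from langchain_core.runnables import RunnableLambda, RunnablePassthrough

memory = ConversationBufferMemory(
    return_messages=True, output_key="answer", input_key="question"
)
memory.save_context({"question": "Hi"}, {"answer": "Hello!"})

# load_memory_variables returns {"history": [...messages...]}; assign() copies
# that list onto the incoming dict under the "chat_history" key.
loaded_memory = RunnablePassthrough.assign(
    chat_history=RunnableLambda(memory.load_memory_variables) | itemgetter("history"),
)

print(loaded_memory.invoke({"question": "What did I just say?"}))
# {'question': 'What did I just say?', 'chat_history': [HumanMessage(...), AIMessage(...)]}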
@@ -228,14 +230,12 @@ class QuivrRAG(BaseModel):
             "docs": itemgetter("standalone_question") | retriever_doc,
             "question": lambda x: x["standalone_question"],
             "custom_instructions": lambda x: prompt_to_use,
-            "chat_history": itemgetter("chat_history"),
         }
 
         final_inputs = {
             "context": lambda x: self._combine_documents(x["docs"]),
             "question": itemgetter("question"),
             "custom_instructions": itemgetter("custom_instructions"),
-            "chat_history": itemgetter("chat_history"),
         }
 
         # And finally, we do the part that returns the answers
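Only the chat_history entries are dropped from these two mappings; the remaining itemgetter and lambda entries are interchangeable ways of projecting a key when a plain dict is used as an LCEL step. A tiny sketch of that coercion (keys and values are examples):

from operator import itemgetter

from langchain_core.runnables import RunnableParallel

# A dict used in a chain is coerced to a RunnableParallel: every value receives
# the same input, and itemgetter("key") / lambda x: x["key"] are equivalent.
projector = RunnableParallel(
    question=itemgetter("question"),
    shouted=lambda x: x["question"].upper(),
)

print(projector.invoke({"question": "hello", "docs": []}))
# {'question': 'hello', 'shouted': 'HELLO'}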
@@ -246,4 +246,4 @@ class QuivrRAG(BaseModel):
             "docs": itemgetter("docs"),
         }
 
-        return _inputs | standalone_question | retrieved_documents | answer
+        return loaded_memory | standalone_question | retrieved_documents | answer
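The final line swaps the chain's entry point back from the plain _inputs passthrough to the memory-loading step. A toy sketch of how the four piped stages hand a growing dict from one to the next; the retriever, prompts, and LLM are faked with lambdas, so only the wiring shape matches the real chain:

from operator import itemgetter

from langchain_core.runnables import RunnableLambda, RunnablePassthrough

# Pretend history loader in place of ConversationBufferMemory.
loaded_memory = RunnablePassthrough.assign(
    chat_history=lambda x: "Human: Hi\nAI: Hello!",
)

standalone_question = {
    "standalone_question": RunnableLambda(
        lambda x: f"{x['question']} (condensed using: {x['chat_history']!r})"
    ),
}

retrieved_documents = {
    "docs": itemgetter("standalone_question") | RunnableLambda(lambda q: [f"doc about {q}"]),
    "question": itemgetter("standalone_question"),
}

answer = {
    "answer": RunnableLambda(lambda x: f"Answer to {x['question']!r} from {x['docs']}"),
    "docs": itemgetter("docs"),
}

chain = loaded_memory | standalone_question | retrieved_documents | answer
print(chain.invoke({"question": "What is Quivr?"}))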


@@ -4,7 +4,7 @@ from uuid import UUID
 from modules.chat.dto.outputs import GetChatHistoryOutput
 from modules.notification.entity.notification import Notification
-from pydantic import BaseModel
+from pydantic import BaseModel, ConfigDict
 
 
 class ChatMessage(BaseModel):
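The restored ConfigDict import is pydantic v2's mechanism for model-level settings on ChatMessage. A minimal sketch of the usage pattern; the option shown is illustrative, since the diff does not show which settings the real model declares:

from pydantic import BaseModel, ConfigDict


class ExampleChatMessage(BaseModel):
    # In pydantic v2, per-model settings live in a ConfigDict bound to model_config.
    model_config = ConfigDict(extra="ignore")  # illustrative option only

    chat_id: str
    content: str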