quivr/backend/modules/brain/integrations/Big/Brain.py
Stan Girard b60a31e561
docs: Add docstrings to integration brains (#2582)
This pull request adds comprehensive docstrings to the Brain classes
within the `backend/modules/brain/integrations` directory, enhancing
code documentation and readability. The changes include:

- **BigBrain (`Big/Brain.py`)**: Adds a class-level docstring explaining
the purpose and functionality of the BigBrain class, along with
method-level docstrings detailing the operations performed by each
method.
- **ClaudeBrain (`Claude/Brain.py`)**: Introduces a class-level
docstring that describes the ClaudeBrain class's integration with the
Claude model for conversational AI capabilities, and method-level
docstrings that clarify the purpose of each method.
- **GPT4Brain (`GPT4/Brain.py`)**: Updates include a detailed
class-level docstring outlining the GPT4Brain's integration with GPT-4
for real-time answers and tool support, along with method-level
docstrings explaining the functionality of each method.
- **NotionBrain (`Notion/Brain.py`)**: Adds a class-level docstring that
describes the NotionBrain's role in leveraging Notion data for
knowledge-based responses.
- **ProxyBrain (`Proxy/Brain.py`)**: Incorporates a class-level
docstring explaining the ProxyBrain's function as a dynamic language
model selector and method-level docstrings detailing the operations of
each method.

These additions ensure that each Brain class and its methods are
well-documented, providing clear insights into their purposes and
functionalities.
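
As an illustration of the interface these docstrings describe, a brain such as `BigBrain` is consumed through its async `generate_stream` method, which yields SSE-formatted payloads. The sketch below is illustrative only: it assumes the brain is constructed with the keyword arguments the brain service normally injects, and that a `ChatQuestion` can be built from just its `question` field.

```python
import asyncio
from uuid import uuid4

from modules.brain.integrations.Big.Brain import BigBrain
from modules.chat.dto.chats import ChatQuestion


async def demo(brain_kwargs: dict) -> None:
    # brain_kwargs stands in for the configuration normally injected by the brain service.
    brain = BigBrain(**brain_kwargs)
    question = ChatQuestion(question="Summarise the onboarding documents.")

    # Each item is an SSE "data: {...}" string carrying the partial answer so far.
    async for chunk in brain.generate_stream(chat_id=uuid4(), question=question):
        print(chunk)
```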


---

For more details, open the [Copilot Workspace
session](https://copilot-workspace.githubnext.com/QuivrHQ/quivr?shareId=b4e301ad-828e-4424-95ec-6e378d5d3849).
2024-05-10 14:46:55 -07:00


import json
from typing import AsyncIterable
from uuid import UUID

from langchain.chains import ConversationalRetrievalChain, LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain_community.chat_models import ChatLiteLLM
from langchain_core.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)
from langchain_core.prompts.prompt import PromptTemplate

from logger import get_logger
from modules.brain.knowledge_brain_qa import KnowledgeBrainQA
from modules.chat.dto.chats import ChatQuestion

logger = get_logger(__name__)


class BigBrain(KnowledgeBrainQA):
    """
    The BigBrain class integrates advanced conversational retrieval and language model chains
    to provide comprehensive and context-aware responses to user queries.

    It leverages a combination of document retrieval, question condensation, and document-based
    question answering to generate responses that are informed by a wide range of knowledge sources.
    """

    def __init__(
        self,
        **kwargs,
    ):
        """
        Initializes the BigBrain class with specific configurations.

        Args:
            **kwargs: Arbitrary keyword arguments.
        """
        super().__init__(
            **kwargs,
        )

    def get_chain(self):
        """
        Constructs and returns the conversational QA chain used by BigBrain.

        Returns:
            A ConversationalRetrievalChain instance.
        """
        system_template = """Combine these summaries in a way that makes sense and answer the user's question.
Use markdown or any other techniques to display the content in a nice and aerated way. Answer in the language of the question.
Here are user instructions on how to respond: {custom_personality}
______________________
{summaries}"""
        messages = [
            SystemMessagePromptTemplate.from_template(system_template),
            HumanMessagePromptTemplate.from_template("{question}"),
        ]
        CHAT_COMBINE_PROMPT = ChatPromptTemplate.from_messages(messages)

        ### Question prompt
        question_prompt_template = """Use the following portion of a long document to see if any of the text is relevant to answer the question.
Return any relevant text verbatim. Return the answer in the same language as the question. If the answer is not in the text, just say nothing in the same language as the question.
{context}
Question: {question}
Relevant text, if any, else say Nothing:"""
        QUESTION_PROMPT = PromptTemplate(
            template=question_prompt_template, input_variables=["context", "question"]
        )

        ### Condense Question Prompt
        _template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question in exactly the same language as the original question.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question in same language as question:"""
        CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
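
        # Point LiteLLM at the configured Ollama endpoint when an "ollama/..." model is selected.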
        api_base = None
        if self.brain_settings.ollama_api_base_url and self.model.startswith("ollama"):
            api_base = self.brain_settings.ollama_api_base_url

        llm = ChatLiteLLM(
            temperature=0,
            model=self.model,
            api_base=api_base,
            max_tokens=self.max_tokens,
        )
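
        # Assemble the pieces: a retriever over the brain's documents, a chain that condenses
        # the follow-up into a standalone question, and a map_reduce chain that extracts and
        # combines relevant passages into the final answer.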
        retriever_doc = self.knowledge_qa.get_retriever()
        question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)
        doc_chain = load_qa_chain(
            llm,
            chain_type="map_reduce",
            question_prompt=QUESTION_PROMPT,
            combine_prompt=CHAT_COMBINE_PROMPT,
        )

        chain = ConversationalRetrievalChain(
            retriever=retriever_doc,
            question_generator=question_generator,
            combine_docs_chain=doc_chain,
        )
        return chain

    async def generate_stream(
        self, chat_id: UUID, question: ChatQuestion, save_answer: bool = True
    ) -> AsyncIterable:
        """
        Generates a stream of responses for a given question in real-time.

        Args:
            chat_id (UUID): The unique identifier for the chat session.
            question (ChatQuestion): The question object containing the user's query.
            save_answer (bool): Flag indicating whether to save the answer to the chat history.

        Returns:
            An asynchronous iterable of response strings.
        """
        conversational_qa_chain = self.get_chain()
        transformed_history, streamed_chat_history = (
            self.initialize_streamed_chat_history(chat_id, question)
        )
        response_tokens = []
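
        # Stream partial answers as they are produced and forward each one to the client
        # as a server-sent-events payload ("data: {...}").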
        async for chunk in conversational_qa_chain.astream(
            {
                "question": question.question,
                "chat_history": transformed_history,
                "custom_personality": (
                    self.prompt_to_use.content if self.prompt_to_use else None
                ),
            }
        ):
            if "answer" in chunk:
                response_tokens.append(chunk["answer"])
                streamed_chat_history.assistant = chunk["answer"]
                yield f"data: {json.dumps(streamed_chat_history.dict())}"

        self.save_answer(question, response_tokens, streamed_chat_history, save_answer)