quivr/backend/modules/brain/integrations/Proxy/Brain.py
Stan Girard b60a31e561
docs: Add docstrings to integration brains (#2582)
This pull request adds comprehensive docstrings to the Brain classes
within the `backend/modules/brain/integrations` directory, enhancing
code documentation and readability. The changes include:

- **BigBrain (`Big/Brain.py`)**: Adds a class-level docstring explaining
the purpose and functionality of the BigBrain class, along with
method-level docstrings detailing the operations performed by each
method.
- **ClaudeBrain (`Claude/Brain.py`)**: Introduces a class-level
docstring that describes the ClaudeBrain class's integration with the
Claude model for conversational AI capabilities, and method-level
docstrings that clarify the purpose of each method.
- **GPT4Brain (`GPT4/Brain.py`)**: Updates include a detailed
class-level docstring outlining the GPT4Brain's integration with GPT-4
for real-time answers and tool support, along with method-level
docstrings explaining the functionality of each method.
- **NotionBrain (`Notion/Brain.py`)**: Adds a class-level docstring that
describes the NotionBrain's role in leveraging Notion data for
knowledge-based responses.
- **ProxyBrain (`Proxy/Brain.py`)**: Incorporates a class-level
docstring explaining the ProxyBrain's function as a dynamic language
model selector and method-level docstrings detailing the operations of
each method.

These additions ensure that each Brain class and its methods are
well-documented, providing clear insights into their purposes and
functionalities.


---

For more details, open the [Copilot Workspace
session](https://copilot-workspace.githubnext.com/QuivrHQ/quivr?shareId=b4e301ad-828e-4424-95ec-6e378d5d3849).
2024-05-10 14:46:55 -07:00

134 lines
4.3 KiB
Python

import json
from typing import AsyncIterable
from uuid import UUID
from langchain_community.chat_models import ChatLiteLLM
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from logger import get_logger
from modules.brain.knowledge_brain_qa import KnowledgeBrainQA
from modules.chat.dto.chats import ChatQuestion
from modules.chat.dto.outputs import GetChatHistoryOutput
from modules.chat.service.chat_service import ChatService
logger = get_logger(__name__)
chat_service = ChatService()
class ProxyBrain(KnowledgeBrainQA):
"""
ProxyBrain class serves as a proxy to utilize various language models for generating responses.
It dynamically selects and uses the appropriate language model based on the provided context and question.
"""
def __init__(
self,
**kwargs,
):
"""
Initializes the ProxyBrain with the given arguments.
Args:
**kwargs: Arbitrary keyword arguments.
"""
super().__init__(
**kwargs,
)
def get_chain(self):
"""
Constructs and returns the conversational chain for ProxyBrain.
Returns:
A conversational chain object.
"""
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You are Quivr. You are an assistant. {custom_personality}",
),
MessagesPlaceholder(variable_name="chat_history"),
("human", "{question}"),
]
)
chain = prompt | ChatLiteLLM(model=self.model, max_tokens=self.max_tokens)
return chain
async def generate_stream(
self, chat_id: UUID, question: ChatQuestion, save_answer: bool = True
) -> AsyncIterable:
"""
Generates a stream of responses for the given question.
Args:
chat_id (UUID): The chat session ID.
question (ChatQuestion): The question object.
save_answer (bool): Whether to save the answer.
Yields:
AsyncIterable: A stream of response strings.
"""
conversational_qa_chain = self.get_chain()
transformed_history, streamed_chat_history = (
self.initialize_streamed_chat_history(chat_id, question)
)
response_tokens = []
config = {"metadata": {"conversation_id": str(chat_id)}}
async for chunk in conversational_qa_chain.astream(
{
"question": question.question,
"chat_history": transformed_history,
"custom_personality": (
self.prompt_to_use.content if self.prompt_to_use else None
),
},
config=config,
):
response_tokens.append(chunk.content)
streamed_chat_history.assistant = chunk.content
yield f"data: {json.dumps(streamed_chat_history.dict())}"
self.save_answer(question, response_tokens, streamed_chat_history, save_answer)
def generate_answer(
self, chat_id: UUID, question: ChatQuestion, save_answer: bool = True
) -> GetChatHistoryOutput:
"""
Generates a non-streaming answer for the given question.
Args:
chat_id (UUID): The chat session ID.
question (ChatQuestion): The question object.
save_answer (bool): Whether to save the answer.
Returns:
GetChatHistoryOutput: The chat history output object containing the answer.
"""
conversational_qa_chain = self.get_chain()
transformed_history, streamed_chat_history = (
self.initialize_streamed_chat_history(chat_id, question)
)
config = {"metadata": {"conversation_id": str(chat_id)}}
model_response = conversational_qa_chain.invoke(
{
"question": question.question,
"chat_history": transformed_history,
"custom_personality": (
self.prompt_to_use.content if self.prompt_to_use else None
),
},
config=config,
)
answer = model_response.content
return self.save_non_streaming_answer(
chat_id=chat_id,
question=question,
answer=answer,
)