mirror of
https://github.com/QuivrHQ/quivr.git
synced 2024-12-14 17:03:29 +03:00
docs: Add docstrings to integration brains (#2582)
This pull request adds comprehensive docstrings to the Brain classes within the `backend/modules/brain/integrations` directory, enhancing code documentation and readability. The changes include: - **BigBrain (`Big/Brain.py`)**: Adds a class-level docstring explaining the purpose and functionality of the BigBrain class, along with method-level docstrings detailing the operations performed by each method. - **ClaudeBrain (`Claude/Brain.py`)**: Introduces a class-level docstring that describes the ClaudeBrain class's integration with the Claude model for conversational AI capabilities, and method-level docstrings that clarify the purpose of each method. - **GPT4Brain (`GPT4/Brain.py`)**: Updates include a detailed class-level docstring outlining the GPT4Brain's integration with GPT-4 for real-time answers and tool support, along with method-level docstrings explaining the functionality of each method. - **NotionBrain (`Notion/Brain.py`)**: Adds a class-level docstring that describes the NotionBrain's role in leveraging Notion data for knowledge-based responses. - **ProxyBrain (`Proxy/Brain.py`)**: Incorporates a class-level docstring explaining the ProxyBrain's function as a dynamic language model selector and method-level docstrings detailing the operations of each method. These additions ensure that each Brain class and its methods are well-documented, providing clear insights into their purposes and functionalities. --- For more details, open the [Copilot Workspace session](https://copilot-workspace.githubnext.com/QuivrHQ/quivr?shareId=b4e301ad-828e-4424-95ec-6e378d5d3849).
This commit is contained in:
parent
bbcca2a8fe
commit
b60a31e561
@ -19,21 +19,35 @@ logger = get_logger(__name__)
|
||||
|
||||
|
||||
class BigBrain(KnowledgeBrainQA):
|
||||
"""This is the Big brain class.
|
||||
"""
|
||||
The BigBrain class integrates advanced conversational retrieval and language model chains
|
||||
to provide comprehensive and context-aware responses to user queries.
|
||||
|
||||
Args:
|
||||
KnowledgeBrainQA (_type_): A brain that store the knowledge internaly
|
||||
It leverages a combination of document retrieval, question condensation, and document-based
|
||||
question answering to generate responses that are informed by a wide range of knowledge sources.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
**kwargs,
|
||||
):
|
||||
"""
|
||||
Initializes the BigBrain class with specific configurations.
|
||||
|
||||
Args:
|
||||
**kwargs: Arbitrary keyword arguments.
|
||||
"""
|
||||
super().__init__(
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
def get_chain(self):
|
||||
"""
|
||||
Constructs and returns the conversational QA chain used by BigBrain.
|
||||
|
||||
Returns:
|
||||
A ConversationalRetrievalChain instance.
|
||||
"""
|
||||
system_template = """Combine these summaries in a way that makes sense and answer the user's question.
|
||||
Use markdown or any other techniques to display the content in a nice and aerated way. Answer in the language of the question.
|
||||
Here are user instructions on how to respond: {custom_personality}
|
||||
@ -97,6 +111,17 @@ class BigBrain(KnowledgeBrainQA):
|
||||
async def generate_stream(
|
||||
self, chat_id: UUID, question: ChatQuestion, save_answer: bool = True
|
||||
) -> AsyncIterable:
|
||||
"""
|
||||
Generates a stream of responses for a given question in real-time.
|
||||
|
||||
Args:
|
||||
chat_id (UUID): The unique identifier for the chat session.
|
||||
question (ChatQuestion): The question object containing the user's query.
|
||||
save_answer (bool): Flag indicating whether to save the answer to the chat history.
|
||||
|
||||
Returns:
|
||||
An asynchronous iterable of response strings.
|
||||
"""
|
||||
conversational_qa_chain = self.get_chain()
|
||||
transformed_history, streamed_chat_history = (
|
||||
self.initialize_streamed_chat_history(chat_id, question)
|
||||
|
@ -9,26 +9,43 @@ from modules.chat.dto.chats import ChatQuestion
|
||||
|
||||
|
||||
class ClaudeBrain(KnowledgeBrainQA):
|
||||
"""This is the Notion brain class. it is a KnowledgeBrainQA has the data is stored locally.
|
||||
It is going to call the Data Store internally to get the data.
|
||||
|
||||
Args:
|
||||
KnowledgeBrainQA (_type_): A brain that store the knowledge internaly
|
||||
"""
|
||||
ClaudeBrain integrates with Claude model to provide conversational AI capabilities.
|
||||
It leverages the Claude model for generating responses based on the provided context.
|
||||
|
||||
Attributes:
|
||||
**kwargs: Arbitrary keyword arguments for KnowledgeBrainQA initialization.
|
||||
"""
|
||||
def __init__(
|
||||
self,
|
||||
**kwargs,
|
||||
):
|
||||
"""
|
||||
Initializes the ClaudeBrain with the given arguments.
|
||||
|
||||
Args:
|
||||
**kwargs: Arbitrary keyword arguments.
|
||||
"""
|
||||
super().__init__(
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
def calculate_pricing(self):
|
||||
"""
|
||||
Calculates the pricing for using the ClaudeBrain.
|
||||
|
||||
Returns:
|
||||
int: The pricing value.
|
||||
"""
|
||||
return 3
|
||||
|
||||
def get_chain(self):
|
||||
"""
|
||||
Constructs and returns the conversational chain for ClaudeBrain.
|
||||
|
||||
Returns:
|
||||
A conversational chain object.
|
||||
"""
|
||||
prompt = ChatPromptTemplate.from_messages(
|
||||
[
|
||||
(
|
||||
@ -49,6 +66,17 @@ class ClaudeBrain(KnowledgeBrainQA):
|
||||
async def generate_stream(
|
||||
self, chat_id: UUID, question: ChatQuestion, save_answer: bool = True
|
||||
) -> AsyncIterable:
|
||||
"""
|
||||
Generates a stream of responses for the given question.
|
||||
|
||||
Args:
|
||||
chat_id (UUID): The chat session ID.
|
||||
question (ChatQuestion): The question object.
|
||||
save_answer (bool): Whether to save the answer.
|
||||
|
||||
Yields:
|
||||
AsyncIterable: A stream of response strings.
|
||||
"""
|
||||
conversational_qa_chain = self.get_chain()
|
||||
transformed_history, streamed_chat_history = (
|
||||
self.initialize_streamed_chat_history(chat_id, question)
|
||||
|
@ -27,8 +27,6 @@ class AgentState(TypedDict):
|
||||
messages: Annotated[Sequence[BaseMessage], operator.add]
|
||||
|
||||
|
||||
# Define the function that determines whether to continue or not
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
chat_service = ChatService()
|
||||
|
@ -2,17 +2,24 @@ from modules.brain.knowledge_brain_qa import KnowledgeBrainQA
|
||||
|
||||
|
||||
class NotionBrain(KnowledgeBrainQA):
|
||||
"""This is the Notion brain class. it is a KnowledgeBrainQA has the data is stored locally.
|
||||
It is going to call the Data Store internally to get the data.
|
||||
"""
|
||||
NotionBrain integrates with Notion to provide knowledge-based responses.
|
||||
It leverages data stored in Notion to answer user queries.
|
||||
|
||||
Args:
|
||||
KnowledgeBrainQA (_type_): A brain that store the knowledge internaly
|
||||
Attributes:
|
||||
**kwargs: Arbitrary keyword arguments for KnowledgeBrainQA initialization.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
**kwargs,
|
||||
):
|
||||
"""
|
||||
Initializes the NotionBrain with the given arguments.
|
||||
|
||||
Args:
|
||||
**kwargs: Arbitrary keyword arguments.
|
||||
"""
|
||||
super().__init__(
|
||||
**kwargs,
|
||||
)
|
||||
|
@ -16,22 +16,32 @@ chat_service = ChatService()
|
||||
|
||||
|
||||
class ProxyBrain(KnowledgeBrainQA):
|
||||
"""This is the Proxy brain class.
|
||||
|
||||
Args:
|
||||
KnowledgeBrainQA (_type_): A brain that store the knowledge internaly
|
||||
"""
|
||||
ProxyBrain class serves as a proxy to utilize various language models for generating responses.
|
||||
It dynamically selects and uses the appropriate language model based on the provided context and question.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
**kwargs,
|
||||
):
|
||||
"""
|
||||
Initializes the ProxyBrain with the given arguments.
|
||||
|
||||
Args:
|
||||
**kwargs: Arbitrary keyword arguments.
|
||||
"""
|
||||
super().__init__(
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
def get_chain(self):
|
||||
"""
|
||||
Constructs and returns the conversational chain for ProxyBrain.
|
||||
|
||||
Returns:
|
||||
A conversational chain object.
|
||||
"""
|
||||
prompt = ChatPromptTemplate.from_messages(
|
||||
[
|
||||
(
|
||||
@ -50,6 +60,17 @@ class ProxyBrain(KnowledgeBrainQA):
|
||||
async def generate_stream(
|
||||
self, chat_id: UUID, question: ChatQuestion, save_answer: bool = True
|
||||
) -> AsyncIterable:
|
||||
"""
|
||||
Generates a stream of responses for the given question.
|
||||
|
||||
Args:
|
||||
chat_id (UUID): The chat session ID.
|
||||
question (ChatQuestion): The question object.
|
||||
save_answer (bool): Whether to save the answer.
|
||||
|
||||
Yields:
|
||||
AsyncIterable: A stream of response strings.
|
||||
"""
|
||||
conversational_qa_chain = self.get_chain()
|
||||
transformed_history, streamed_chat_history = (
|
||||
self.initialize_streamed_chat_history(chat_id, question)
|
||||
@ -76,6 +97,17 @@ class ProxyBrain(KnowledgeBrainQA):
|
||||
def generate_answer(
|
||||
self, chat_id: UUID, question: ChatQuestion, save_answer: bool = True
|
||||
) -> GetChatHistoryOutput:
|
||||
"""
|
||||
Generates a non-streaming answer for the given question.
|
||||
|
||||
Args:
|
||||
chat_id (UUID): The chat session ID.
|
||||
question (ChatQuestion): The question object.
|
||||
save_answer (bool): Whether to save the answer.
|
||||
|
||||
Returns:
|
||||
GetChatHistoryOutput: The chat history output object containing the answer.
|
||||
"""
|
||||
conversational_qa_chain = self.get_chain()
|
||||
transformed_history, streamed_chat_history = (
|
||||
self.initialize_streamed_chat_history(chat_id, question)
|
||||
|
Loading…
Reference in New Issue
Block a user