diff --git a/backend/modules/brain/integrations/Big/Brain.py b/backend/modules/brain/integrations/Big/Brain.py
index 0a9bad12b..beec72272 100644
--- a/backend/modules/brain/integrations/Big/Brain.py
+++ b/backend/modules/brain/integrations/Big/Brain.py
@@ -19,21 +19,35 @@ logger = get_logger(__name__)
 
 class BigBrain(KnowledgeBrainQA):
-    """This is the Big brain class.
+    """
+    The BigBrain class integrates advanced conversational retrieval and language model chains
+    to provide comprehensive and context-aware responses to user queries.
 
-    Args:
-        KnowledgeBrainQA (_type_): A brain that store the knowledge internaly
+    It leverages a combination of document retrieval, question condensation, and document-based
+    question answering to generate responses that are informed by a wide range of knowledge sources.
     """
 
     def __init__(
        self,
        **kwargs,
    ):
+        """
+        Initializes the BigBrain class with specific configurations.
+
+        Args:
+            **kwargs: Arbitrary keyword arguments.
+        """
         super().__init__(
             **kwargs,
         )
 
     def get_chain(self):
+        """
+        Constructs and returns the conversational QA chain used by BigBrain.
+
+        Returns:
+            A ConversationalRetrievalChain instance.
+        """
         system_template = """Combine these summaries in a way that makes sense and answer the user's question.
         Use markdown or any other techniques to display the content in a nice and aerated way.
         Answer in the language of the question.
         Here are user instructions on how to respond: {custom_personality}
@@ -97,6 +111,17 @@ class BigBrain(KnowledgeBrainQA):
     async def generate_stream(
         self, chat_id: UUID, question: ChatQuestion, save_answer: bool = True
     ) -> AsyncIterable:
+        """
+        Generates a stream of responses for a given question in real time.
+
+        Args:
+            chat_id (UUID): The unique identifier for the chat session.
+            question (ChatQuestion): The question object containing the user's query.
+            save_answer (bool): Flag indicating whether to save the answer to the chat history.
+
+        Returns:
+            An asynchronous iterable of response strings.
+        """
         conversational_qa_chain = self.get_chain()
         transformed_history, streamed_chat_history = (
             self.initialize_streamed_chat_history(chat_id, question)
diff --git a/backend/modules/brain/integrations/Claude/Brain.py b/backend/modules/brain/integrations/Claude/Brain.py
index c9865573c..7b831665f 100644
--- a/backend/modules/brain/integrations/Claude/Brain.py
+++ b/backend/modules/brain/integrations/Claude/Brain.py
@@ -9,26 +9,43 @@ from modules.chat.dto.chats import ChatQuestion
 
 
 class ClaudeBrain(KnowledgeBrainQA):
-    """This is the Notion brain class. it is a KnowledgeBrainQA has the data is stored locally.
-    It is going to call the Data Store internally to get the data.
-
-    Args:
-        KnowledgeBrainQA (_type_): A brain that store the knowledge internaly
     """
+    ClaudeBrain integrates with the Claude model to provide conversational AI capabilities.
+    It leverages the Claude model for generating responses based on the provided context.
+    Attributes:
+        **kwargs: Arbitrary keyword arguments for KnowledgeBrainQA initialization.
+    """
 
     def __init__(
        self,
        **kwargs,
    ):
+        """
+        Initializes the ClaudeBrain with the given arguments.
+
+        Args:
+            **kwargs: Arbitrary keyword arguments.
+        """
         super().__init__(
             **kwargs,
         )
 
     def calculate_pricing(self):
+        """
+        Calculates the pricing for using the ClaudeBrain.
+
+        Returns:
+            int: The pricing value.
+        """
         return 3
 
     def get_chain(self):
+        """
+        Constructs and returns the conversational chain for ClaudeBrain.
+        Returns:
+            A conversational chain object.
+ """ prompt = ChatPromptTemplate.from_messages( [ ( @@ -49,6 +66,17 @@ class ClaudeBrain(KnowledgeBrainQA): async def generate_stream( self, chat_id: UUID, question: ChatQuestion, save_answer: bool = True ) -> AsyncIterable: + """ + Generates a stream of responses for the given question. + + Args: + chat_id (UUID): The chat session ID. + question (ChatQuestion): The question object. + save_answer (bool): Whether to save the answer. + + Yields: + AsyncIterable: A stream of response strings. + """ conversational_qa_chain = self.get_chain() transformed_history, streamed_chat_history = ( self.initialize_streamed_chat_history(chat_id, question) diff --git a/backend/modules/brain/integrations/GPT4/Brain.py b/backend/modules/brain/integrations/GPT4/Brain.py index f7ad10369..1ba63c2b8 100644 --- a/backend/modules/brain/integrations/GPT4/Brain.py +++ b/backend/modules/brain/integrations/GPT4/Brain.py @@ -27,8 +27,6 @@ class AgentState(TypedDict): messages: Annotated[Sequence[BaseMessage], operator.add] -# Define the function that determines whether to continue or not - logger = get_logger(__name__) chat_service = ChatService() diff --git a/backend/modules/brain/integrations/Notion/Brain.py b/backend/modules/brain/integrations/Notion/Brain.py index fa5b91a61..add2592ab 100644 --- a/backend/modules/brain/integrations/Notion/Brain.py +++ b/backend/modules/brain/integrations/Notion/Brain.py @@ -2,17 +2,24 @@ from modules.brain.knowledge_brain_qa import KnowledgeBrainQA class NotionBrain(KnowledgeBrainQA): - """This is the Notion brain class. it is a KnowledgeBrainQA has the data is stored locally. - It is going to call the Data Store internally to get the data. + """ + NotionBrain integrates with Notion to provide knowledge-based responses. + It leverages data stored in Notion to answer user queries. - Args: - KnowledgeBrainQA (_type_): A brain that store the knowledge internaly + Attributes: + **kwargs: Arbitrary keyword arguments for KnowledgeBrainQA initialization. """ def __init__( self, **kwargs, ): + """ + Initializes the NotionBrain with the given arguments. + + Args: + **kwargs: Arbitrary keyword arguments. + """ super().__init__( **kwargs, ) diff --git a/backend/modules/brain/integrations/Proxy/Brain.py b/backend/modules/brain/integrations/Proxy/Brain.py index a000dcb84..4a085181e 100644 --- a/backend/modules/brain/integrations/Proxy/Brain.py +++ b/backend/modules/brain/integrations/Proxy/Brain.py @@ -16,22 +16,32 @@ chat_service = ChatService() class ProxyBrain(KnowledgeBrainQA): - """This is the Proxy brain class. - - Args: - KnowledgeBrainQA (_type_): A brain that store the knowledge internaly + """ + ProxyBrain class serves as a proxy to utilize various language models for generating responses. + It dynamically selects and uses the appropriate language model based on the provided context and question. """ def __init__( self, **kwargs, ): + """ + Initializes the ProxyBrain with the given arguments. + + Args: + **kwargs: Arbitrary keyword arguments. + """ super().__init__( **kwargs, ) def get_chain(self): + """ + Constructs and returns the conversational chain for ProxyBrain. + Returns: + A conversational chain object. + """ prompt = ChatPromptTemplate.from_messages( [ ( @@ -50,6 +60,17 @@ class ProxyBrain(KnowledgeBrainQA): async def generate_stream( self, chat_id: UUID, question: ChatQuestion, save_answer: bool = True ) -> AsyncIterable: + """ + Generates a stream of responses for the given question. + + Args: + chat_id (UUID): The chat session ID. 
+            question (ChatQuestion): The question object.
+            save_answer (bool): Whether to save the answer.
+
+        Yields:
+            AsyncIterable: A stream of response strings.
+        """
         conversational_qa_chain = self.get_chain()
         transformed_history, streamed_chat_history = (
             self.initialize_streamed_chat_history(chat_id, question)
@@ -76,6 +97,17 @@ class ProxyBrain(KnowledgeBrainQA):
     def generate_answer(
         self, chat_id: UUID, question: ChatQuestion, save_answer: bool = True
     ) -> GetChatHistoryOutput:
+        """
+        Generates a non-streaming answer for the given question.
+
+        Args:
+            chat_id (UUID): The chat session ID.
+            question (ChatQuestion): The question object.
+            save_answer (bool): Whether to save the answer.
+
+        Returns:
+            GetChatHistoryOutput: The chat history output object containing the answer.
+        """
         conversational_qa_chain = self.get_chain()
         transformed_history, streamed_chat_history = (
             self.initialize_streamed_chat_history(chat_id, question)
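
Review note: a minimal sketch of how the streaming API documented above is typically consumed, assuming an already-configured brain. The no-argument ProxyBrain() construction and the ChatQuestion field shown are illustrative assumptions, not part of this diff; KnowledgeBrainQA subclasses take arbitrary kwargs whose required values depend on the deployment.

import asyncio
from uuid import uuid4

from modules.brain.integrations.Proxy.Brain import ProxyBrain
from modules.chat.dto.chats import ChatQuestion


async def main():
    # Hypothetical setup: real construction kwargs depend on the deployment.
    brain = ProxyBrain()
    # ChatQuestion is assumed to accept the query text; other fields assumed optional.
    question = ChatQuestion(question="What does this brain do?")

    # generate_stream yields incremental response chunks as they are produced;
    # save_answer=False skips persisting the answer to the chat history.
    async for chunk in brain.generate_stream(
        chat_id=uuid4(), question=question, save_answer=False
    ):
        print(chunk)


asyncio.run(main())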