Add config parameter to conversational_qa_chain (#2540)

This pull request adds a new config parameter to the
`conversational_qa_chain` function. The config parameter allows for
passing metadata, specifically the conversation ID, to the function.
This change ensures that the conversation ID is included in the metadata
when invoking the `conversational_qa_chain` function.
This commit is contained in:
Stan Girard 2024-05-04 20:05:29 +02:00 committed by GitHub
parent 3efc8ca565
commit 0dd1d12e6a
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 17 additions and 8 deletions

View File

@@ -61,6 +61,7 @@ class GPT4Brain(KnowledgeBrainQA):
self.initialize_streamed_chat_history(chat_id, question)
)
response_tokens = []
config = {"metadata": {"conversation_id": str(chat_id)}}
async for chunk in conversational_qa_chain.astream(
{
@@ -69,7 +70,8 @@ class GPT4Brain(KnowledgeBrainQA):
"custom_personality": (
self.prompt_to_use.content if self.prompt_to_use else None
),
}
},
config=config,
):
response_tokens.append(chunk.content)
streamed_chat_history.assistant = chunk.content

View File

@@ -55,6 +55,7 @@ class ProxyBrain(KnowledgeBrainQA):
self.initialize_streamed_chat_history(chat_id, question)
)
response_tokens = []
config = {"metadata": {"conversation_id": str(chat_id)}}
async for chunk in conversational_qa_chain.astream(
{
@@ -63,7 +64,8 @@ class ProxyBrain(KnowledgeBrainQA):
"custom_personality": (
self.prompt_to_use.content if self.prompt_to_use else None
),
}
},
config=config,
):
response_tokens.append(chunk.content)
streamed_chat_history.assistant = chunk.content
@@ -78,6 +80,7 @@ class ProxyBrain(KnowledgeBrainQA):
transformed_history, streamed_chat_history = (
self.initialize_streamed_chat_history(chat_id, question)
)
config = {"metadata": {"conversation_id": str(chat_id)}}
model_response = conversational_qa_chain.invoke(
{
"question": question.question,
@@ -85,7 +88,8 @@ class ProxyBrain(KnowledgeBrainQA):
"custom_personality": (
self.prompt_to_use.content if self.prompt_to_use else None
),
}
},
config=config,
)
answer = model_response.content

View File

@@ -255,6 +255,8 @@ class KnowledgeBrainQA(BaseModel, QAInterface):
metadata = self.metadata or {}
citations = None
answer = ""
config = {"metadata": {"conversation_id": str(chat_id)}}
model_response = conversational_qa_chain.invoke(
{
"question": question.question,
@@ -262,7 +264,8 @@ class KnowledgeBrainQA(BaseModel, QAInterface):
"custom_personality": (
self.prompt_to_use.content if self.prompt_to_use else None
),
}
},
config=config,
)
if self.model_compatible_with_function_calling(model=self.model):
@@ -294,6 +297,8 @@ class KnowledgeBrainQA(BaseModel, QAInterface):
sources = []
citations = []
first = True
config = {"metadata": {"conversation_id": str(chat_id)}}
async for chunk in conversational_qa_chain.astream(
{
"question": question.question,
@@ -301,7 +306,8 @@ class KnowledgeBrainQA(BaseModel, QAInterface):
"custom_personality": (
self.prompt_to_use.content if self.prompt_to_use else None
),
}
},
config=config,
):
if not streamed_chat_history.metadata:
streamed_chat_history.metadata = {}

View File

@@ -4,9 +4,6 @@ export const useAuthModes = () => {
"password",
];
console.log('Environment Variable NEXT_PUBLIC_AUTH_MODES:', process.env.NEXT_PUBLIC_AUTH_MODES);
console.log('authModes:', authModes);
return {
magicLink: authModes.includes("magic_link"),
password: authModes.includes("password"),