mirror of
https://github.com/StanGirard/quivr.git
synced 2024-11-23 04:17:48 +03:00
Add config parameter to conversational_qa_chain (#2540)
This pull request adds a new `config` parameter to the `conversational_qa_chain` function. The `config` parameter allows passing metadata — specifically the conversation ID — to the function. This change ensures that the conversation ID is included in the metadata whenever `conversational_qa_chain` is invoked.
This commit is contained in:
parent
3efc8ca565
commit
0dd1d12e6a
@ -61,6 +61,7 @@ class GPT4Brain(KnowledgeBrainQA):
|
||||
self.initialize_streamed_chat_history(chat_id, question)
|
||||
)
|
||||
response_tokens = []
|
||||
config = {"metadata": {"conversation_id": str(chat_id)}}
|
||||
|
||||
async for chunk in conversational_qa_chain.astream(
|
||||
{
|
||||
@ -69,7 +70,8 @@ class GPT4Brain(KnowledgeBrainQA):
|
||||
"custom_personality": (
|
||||
self.prompt_to_use.content if self.prompt_to_use else None
|
||||
),
|
||||
}
|
||||
},
|
||||
config=config,
|
||||
):
|
||||
response_tokens.append(chunk.content)
|
||||
streamed_chat_history.assistant = chunk.content
|
||||
|
@ -55,6 +55,7 @@ class ProxyBrain(KnowledgeBrainQA):
|
||||
self.initialize_streamed_chat_history(chat_id, question)
|
||||
)
|
||||
response_tokens = []
|
||||
config = {"metadata": {"conversation_id": str(chat_id)}}
|
||||
|
||||
async for chunk in conversational_qa_chain.astream(
|
||||
{
|
||||
@ -63,7 +64,8 @@ class ProxyBrain(KnowledgeBrainQA):
|
||||
"custom_personality": (
|
||||
self.prompt_to_use.content if self.prompt_to_use else None
|
||||
),
|
||||
}
|
||||
},
|
||||
config=config,
|
||||
):
|
||||
response_tokens.append(chunk.content)
|
||||
streamed_chat_history.assistant = chunk.content
|
||||
@ -78,6 +80,7 @@ class ProxyBrain(KnowledgeBrainQA):
|
||||
transformed_history, streamed_chat_history = (
|
||||
self.initialize_streamed_chat_history(chat_id, question)
|
||||
)
|
||||
config = {"metadata": {"conversation_id": str(chat_id)}}
|
||||
model_response = conversational_qa_chain.invoke(
|
||||
{
|
||||
"question": question.question,
|
||||
@ -85,7 +88,8 @@ class ProxyBrain(KnowledgeBrainQA):
|
||||
"custom_personality": (
|
||||
self.prompt_to_use.content if self.prompt_to_use else None
|
||||
),
|
||||
}
|
||||
},
|
||||
config=config,
|
||||
)
|
||||
|
||||
answer = model_response.content
|
||||
|
@ -255,6 +255,8 @@ class KnowledgeBrainQA(BaseModel, QAInterface):
|
||||
metadata = self.metadata or {}
|
||||
citations = None
|
||||
answer = ""
|
||||
config = {"metadata": {"conversation_id": str(chat_id)}}
|
||||
|
||||
model_response = conversational_qa_chain.invoke(
|
||||
{
|
||||
"question": question.question,
|
||||
@ -262,7 +264,8 @@ class KnowledgeBrainQA(BaseModel, QAInterface):
|
||||
"custom_personality": (
|
||||
self.prompt_to_use.content if self.prompt_to_use else None
|
||||
),
|
||||
}
|
||||
},
|
||||
config=config,
|
||||
)
|
||||
|
||||
if self.model_compatible_with_function_calling(model=self.model):
|
||||
@ -294,6 +297,8 @@ class KnowledgeBrainQA(BaseModel, QAInterface):
|
||||
sources = []
|
||||
citations = []
|
||||
first = True
|
||||
config = {"metadata": {"conversation_id": str(chat_id)}}
|
||||
|
||||
async for chunk in conversational_qa_chain.astream(
|
||||
{
|
||||
"question": question.question,
|
||||
@ -301,7 +306,8 @@ class KnowledgeBrainQA(BaseModel, QAInterface):
|
||||
"custom_personality": (
|
||||
self.prompt_to_use.content if self.prompt_to_use else None
|
||||
),
|
||||
}
|
||||
},
|
||||
config=config,
|
||||
):
|
||||
if not streamed_chat_history.metadata:
|
||||
streamed_chat_history.metadata = {}
|
||||
|
@ -4,9 +4,6 @@ export const useAuthModes = () => {
|
||||
"password",
|
||||
];
|
||||
|
||||
console.log('Environment Variable NEXT_PUBLIC_AUTH_MODES:', process.env.NEXT_PUBLIC_AUTH_MODES);
|
||||
console.log('authModes:', authModes);
|
||||
|
||||
return {
|
||||
magicLink: authModes.includes("magic_link"),
|
||||
password: authModes.includes("password"),
|
||||
|
Loading…
Reference in New Issue
Block a user