quivr/backend/core/quivr_core/prompts.py


import datetime
from langchain_core.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    MessagesPlaceholder,
    PromptTemplate,
    SystemMessagePromptTemplate,
)
from langchain_core.prompts.base import BasePromptTemplate
from pydantic import ConfigDict, create_model


class CustomPromptsDict(dict):
    def __init__(self, type, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._type = type

    def __setitem__(self, key, value):
        # Automatically convert the assigned value into a (self._type, value) tuple
        super().__setitem__(key, (self._type, value))
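
# Illustrative usage sketch (not part of the original module): each assignment
# is stored as a (type, value) tuple, which matches the (annotation, default)
# field spec consumed by pydantic's create_model at the bottom of this file.
# The GREETING_PROMPT name below is hypothetical.
#
#   prompts = CustomPromptsDict(type=BasePromptTemplate)
#   prompts["GREETING_PROMPT"] = PromptTemplate.from_template("Hi, {name}!")
#   prompts["GREETING_PROMPT"]
#   # -> (BasePromptTemplate, PromptTemplate(input_variables=['name'], ...))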


def _define_custom_prompts() -> CustomPromptsDict:
    custom_prompts: CustomPromptsDict = CustomPromptsDict(type=BasePromptTemplate)

    today_date = datetime.datetime.now().strftime("%B %d, %Y")

    # ---------------------------------------------------------------------------
    # Prompt for question rephrasing
    # ---------------------------------------------------------------------------
    _template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language. Keep as many details as possible from previous messages. Keep all entity names.

Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:"""

    CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
    custom_prompts["CONDENSE_QUESTION_PROMPT"] = CONDENSE_QUESTION_PROMPT

    # ---------------------------------------------------------------------------
    # Prompt for RAG
    # ---------------------------------------------------------------------------
    system_message_template = (
        f"Your name is Quivr. You're a helpful assistant. Today's date is {today_date}."
    )

    system_message_template += """
When answering, use markdown.
Use markdown code blocks for code snippets.
Answer in a concise and clear manner.
Use the following pieces of context from files provided by the user to answer the user's question.
Answer in the same language as the user question.
If you don't know the answer with the context provided from the files, just say that you don't know; don't try to make up an answer.
Don't cite the source id in the answer, but you can use the sources to answer the question.
You have access to the files to answer the user question (limited to the first 20 files):
{files}

If not None, user instructions to follow when answering: {custom_instructions}
"""
template_answer = """
Context:
{context}
User Question: {question}
Answer:
"""
RAG_ANSWER_PROMPT = ChatPromptTemplate.from_messages(
[
SystemMessagePromptTemplate.from_template(system_message_template),
HumanMessagePromptTemplate.from_template(template_answer),
]
)
custom_prompts["RAG_ANSWER_PROMPT"] = RAG_ANSWER_PROMPT

    # ---------------------------------------------------------------------------
    # Prompt for formatting documents
    # ---------------------------------------------------------------------------
    DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(
        template="Source: {index} \n {page_content}"
    )
    custom_prompts["DEFAULT_DOCUMENT_PROMPT"] = DEFAULT_DOCUMENT_PROMPT

    # ---------------------------------------------------------------------------
    # Prompt for chatting directly with LLMs, without any document retrieval stage
    # ---------------------------------------------------------------------------
    system_message_template = (
        f"Your name is Quivr. You're a helpful assistant. Today's date is {today_date}."
    )
    system_message_template += """
If not None, also follow these user instructions when answering: {custom_instructions}
"""

    template_answer = """
User Question: {question}
Answer:
"""

    CHAT_LLM_PROMPT = ChatPromptTemplate.from_messages(
        [
            SystemMessagePromptTemplate.from_template(system_message_template),
            MessagesPlaceholder(variable_name="chat_history"),
            HumanMessagePromptTemplate.from_template(template_answer),
        ]
    )
    custom_prompts["CHAT_LLM_PROMPT"] = CHAT_LLM_PROMPT

    return custom_prompts


_custom_prompts = _define_custom_prompts()

CustomPromptsModel = create_model(
    "CustomPromptsModel", **_custom_prompts, __config__=ConfigDict(extra="forbid")
)

custom_prompts = CustomPromptsModel()
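

# Illustrative sketch (assumed usage, not part of the original module): the
# generated pydantic model exposes each registered prompt as an attribute, and
# extra="forbid" rejects any prompt name that was never registered.
if __name__ == "__main__":
    print(custom_prompts.CONDENSE_QUESTION_PROMPT.input_variables)
    # -> ['chat_history', 'question']
    try:
        CustomPromptsModel(UNKNOWN_PROMPT=None)
    except Exception as err:  # pydantic raises a ValidationError here
        print(type(err).__name__)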