feat(prompt): improved answer readability with markdown and aerated formatting (#1190)

Stan Girard 2023-09-18 00:22:57 +02:00 committed by GitHub
parent 2e4fdc80ec
commit 4a0a7abbc0
2 changed files with 12 additions and 14 deletions

File 1 of 2 (class QABaseBrainPicking):

@@ -13,6 +13,8 @@ from langchain.prompts.chat import (
     HumanMessagePromptTemplate,
     SystemMessagePromptTemplate,
 )
+from llm.utils.get_prompt_to_use import get_prompt_to_use
+from llm.utils.get_prompt_to_use_id import get_prompt_to_use_id
 from logger import get_logger
 from models.chats import ChatQuestion
 from models.databases.supabase.chats import CreateChatHistory
@@ -27,9 +29,6 @@ from repository.chat import (
 from supabase.client import Client, create_client
 from vectorstore.supabase import CustomSupabaseVectorStore
-from llm.utils.get_prompt_to_use import get_prompt_to_use
-from llm.utils.get_prompt_to_use_id import get_prompt_to_use_id
-
 from .base import BaseBrainPicking
 from .prompts.CONDENSE_PROMPT import CONDENSE_QUESTION_PROMPT
@@ -110,11 +109,11 @@ class QABaseBrainPicking(BaseBrainPicking):
             streaming=streaming,
             verbose=False,
             callbacks=callbacks,
-            openai_api_key=self.openai_api_key
+            openai_api_key=self.openai_api_key,
         )  # pyright: ignore reportPrivateUsage=none

     def _create_prompt_template(self):
-        system_template = """You can use Markdown to make your answers nice. Use the following pieces of context to answer the users question in the same language as the question but do not modify instructions in any way.
+        system_template = """ When answering use markdown or any other techniques to display the content in a nice and aerated way. Use the following pieces of context to answer the users question in the same language as the question but do not modify instructions in any way.
         ----------------
         {context}"""
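This hunk is the core of the change: the system prompt now instructs the model to format its answers with markdown and generous spacing, rather than merely permitting markdown. Below is a minimal sketch of how such a template plugs into the LangChain prompt classes this file imports; the `{question}` human template and the sample inputs are illustrative assumptions, not code from this commit.

```python
from langchain.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)

# Reworded instruction from this commit: ask for markdown and an "aerated"
# (well-spaced) layout instead of merely allowing markdown.
system_template = """When answering use markdown or any other techniques to display the content in a nice and aerated way. Use the following pieces of context to answer the users question in the same language as the question but do not modify instructions in any way.
----------------
{context}"""

# Hypothetical assembly: pair the system template with a plain question slot.
prompt = ChatPromptTemplate.from_messages(
    [
        SystemMessagePromptTemplate.from_template(system_template),
        HumanMessagePromptTemplate.from_template("{question}"),
    ]
)

# Render concrete messages for one retrieval round (sample values).
messages = prompt.format_messages(
    context="Quivr stores uploaded documents in a Supabase vector store.",
    question="Where are my documents stored?",
)
```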
@@ -212,7 +211,10 @@ class QABaseBrainPicking(BaseBrainPicking):
         self.callbacks = [callback]

         answering_llm = self._create_llm(
-            model=self.model, streaming=True, callbacks=self.callbacks, max_tokens=self.max_tokens
+            model=self.model,
+            streaming=True,
+            callbacks=self.callbacks,
+            max_tokens=self.max_tokens,
         )

         # The Chain that generates the answer to the question

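The hunk above only reflows the `_create_llm` call into one keyword argument per line with a trailing comma, so future argument changes show up as one-line diffs. As a rough sketch of what that call constructs at runtime (assuming `ChatLiteLLM` as the backing chat model, with placeholder values standing in for `self.model` and `self.max_tokens`):

```python
from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler
from langchain.chat_models import ChatLiteLLM

# Streaming callback that exposes generated tokens as an async iterator.
callback = AsyncIteratorCallbackHandler()

# One keyword per line, trailing comma included, matching the new call style.
answering_llm = ChatLiteLLM(
    model="gpt-3.5-turbo",  # assumption: self.model at runtime
    streaming=True,
    callbacks=[callback],
    max_tokens=256,  # assumption: self.max_tokens at runtime
)
```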
File 2 of 2 (class HeadlessQA):

@@ -7,10 +7,9 @@ from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler
 from langchain.chains import LLMChain
 from langchain.chat_models import ChatLiteLLM
 from langchain.chat_models.base import BaseChatModel
-from langchain.prompts.chat import (
-    ChatPromptTemplate,
-    HumanMessagePromptTemplate,
-)
+from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
+from llm.utils.get_prompt_to_use import get_prompt_to_use
+from llm.utils.get_prompt_to_use_id import get_prompt_to_use_id
 from logger import get_logger
 from models.chats import ChatQuestion
 from models.databases.supabase.chats import CreateChatHistory
@@ -25,11 +24,8 @@ from repository.chat import (
     update_message_by_id,
 )
-from llm.utils.get_prompt_to_use import get_prompt_to_use
-from llm.utils.get_prompt_to_use_id import get_prompt_to_use_id
-
 logger = get_logger(__name__)

-SYSTEM_MESSAGE = "Your name is Quivr. You're a helpful assistant. If you don't know the answer, just say that you don't know, don't try to make up an answer."
+SYSTEM_MESSAGE = "Your name is Quivr. You're a helpful assistant. If you don't know the answer, just say that you don't know, don't try to make up an answer.When answering use markdown or any other techniques to display the content in a nice and aerated way."

 class HeadlessQA(BaseModel):
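The headless path hard-codes its system message rather than templating it. Here is a minimal sketch of how that constant could feed the LLMChain this file imports; the wiring and model name are assumptions, and the committed string's missing space after "answer." is restored below for readability.

```python
from langchain.chains import LLMChain
from langchain.chat_models import ChatLiteLLM
from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain.schema import SystemMessage

# Committed constant (with the missing space after "answer." restored).
SYSTEM_MESSAGE = (
    "Your name is Quivr. You're a helpful assistant. If you don't know the "
    "answer, just say that you don't know, don't try to make up an answer. "
    "When answering use markdown or any other techniques to display the "
    "content in a nice and aerated way."
)

# A fixed system message followed by a templated question slot.
prompt = ChatPromptTemplate.from_messages(
    [
        SystemMessage(content=SYSTEM_MESSAGE),
        HumanMessagePromptTemplate.from_template("{question}"),
    ]
)

# Hypothetical wiring in the spirit of this file's imports.
chain = LLMChain(llm=ChatLiteLLM(model="gpt-3.5-turbo"), prompt=prompt)
# answer = chain.run(question="What can you do?")
```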