# quivr/backend/llm/brainpicking.py


from typing import Any, Dict, List, Optional, Tuple

# Importing modules and classes from the 'langchain' library, used for natural language processing
from langchain.chains import ConversationalRetrievalChain, LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms.base import LLM
from langchain.memory import ConversationBufferMemory
from llm.prompt.CONDENSE_PROMPT import CONDENSE_QUESTION_PROMPT
from logger import get_logger
from models.settings import BrainSettings, LLMSettings  # Settings for the 'brain' and the LLM
from pydantic import BaseModel  # For data validation and settings management
from repository.chat.get_chat_history import get_chat_history
from supabase import Client, create_client  # For interacting with the Supabase database
from vectorstore.supabase import CustomSupabaseVectorStore  # Custom vector store backed by Supabase

logger = get_logger(__name__)

class AnswerConversationBufferMemory(ConversationBufferMemory):
    """
    A specialized version of ConversationBufferMemory that saves the model
    response under the 'answer' key of the outputs instead of the default
    'response' key.
    """

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        # Remap the 'answer' output to the 'response' key expected by the parent class
        return super().save_context(inputs, {"response": outputs["answer"]})
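
# Illustrative behavior (hypothetical values, not part of the original module):
#     memory.save_context({"question": "Hi"}, {"answer": "Hello!"})
# stores "Hello!" under the "response" key, which is the shape
# ConversationBufferMemory expects.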


def format_chat_history(inputs: List[Tuple[str, str]]) -> str:
    """
    Concatenate the chat history into a single string.
    :param inputs: List of (human message, AI message) tuples.
    :return: Concatenated string of the chat history.
    """
    res = []
    for human, ai in inputs:
        res.append(f"{human}:{ai}\n")
    return "\n".join(res)
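
# Illustrative example (values are made up): two exchanges
#     format_chat_history([("Hi", "Hello!"), ("How are you?", "Fine.")])
# return "Hi:Hello!\n\nHow are you?:Fine.\n" -- each turn formatted as
# "human:ai" with a trailing newline, then joined with another newline.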


class BrainPicking(BaseModel):
    """
    Main class for the Brain Picking functionality.
    It initializes a chat model, generates standalone questions and retrieves
    answers using a ConversationalRetrievalChain.
    """

    # Default class attributes
    llm_name: str = "gpt-3.5-turbo"
    temperature: float = 0.0
    settings = BrainSettings()
    llm_config = LLMSettings()
    embeddings: OpenAIEmbeddings = None
    supabase_client: Client = None
    vector_store: CustomSupabaseVectorStore = None
    llm: LLM = None
    question_generator: LLMChain = None
    doc_chain: ConversationalRetrievalChain = None
    chat_id: str
    max_tokens: int = 256

    class Config:
        # Allow arbitrary (non-pydantic) types such as the Supabase client
        arbitrary_types_allowed = True

    def __init__(
        self,
        model: str,
        brain_id: str,
        temperature: float,
        chat_id: str,
        max_tokens: int,
        user_openai_api_key: Optional[str],
    ) -> None:
        """
        Initialize the BrainPicking class by setting up the embeddings, the
        Supabase client, the vector store, the language model and the chains.
        :param model: Language model name to be used.
        :param brain_id: The brain id to be used by the CustomSupabaseVectorStore.
        :param temperature: Sampling temperature for the language model.
        :param chat_id: The id of the chat whose history is used for retrieval.
        :param max_tokens: Token limit for the retrieved documents.
        :param user_openai_api_key: Optional user-provided OpenAI API key.
        """
        super().__init__(
            model=model,
            brain_id=brain_id,
            chat_id=chat_id,
            max_tokens=max_tokens,
            temperature=temperature,
            user_openai_api_key=user_openai_api_key,
        )
        # If the user provided an API key, it overrides the default setting
        if user_openai_api_key is not None:
            self.settings.openai_api_key = user_openai_api_key
        self.temperature = temperature
        self.embeddings = OpenAIEmbeddings(openai_api_key=self.settings.openai_api_key)
        self.supabase_client = create_client(
            self.settings.supabase_url, self.settings.supabase_service_key
        )
        self.llm_name = model
        self.vector_store = CustomSupabaseVectorStore(
            self.supabase_client,
            self.embeddings,
            table_name="vectors",
            brain_id=brain_id,
        )
        self.llm = self._determine_llm(
            private_model_args={
                "model_path": self.llm_config.model_path,
                "n_ctx": self.llm_config.model_n_ctx,
                "n_batch": self.llm_config.model_n_batch,
            },
            private=self.llm_config.private,
            model_name=self.llm_name,
        )
        self.question_generator = LLMChain(
            llm=self.llm, prompt=CONDENSE_QUESTION_PROMPT
        )
        self.doc_chain = load_qa_chain(self.llm, chain_type="stuff")
        self.chat_id = chat_id
        self.max_tokens = max_tokens

    def _determine_llm(
        self, private_model_args: dict, private: bool = False, model_name: Optional[str] = None
    ) -> LLM:
        """
        Determine the language model to be used.
        :param private_model_args: Dictionary containing model_path, n_ctx and n_batch.
        :param private: Whether a private (local) model should be used.
        :param model_name: Language model name to be used.
        :return: Language model instance.
        """
        # Note: only ChatOpenAI is currently wired up; the private-model
        # arguments are accepted but not yet used.
        return ChatOpenAI(temperature=self.temperature, model_name=model_name)

    def _get_qa(self) -> ConversationalRetrievalChain:
        """
        Build the QA chain used to answer questions against the vector store,
        wiring together the retriever, the question generator and the
        document chain.
        :return: ConversationalRetrievalChain instance.
        """
        qa = ConversationalRetrievalChain(
            retriever=self.vector_store.as_retriever(),
            max_tokens_limit=self.max_tokens,
            question_generator=self.question_generator,
            combine_docs_chain=self.doc_chain,
            get_chat_history=format_chat_history,
        )
        return qa

    def generate_answer(self, question: str) -> str:
        """
        Generate an answer to the given question by interacting with the language model.
        :param question: The question to answer.
        :return: The generated answer.
        """
        # Get the QA chain
        qa = self._get_qa()
        history = get_chat_history(self.chat_id)
        # Format the chat history into a list of (human, ai) tuples
        transformed_history = [(chat.user_message, chat.assistant) for chat in history]
        # Generate the model response using the QA chain
        model_response = qa({"question": question, "chat_history": transformed_history})
        answer = model_response["answer"]
        return answer
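

# A minimal usage sketch (illustrative only; the argument values below are
# placeholders, and the Supabase/OpenAI credentials in BrainSettings are
# assumed to be configured via the environment):
#
#     brain = BrainPicking(
#         model="gpt-3.5-turbo",
#         brain_id="<brain-uuid>",
#         temperature=0.0,
#         chat_id="<chat-uuid>",
#         max_tokens=256,
#         user_openai_api_key=None,
#     )
#     print(brain.generate_answer("What is in my knowledge base?"))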