import asyncio
import json
from typing import AsyncIterable, Awaitable

# langchain components used to build the conversational retrieval chains
from langchain.callbacks import AsyncIteratorCallbackHandler
from langchain.chains import ConversationalRetrievalChain, LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms.base import LLM
from llm.prompt.CONDENSE_PROMPT import CONDENSE_QUESTION_PROMPT
from logger import get_logger
from models.settings import BrainSettings  # Settings related to the 'brain'
from pydantic import BaseModel  # Data validation and settings management
from repository.chat.get_chat_history import get_chat_history
from repository.chat.update_chat_history import update_chat_history
from repository.chat.update_message_by_id import update_message_by_id
from supabase import Client, create_client  # Supabase database client
from vectorstore.supabase import (
    CustomSupabaseVectorStore,
)  # Custom class for handling vector storage with Supabase

logger = get_logger(__name__)


class BrainPicking(BaseModel):
    """
    Main class for the Brain Picking functionality.
    It initializes a chat model, condenses follow-up questions and retrieves
    answers using a ConversationalRetrievalChain.
    """

    # Instantiate settings
    settings = BrainSettings()

    # Default class attributes
    llm_name: str = "gpt-3.5-turbo"
    temperature: float = 0.0
    chat_id: str
    max_tokens: int = 256

    # Storage
    supabase_client: Client = None
    vector_store: CustomSupabaseVectorStore = None

    # Language models
    embeddings: OpenAIEmbeddings = None
    question_llm: LLM = None
    doc_llm: LLM = None
    question_generator: LLMChain = None
    doc_chain: LLMChain = None
    qa: ConversationalRetrievalChain = None

    # Streaming
    callback: AsyncIteratorCallbackHandler = None
    streaming: bool = False

    class Config:
        # Allow arbitrary (non-pydantic) types during validation
        arbitrary_types_allowed = True

    def __init__(
        self,
        model: str,
        brain_id: str,
        temperature: float,
        chat_id: str,
        max_tokens: int,
        user_openai_api_key: str,
        streaming: bool = False,
    ) -> None:
        """
        Initialize the BrainPicking class by setting the embeddings, the Supabase
        client, the vector store, the language models and the chains.
        :param model: Language model name to be used.
        :param brain_id: The brain id used by the CustomSupabaseVectorStore.
        :param temperature: Sampling temperature for the language model.
        :param chat_id: Id of the chat whose history is read and updated.
        :param max_tokens: Maximum number of tokens to generate.
        :param user_openai_api_key: Optional user-provided OpenAI API key.
        :param streaming: Whether to stream the answer token by token.
        """
        super().__init__(
            model=model,
            brain_id=brain_id,
            chat_id=chat_id,
            max_tokens=max_tokens,
            temperature=temperature,
            user_openai_api_key=user_openai_api_key,
        )
        # If the user provided an API key, update the settings
        if user_openai_api_key is not None:
            self.settings.openai_api_key = user_openai_api_key

        self.temperature = temperature
        self.embeddings = OpenAIEmbeddings(openai_api_key=self.settings.openai_api_key)
        self.supabase_client = create_client(
            self.settings.supabase_url, self.settings.supabase_service_key
        )
        self.llm_name = model
        self.vector_store = CustomSupabaseVectorStore(
            self.supabase_client,
            self.embeddings,
            table_name="vectors",
            brain_id=brain_id,
        )

        # The question LLM only condenses the follow-up question, so it never streams
        self.question_llm = self._create_llm(
            model_name=self.llm_name,
            streaming=False,
        )
        self.question_generator = LLMChain(
            llm=self.question_llm, prompt=CONDENSE_QUESTION_PROMPT
        )

        # The document LLM answers from the retrieved documents; when streaming
        # is enabled, its tokens are surfaced through the callback handler
        if streaming:
            self.callback = AsyncIteratorCallbackHandler()
            self.doc_llm = self._create_llm(
                model_name=self.llm_name,
                streaming=streaming,
                callbacks=[self.callback],
            )
        else:
            self.doc_llm = self._create_llm(
                model_name=self.llm_name,
                streaming=streaming,
            )
        self.doc_chain = load_qa_chain(llm=self.doc_llm, chain_type="stuff")
        self.streaming = streaming

        self.chat_id = chat_id
        self.max_tokens = max_tokens

    def _create_llm(self, model_name, streaming=False, callbacks=None) -> LLM:
        """
        Create the language model to be used.
        :param model_name: Language model name to be used.
        :param streaming: Whether the model should stream its tokens.
        :param callbacks: Callback handlers to attach (used for streaming).
        :return: Language model instance
        """
        return ChatOpenAI(
            temperature=self.temperature,
            model_name=model_name,
            streaming=streaming,
            callbacks=callbacks,
        )

    def _get_qa(
        self,
    ) -> ConversationalRetrievalChain:
        """
        Build the QA chain: the question generator condenses the follow-up
        question, the retriever fetches matching documents from the vector
        store, and the document chain produces the answer.
        :return: ConversationalRetrievalChain instance
        """
        # Initialize and return a ConversationalRetrievalChain
        qa = ConversationalRetrievalChain(
            retriever=self.vector_store.as_retriever(),
            question_generator=self.question_generator,
            combine_docs_chain=self.doc_chain,
            verbose=True,
        )

        return qa

    def generate_answer(self, question: str) -> str:
        """
        Generate an answer to a given question by interacting with the language model.
        :param question: The question to answer.
        :return: The generated answer.
        """
        # Get the QA chain
        qa = self._get_qa()
        history = get_chat_history(self.chat_id)

        # Format the chat history into a list of (human, ai) tuples
        transformed_history = [(chat.user_message, chat.assistant) for chat in history]

        # Generate the model response using the QA chain
        model_response = qa({"question": question, "chat_history": transformed_history})
        answer = model_response["answer"]

        return answer
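
    # A minimal call sketch (hypothetical question; `brain` constructed as above):
    #
    #     answer = brain.generate_answer("What do my documents say about pricing?")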

    async def generate_stream(self, question: str) -> AsyncIterable:
        """
        Generate a streaming answer to a given question by interacting with the language model.
        :param question: The question to answer.
        :return: An async iterable which yields the answer token by token.
        """
        # Get the QA chain
        qa = self._get_qa()
        history = get_chat_history(self.chat_id)
        callback = self.callback

        # Format the chat history into a list of (human, ai) tuples
        transformed_history = [(chat.user_message, chat.assistant) for chat in history]

        # Initialize a list to hold the tokens
        response_tokens = []

        # Wrap an awaitable with an event that signals when it is done or an exception is raised
        async def wrap_done(fn: Awaitable, event: asyncio.Event):
            try:
                await fn
            except Exception as e:
                logger.error(f"Caught exception: {e}")
            finally:
                event.set()

        # Use the acall method to perform an async call to the QA chain
        task = asyncio.create_task(
            wrap_done(
                qa.acall(
                    {
                        "question": question,
                        "chat_history": transformed_history,
                    }
                ),
                callback.done,
            )
        )

        # Persist an empty assistant message that will be filled in as tokens arrive
        streamed_chat_history = update_chat_history(
            chat_id=self.chat_id,
            user_message=question,
            assistant="",
        )

        # Use the aiter method of the callback to stream the response with server-sent events
        async for token in callback.aiter():
            logger.info("Token: %s", token)

            # Add the token to the response_tokens list
            response_tokens.append(token)
            streamed_chat_history.assistant = token

            yield f"data: {json.dumps(streamed_chat_history.to_dict())}"

        await task

        # Join the tokens to form the assistant's full response
        assistant = "".join(response_tokens)

        update_message_by_id(
            message_id=streamed_chat_history.message_id,
            user_message=question,
            assistant=assistant,
        )
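

# A minimal sketch of how the stream might be consumed, assuming a FastAPI
# endpoint (FastAPI is not imported by this module, and the route below is
# hypothetical):
#
#     from fastapi.responses import StreamingResponse
#
#     @app.post("/chat/{chat_id}/question/stream")
#     async def stream_question(chat_id: str, question: str):
#         brain = BrainPicking(..., chat_id=chat_id, streaming=True)
#         return StreamingResponse(
#             brain.generate_stream(question),
#             media_type="text/event-stream",
#         )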