from concurrent.futures import ThreadPoolExecutor
from typing import List

from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.schema import Document
from llm.utils.summarization import llm_summerize
from logger import get_logger
from models.settings import BrainSettings, CommonsDep, common_dependencies
from pydantic import BaseModel

logger = get_logger(__name__)


class Neurons(BaseModel):
    commons: CommonsDep
    settings = BrainSettings()

    def create_vector(self, doc, user_openai_api_key=None):
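        """
        Embed `doc` and store it in the documents vector store.

        If `user_openai_api_key` is provided, the store's embedder is swapped
        for one using that key. Returns the list of inserted ids, or None if
        the insert fails (the error is logged).
        """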
        logger.info("Creating vector for document")
        logger.info(f"Document: {doc}")
        if user_openai_api_key:
            # Use the caller's own OpenAI key for this request's embeddings.
            self.commons["documents_vector_store"]._embedding = OpenAIEmbeddings(
                openai_api_key=user_openai_api_key
            )
        try:
            sids = self.commons["documents_vector_store"].add_documents([doc])
            if sids and len(sids) > 0:
                return sids
        except Exception as e:
            logger.error(f"Error creating vector for document: {e}")

    def create_embedding(self, content):
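        """Return the embedding vector for `content` using the shared embeddings client."""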
        return self.commons["embeddings"].embed_query(content)

    def similarity_search(self, query, table="match_summaries", top_k=5, threshold=0.5):
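        """
        Embed `query` and run a similarity search through a Supabase/Postgres RPC.

        `table` is the name of the stored function to call (`match_summaries`
        by default); it is expected to accept `query_embedding`, `match_count`
        and `match_threshold` and return the matching rows.
        """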
        query_embedding = self.create_embedding(query)
        summaries = (
            self.commons["supabase"]
            .rpc(
                table,
                {
                    "query_embedding": query_embedding,
                    "match_count": top_k,
                    "match_threshold": threshold,
                },
            )
            .execute()
        )
        return summaries.data


def create_summary(commons: CommonsDep, document_id, content, metadata):
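    """
    Summarize `content` with the LLM, store the summary in the summaries
    vector store, and link the stored row back to `document_id`.
    """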
    logger.info(f"Summarizing document {content[:100]}")
    summary = llm_summerize(content)
    logger.info(f"Summary: {summary}")
    metadata["document_id"] = document_id
    summary_doc_with_metadata = Document(page_content=summary, metadata=metadata)
    sids = commons["summaries_vector_store"].add_documents([summary_doc_with_metadata])
    if sids and len(sids) > 0:
        # Link the stored summary row back to the original document.
        commons["supabase"].table("summaries").update(
            {"document_id": document_id}
        ).match({"id": sids[0]}).execute()


def error_callback(exception):
    print("An exception occurred:", exception)


def process_batch(batch_ids):
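    """
    Fetch file name and size metadata for one batch of vector ids from the
    `vectors` table. A single id uses an equality filter; larger batches use
    an `in` filter.
    """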
    commons = common_dependencies()
    if len(batch_ids) == 1:
        return (
            commons["supabase"]
            .table("vectors")
            .select(
                "name:metadata->>file_name, size:metadata->>file_size",
                count="exact",
            )
            .filter("id", "eq", batch_ids[0])
            .execute()
        ).data
    else:
        return (
            commons["supabase"]
            .table("vectors")
            .select(
                "name:metadata->>file_name, size:metadata->>file_size",
                count="exact",
            )
            .filter("id", "in", tuple(batch_ids))
            .execute()
        ).data


def get_unique_files_from_vector_ids(vectors_ids: List[int]):
    """
    Retrieve the unique files referenced by the given vector ids.
    """
    # TODO: move into a Vectors class
    print("vectors_ids", vectors_ids)

    # Batch the ids so each thread queries a handful of rows at a time.
    BATCH_SIZE = 5

    with ThreadPoolExecutor() as executor:
        futures = []
        for i in range(0, len(vectors_ids), BATCH_SIZE):
            batch_ids = vectors_ids[i:i + BATCH_SIZE]
            future = executor.submit(process_batch, batch_ids)
            futures.append(future)

        # Retrieve the results
        vectors_responses = [future.result() for future in futures]

    # Flatten the batched responses and drop duplicate (name, size) entries.
    documents = [item for sublist in vectors_responses for item in sublist]
    print("document", documents)
    unique_files = [dict(t) for t in set(tuple(d.items()) for d in documents)]
    return unique_files
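# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the application flow. It assumes a
# configured environment (Supabase credentials and an OpenAI key picked up by
# BrainSettings / common_dependencies); the document below is a placeholder.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    commons = common_dependencies()
    neurons = Neurons(commons=commons)

    # Store a document, then run a similarity search over the summaries table.
    doc = Document(page_content="Quivr stores your knowledge as vectors.", metadata={})
    vector_ids = neurons.create_vector(doc)
    print("created vector ids:", vector_ids)

    matches = neurons.similarity_search("What does Quivr store?", top_k=3)
    print("matches:", matches)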