feat(metadata): added file size

Stan Girard 2023-05-13 01:12:51 +02:00
parent badb27bf19
commit 8f3d59e955
4 changed files with 49 additions and 4 deletions

brain.py (new file, 38 lines)

@@ -0,0 +1,38 @@
import streamlit as st
import numpy as np

def brain(supabase):
    ## List all documents
    response = supabase.table("documents").select("name:metadata->>file_name, size:metadata->>file_size", count="exact").execute()

    st.markdown(f"**Docs in DB:** {response.count}")

    documents = response.data  # Access the data from the response

    # Convert each dictionary to a tuple of items, then to a set to remove duplicates, and then back to a dictionary
    unique_data = [dict(t) for t in set(tuple(d.items()) for d in documents)]

    # Sort the list of documents by size in decreasing order
    unique_data.sort(key=lambda x: int(x['size']), reverse=True)

    for document in unique_data:
        # Create a unique key for each button by using the document name
        button_key = f"delete_{document['name']}"

        # Display the document name, size and the delete button on the same line
        col1, col2, col3 = st.columns([3, 1, 1])
        col1.write(f"{document['name']} ({document['size']} bytes)")
        if col2.button('', key=button_key):
            delete_document(supabase, document['name'])

def delete_document(supabase, document_name):
    # Delete the document from the database
    response = supabase.table("documents").delete().match({"metadata->>file_name": document_name}).execute()
    # Check if the deletion was successful
    if len(response.data) > 0:
        st.write(f"✂️ {document_name} was deleted.")
    else:
        st.write(f"{document_name} was not deleted.")


@@ -31,12 +31,15 @@ def _transcribe_audio(api_key, audio_file):
    return transcript

-def process_audio(openai_api_key, vector_store, file_name):
+def process_audio(vector_store, file_name):
    file_sha = ""
    dateshort = time.strftime("%Y%m%d-%H%M%S")
    file_name = f"audiotranscript_{dateshort}.audio"
    file_meta_name = f"audiotranscript_{dateshort}.txt"
    openai_api_key = st.secrets["openai_api_key"]
    transcript = _transcribe_audio(openai_api_key, file_name)
    file_sha = compute_sha1_from_content(transcript.text.encode("utf-8"))
    ## file size computed from transcript
    file_size = len(transcript.text.encode("utf-8"))
    ## Load chunk size and overlap from sidebar
@@ -46,7 +49,7 @@ def process_audio(openai_api_key, vector_store, file_name):
    text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
    texts = text_splitter.split_text(transcript.text)
-    docs_with_metadata = [Document(page_content=text, metadata={"file_sha1": file_sha,"file_name": file_name, "chunk_size": chunk_size, "chunk_overlap": chunk_overlap, "date": dateshort}) for text in texts]
+    docs_with_metadata = [Document(page_content=text, metadata={"file_sha1": file_sha,"file_size": file_size, "file_name": file_meta_name, "chunk_size": chunk_size, "chunk_overlap": chunk_overlap, "date": dateshort}) for text in texts]
    vector_store.add_documents(docs_with_metadata)
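Note that the file_size recorded for audio transcripts is the UTF-8 byte length of the transcribed text, not its character count, which is why the text is encoded before len() is taken. A small standalone sketch (example string only, not from the commit):

text = "café ☕"                       # 6 characters
print(len(text))                        # 6
print(len(text.encode("utf-8")))        # 9 bytes: "é" takes 2 bytes and "☕" takes 3 bytes in UTF-8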


@@ -9,6 +9,7 @@ def process_file(vector_store, file, loader_class, file_suffix):
    documents = []
    file_sha = ""
    file_name = file.name
    file_size = file.size
    dateshort = time.strftime("%Y%m%d")
    with tempfile.NamedTemporaryFile(delete=True, suffix=file_suffix) as tmp_file:
        tmp_file.write(file.getvalue())
@@ -26,7 +27,7 @@ def process_file(vector_store, file, loader_class, file_suffix):
    documents = text_splitter.split_documents(documents)
    # Add the document sha1 as metadata to each document
-    docs_with_metadata = [Document(page_content=doc.page_content, metadata={"file_sha1": file_sha1, "file_name": file_name, "chunk_size": chunk_size, "chunk_overlap": chunk_overlap, "date": dateshort}) for doc in documents]
+    docs_with_metadata = [Document(page_content=doc.page_content, metadata={"file_sha1": file_sha1,"file_size":file_size ,"file_name": file_name, "chunk_size": chunk_size, "chunk_overlap": chunk_overlap, "date": dateshort}) for doc in documents]
    vector_store.add_documents(docs_with_metadata)
    return
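For context, file here appears to be the object returned by Streamlit's st.file_uploader, so the new file_size metadata comes straight from the UploadedFile attributes. A minimal sketch of how those attributes are read (labels and file types are illustrative only):

import streamlit as st

uploaded = st.file_uploader("Upload a document", type=["pdf", "txt"])
if uploaded is not None:
    st.write(uploaded.name)        # original filename, stored as file_name
    st.write(uploaded.size)        # size in bytes, now stored as file_size
    content = uploaded.getvalue()  # raw bytes, written to the temporary file above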


@@ -5,6 +5,7 @@ import tempfile
import streamlit as st
from files import file_uploader
from question import chat_with_doc
from brain import brain
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import SupabaseVectorStore
from supabase import Client, create_client
@@ -45,6 +46,8 @@ user_choice = st.radio("Choose an action", ('Add Knowledge to the Brain', 'Ask a
st.markdown("---\n\n")
brain(supabase)
if user_choice == 'Add Knowledge to the Brain':
    # Display chunk size and overlap selection only when adding knowledge
    st.sidebar.title("Configuration")
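The supabase client passed to brain() is presumably created earlier in main.py from the imports shown in the first hunk. A minimal wiring sketch, with the secret names assumed for illustration rather than taken from the commit:

import streamlit as st
from supabase import create_client
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import SupabaseVectorStore
from brain import brain

# Secret names below are assumptions for illustration.
supabase_url = st.secrets["supabase_url"]
supabase_key = st.secrets["supabase_service_key"]
openai_api_key = st.secrets["openai_api_key"]

supabase = create_client(supabase_url, supabase_key)
embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
vector_store = SupabaseVectorStore(supabase, embeddings, table_name="documents")

brain(supabase)  # renders the document list with sizes and per-file delete buttons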